Upgrade to V8 3.3

Merge V8 at 3.3.10.39

Simple merge; only makefile updates were required.

Bug: 5688872
Change-Id: I14703f418235f5ce6013b9b3e2e502407a9f6dfd
diff --git a/src/SConscript b/src/SConscript
index 06ee907..fe21d02 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -68,7 +68,6 @@
     execution.cc
     factory.cc
     flags.cc
-    frame-element.cc
     frames.cc
     full-codegen.cc
     func-name-inferrer.cc
@@ -86,6 +85,7 @@
     inspector.cc
     interpreter-irregexp.cc
     isolate.cc
+    json-parser.cc
     jsregexp.cc
     lithium-allocator.cc
     lithium.cc
@@ -122,7 +122,6 @@
     strtod.cc
     stub-cache.cc
     token.cc
-    top.cc
     type-info.cc
     unicode.cc
     utils.cc
@@ -297,6 +296,11 @@
 '''.split()
 
 
+EXPERIMENTAL_LIBRARY_FILES = '''
+proxy.js
+'''.split()
+
+
 def Abort(message):
   print message
   sys.exit(1)
@@ -321,9 +325,16 @@
   # compile it.
   library_files = [s for s in LIBRARY_FILES]
   library_files.append('macros.py')
-  libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
+  libraries_src = env.JS2C(['libraries.cc'], library_files, TYPE='CORE')
   libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
 
+  # Combine the experimental JavaScript library files into a C++ file
+  # and compile it.
+  experimental_library_files = [ s for s in EXPERIMENTAL_LIBRARY_FILES ]
+  experimental_library_files.append('macros.py')
+  experimental_libraries_src = env.JS2C(['experimental-libraries.cc'], experimental_library_files, TYPE='EXPERIMENTAL')
+  experimental_libraries_obj = context.ConfigureObject(env, experimental_libraries_src, CPPPATH=['.'])
+
   source_objs = context.ConfigureObject(env, source_files)
   non_snapshot_files = [source_objs]
 
@@ -340,7 +351,7 @@
   mksnapshot_env = env.Copy()
   mksnapshot_env.Replace(**context.flags['mksnapshot'])
   mksnapshot_src = 'mksnapshot.cc'
-  mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
+  mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, experimental_libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
   if context.use_snapshot:
     if context.build_snapshot:
       snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
@@ -349,7 +360,7 @@
     snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
   else:
     snapshot_obj = empty_snapshot_obj
-  library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
+  library_objs = [non_snapshot_files, libraries_obj, experimental_libraries_obj, snapshot_obj]
   return (library_objs, d8_objs, [mksnapshot], preparser_objs)
 
 
diff --git a/src/accessors.cc b/src/accessors.cc
index 7fa6982..255e3dd 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,6 +32,7 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "factory.h"
+#include "list-inl.h"
 #include "safepoint-table.h"
 #include "scopeinfo.h"
 
@@ -679,6 +680,52 @@
 }
 
 
+class FrameFunctionIterator {
+ public:
+  FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
+      : frame_iterator_(isolate),
+        functions_(2),
+        index_(0) {
+    GetFunctions();
+  }
+
+  JSFunction* next() {
+    if (functions_.length() == 0) return NULL;
+    JSFunction* next_function = functions_[index_];
+    index_--;
+    if (index_ < 0) {
+      GetFunctions();
+    }
+    return next_function;
+  }
+
+  // Iterate through functions until the first occurrence of 'function'.
+  // Returns true if 'function' is found, and false if the iterator ends
+  // without finding it.
+  bool Find(JSFunction* function) {
+    JSFunction* next_function;
+    do {
+      next_function = next();
+      if (next_function == function) return true;
+    } while (next_function != NULL);
+    return false;
+  }
+ private:
+  void GetFunctions() {
+    functions_.Rewind(0);
+    if (frame_iterator_.done()) return;
+    JavaScriptFrame* frame = frame_iterator_.frame();
+    frame->GetFunctions(&functions_);
+    ASSERT(functions_.length() > 0);
+    frame_iterator_.Advance();
+    index_ = functions_.length() - 1;
+  }
+  JavaScriptFrameIterator frame_iterator_;
+  List<JSFunction*> functions_;
+  int index_;
+};
+
+
 MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
   Isolate* isolate = Isolate::Current();
   HandleScope scope(isolate);
@@ -688,38 +735,30 @@
   if (!found_it) return isolate->heap()->undefined_value();
   Handle<JSFunction> function(holder, isolate);
 
-  List<JSFunction*> functions(2);
-  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
-    JavaScriptFrame* frame = it.frame();
-    frame->GetFunctions(&functions);
-    for (int i = functions.length() - 1; i >= 0; i--) {
-      if (functions[i] == *function) {
-        // Once we have found the frame, we need to go to the caller
-        // frame. This may require skipping through a number of top-level
-        // frames, e.g. frames for scripts not functions.
-        if (i > 0) {
-          ASSERT(!functions[i - 1]->shared()->is_toplevel());
-          return CheckNonStrictCallerOrThrow(isolate, functions[i - 1]);
-        } else {
-          for (it.Advance(); !it.done(); it.Advance()) {
-            frame = it.frame();
-            functions.Rewind(0);
-            frame->GetFunctions(&functions);
-            if (!functions.last()->shared()->is_toplevel()) {
-              return CheckNonStrictCallerOrThrow(isolate, functions.last());
-            }
-            ASSERT(functions.length() == 1);
-          }
-          if (it.done()) return isolate->heap()->null_value();
-          break;
-        }
-      }
-    }
-    functions.Rewind(0);
+  FrameFunctionIterator it(isolate, no_alloc);
+
+  // Find the function from the frames.
+  if (!it.Find(*function)) {
+    // No frame corresponding to the given function found. Return null.
+    return isolate->heap()->null_value();
   }
 
-  // No frame corresponding to the given function found. Return null.
-  return isolate->heap()->null_value();
+  // Find previously called non-toplevel function.
+  JSFunction* caller;
+  do {
+    caller = it.next();
+    if (caller == NULL) return isolate->heap()->null_value();
+  } while (caller->shared()->is_toplevel());
+
+  // If caller is a built-in function and caller's caller is also built-in,
+  // use that instead.
+  JSFunction* potential_caller = caller;
+  while (potential_caller != NULL && potential_caller->IsBuiltin()) {
+    caller = potential_caller;
+    potential_caller = it.next();
+  }
+
+  return CheckNonStrictCallerOrThrow(isolate, caller);
 }
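FrameFunctionIterator above replaces the nested frame/function loops with a small buffered iterator: each JavaScript frame contributes its functions (several, if the optimizing compiler inlined frames) via one GetFunctions() call, and next() drains that buffer innermost-first before advancing to the next frame. A minimal standalone sketch of the same buffering pattern, using plain containers instead of V8's frame iterator (types here are illustrative, not V8 API):

    #include <cstddef>
    #include <vector>

    typedef const char* Function;  // stand-in for JSFunction*

    class BufferedFunctionIterator {
     public:
      // 'frames' lists, per stack frame, that frame's functions (innermost last).
      explicit BufferedFunctionIterator(
          const std::vector<std::vector<Function> >& frames)
          : frames_(frames), next_frame_(0), index_(-1) {
        Refill();
      }
      // Returns functions innermost-first across all frames, NULL when exhausted.
      Function next() {
        if (index_ < 0) return NULL;
        Function result = buffer_[index_--];
        if (index_ < 0) Refill();
        return result;
      }
     private:
      void Refill() {  // corresponds to GetFunctions() above
        if (next_frame_ >= frames_.size()) { index_ = -1; return; }
        buffer_ = frames_[next_frame_++];
        index_ = static_cast<int>(buffer_.size()) - 1;
      }
      std::vector<std::vector<Function> > frames_;
      size_t next_frame_;
      std::vector<Function> buffer_;
      int index_;
    };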
 
 
diff --git a/src/accessors.h b/src/accessors.h
index 14ccc8f..385536d 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -28,6 +28,8 @@
 #ifndef V8_ACCESSORS_H_
 #define V8_ACCESSORS_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/api.cc b/src/api.cc
index 3ae6304..5980854 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -53,7 +53,6 @@
 
 #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
 
-// TODO(isolates): avoid repeated TLS reads in function prologues.
 #ifdef ENABLE_VMSTATE_TRACKING
 #define ENTER_V8(isolate)                                        \
   ASSERT((isolate)->IsInitialized());                           \
@@ -89,7 +88,7 @@
     if (has_pending_exception) {                                               \
       if (handle_scope_implementer->CallDepthIsZero() &&                       \
           (isolate)->is_out_of_memory()) {                                     \
-        if (!(isolate)->ignore_out_of_memory())                                \
+        if (!handle_scope_implementer->ignore_out_of_memory())                 \
           i::V8::FatalProcessOutOfMemory(NULL);                                \
       }                                                                        \
       bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero();   \
@@ -290,6 +289,7 @@
   if (isolate != NULL) {
     if (isolate->IsInitialized()) return true;
   }
+  ASSERT(isolate == i::Isolate::Current());
   return ApiCheck(InitializeHelper(), location, "Error initializing V8");
 }
 
@@ -311,12 +311,74 @@
 }
 
 
+StartupData::CompressionAlgorithm V8::GetCompressedStartupDataAlgorithm() {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  return StartupData::kBZip2;
+#else
+  return StartupData::kUncompressed;
+#endif
+}
+
+
+enum CompressedStartupDataItems {
+  kSnapshot = 0,
+  kSnapshotContext,
+  kCompressedStartupDataCount
+};
+
+int V8::GetCompressedStartupDataCount() {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  return kCompressedStartupDataCount;
+#else
+  return 0;
+#endif
+}
+
+
+void V8::GetCompressedStartupData(StartupData* compressed_data) {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  compressed_data[kSnapshot].data =
+      reinterpret_cast<const char*>(i::Snapshot::data());
+  compressed_data[kSnapshot].compressed_size = i::Snapshot::size();
+  compressed_data[kSnapshot].raw_size = i::Snapshot::raw_size();
+
+  compressed_data[kSnapshotContext].data =
+      reinterpret_cast<const char*>(i::Snapshot::context_data());
+  compressed_data[kSnapshotContext].compressed_size =
+      i::Snapshot::context_size();
+  compressed_data[kSnapshotContext].raw_size = i::Snapshot::context_raw_size();
+#endif
+}
+
+
+void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  ASSERT_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size);
+  i::Snapshot::set_raw_data(
+      reinterpret_cast<const i::byte*>(decompressed_data[kSnapshot].data));
+
+  ASSERT_EQ(i::Snapshot::context_raw_size(),
+            decompressed_data[kSnapshotContext].raw_size);
+  i::Snapshot::set_context_raw_data(
+      reinterpret_cast<const i::byte*>(
+          decompressed_data[kSnapshotContext].data));
+#endif
+}
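These four entry points are the embedder-facing half of COMPRESS_STARTUP_DATA_BZ2: the embedder asks how many compressed blobs the build contains, decompresses them itself, and hands the raw bytes back before initializing V8. A rough usage sketch, assuming the embedder links against libbz2 (error handling omitted; the decompressed buffers must outlive V8's use of them):

    #include <bzlib.h>
    #include <v8.h>
    #include <vector>

    void DecompressV8StartupData() {
      int count = v8::V8::GetCompressedStartupDataCount();
      if (count == 0) return;  // build without COMPRESS_STARTUP_DATA_BZ2
      std::vector<v8::StartupData> blobs(count);
      v8::V8::GetCompressedStartupData(&blobs[0]);
      for (int i = 0; i < count; i++) {
        char* raw = new char[blobs[i].raw_size];
        unsigned int raw_size = blobs[i].raw_size;
        BZ2_bzBuffToBuffDecompress(raw, &raw_size,
                                   const_cast<char*>(blobs[i].data),
                                   blobs[i].compressed_size, 0, 0);
        blobs[i].data = raw;
        blobs[i].raw_size = raw_size;
      }
      v8::V8::SetDecompressedStartupData(&blobs[0]);
    }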
+
+
 void V8::SetFatalErrorHandler(FatalErrorCallback that) {
   i::Isolate* isolate = EnterIsolateIfNeeded();
   isolate->set_exception_behavior(that);
 }
 
 
+void V8::SetAllowCodeGenerationFromStringsCallback(
+    AllowCodeGenerationFromStringsCallback callback) {
+  i::Isolate* isolate = EnterIsolateIfNeeded();
+  isolate->set_allow_code_gen_callback(callback);
+}
+
+
 #ifdef DEBUG
 void ImplementationUtilities::ZapHandleRange(i::Object** begin,
                                              i::Object** end) {
@@ -477,6 +539,13 @@
 }
 
 
+void V8::MarkIndependent(i::Object** object) {
+  i::Isolate* isolate = i::Isolate::Current();
+  LOG_API(isolate, "MarkIndependent");
+  isolate->global_handles()->MarkIndependent(object);
+}
+
+
 bool V8::IsGlobalNearDeath(i::Object** obj) {
   i::Isolate* isolate = i::Isolate::Current();
   LOG_API(isolate, "IsGlobalNearDeath");
@@ -867,9 +936,9 @@
 }
 
 
-#define SET_FIELD_WRAPPED(obj, setter, cdata) do {  \
-    i::Handle<i::Object> proxy = FromCData(cdata);  \
-    (obj)->setter(*proxy);                          \
+#define SET_FIELD_WRAPPED(obj, setter, cdata) do {    \
+    i::Handle<i::Object> foreign = FromCData(cdata);  \
+    (obj)->setter(*foreign);                          \
   } while (false)
 
 
@@ -1989,7 +2058,7 @@
   if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
     return false;
   }
-  return Utils::OpenHandle(this)->IsProxy();
+  return Utils::OpenHandle(this)->IsForeign();
 }
 
 
@@ -2150,7 +2219,7 @@
 void External::CheckCast(v8::Value* that) {
   if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
-  ApiCheck(obj->IsProxy(),
+  ApiCheck(obj->IsForeign(),
            "v8::External::Cast()",
            "Could not convert to external");
 }
@@ -2751,6 +2820,15 @@
 }
 
 
+bool v8::Object::HasOwnProperty(Handle<String> key) {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
+             return false);
+  return Utils::OpenHandle(this)->HasLocalProperty(
+      *Utils::OpenHandle(*key));
+}
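HasOwnProperty checks only the object's own (local) properties, mirroring JavaScript's Object.prototype.hasOwnProperty, whereas Has() also consults the prototype chain. A small usage sketch:

    // True only if 'key' is an own property of obj; inherited properties don't count.
    bool HasOwn(v8::Handle<v8::Object> obj, const char* key) {
      return obj->HasOwnProperty(v8::String::New(key));
    }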
+
+
 bool v8::Object::HasRealNamedProperty(Handle<String> key) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
@@ -3161,6 +3239,8 @@
       return kExternalUnsignedIntArray;
     case i::EXTERNAL_FLOAT_ARRAY_TYPE:
       return kExternalFloatArray;
+    case i::EXTERNAL_DOUBLE_ARRAY_TYPE:
+      return kExternalDoubleArray;
     case i::EXTERNAL_PIXEL_ARRAY_TYPE:
       return kExternalPixelArray;
     default:
@@ -3182,6 +3262,85 @@
 }
 
 
+bool v8::Object::IsCallable() {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::IsCallable()", return false);
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  if (obj->IsJSFunction()) return true;
+  return i::Execution::GetFunctionDelegate(obj)->IsJSFunction();
+}
+
+
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
+                                        v8::Handle<v8::Value> argv[]) {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
+             return Local<v8::Value>());
+  LOG_API(isolate, "Object::CallAsFunction");
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
+  STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
+  if (obj->IsJSFunction()) {
+    fun = i::Handle<i::JSFunction>::cast(obj);
+  } else {
+    EXCEPTION_PREAMBLE(isolate);
+    i::Handle<i::Object> delegate =
+        i::Execution::TryGetFunctionDelegate(obj, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+    fun = i::Handle<i::JSFunction>::cast(delegate);
+    recv_obj = obj;
+  }
+  EXCEPTION_PREAMBLE(isolate);
+  i::Handle<i::Object> returned =
+      i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+  return Utils::ToLocal(scope.CloseAndEscape(returned));
+}
+
+
+Local<v8::Value> Object::CallAsConstructor(int argc,
+                                           v8::Handle<v8::Value> argv[]) {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::CallAsConstructor()",
+             return Local<v8::Object>());
+  LOG_API(isolate, "Object::CallAsConstructor");
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
+  i::Object*** args = reinterpret_cast<i::Object***>(argv);
+  if (obj->IsJSFunction()) {
+    i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
+    EXCEPTION_PREAMBLE(isolate);
+    i::Handle<i::Object> returned =
+        i::Execution::New(fun, argc, args, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+    return Utils::ToLocal(scope.CloseAndEscape(
+        i::Handle<i::JSObject>::cast(returned)));
+  }
+  EXCEPTION_PREAMBLE(isolate);
+  i::Handle<i::Object> delegate =
+      i::Execution::TryGetConstructorDelegate(obj, &has_pending_exception);
+  EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+  if (!delegate->IsUndefined()) {
+    i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate);
+    EXCEPTION_PREAMBLE(isolate);
+    i::Handle<i::Object> returned =
+        i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+    ASSERT(!delegate->IsUndefined());
+    return Utils::ToLocal(scope.CloseAndEscape(returned));
+  }
+  return Local<v8::Object>();
+}
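IsCallable, CallAsFunction and CallAsConstructor let an embedder invoke objects that are callable without being JSFunctions (the delegate lookups above cover that case), so no cast to v8::Function is needed. A hedged usage sketch:

    // Invoke 'obj' as a function with a single argument if it is callable;
    // returns an empty handle otherwise (or if the call threw).
    v8::Local<v8::Value> InvokeIfCallable(v8::Handle<v8::Object> obj,
                                          v8::Handle<v8::Object> recv,
                                          v8::Handle<v8::Value> arg) {
      if (!obj->IsCallable()) return v8::Local<v8::Value>();
      v8::Handle<v8::Value> argv[] = { arg };
      return obj->CallAsFunction(recv, 1, argv);
    }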
+
+
 Local<v8::Object> Function::NewInstance() const {
   return NewInstance(0, NULL);
 }
@@ -3570,11 +3729,11 @@
     Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
   } else {
     HandleScope scope;
-    i::Handle<i::Proxy> proxy =
-        isolate->factory()->NewProxy(
+    i::Handle<i::Foreign> foreign =
+        isolate->factory()->NewForeign(
             reinterpret_cast<i::Address>(value), i::TENURED);
-    if (!proxy.is_null())
-        Utils::OpenHandle(this)->SetInternalField(index, *proxy);
+    if (!foreign.is_null())
+        Utils::OpenHandle(this)->SetInternalField(index, *foreign);
   }
   ASSERT_EQ(value, GetPointerFromInternalField(index));
 }
@@ -3707,6 +3866,7 @@
 
     // Create the environment.
     env = isolate->bootstrapper()->CreateEnvironment(
+        isolate,
         Utils::OpenHandle(*global_object),
         proxy_template,
         extensions);
@@ -3779,7 +3939,7 @@
 
 v8::Local<v8::Context> Context::GetEntered() {
   i::Isolate* isolate = i::Isolate::Current();
-  if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) {
+  if (IsDeadCheck(isolate, "v8::Context::GetEntered()")) {
     return Local<Context>();
   }
   i::Handle<i::Object> last =
@@ -3851,6 +4011,20 @@
 }
 
 
+void Context::AllowCodeGenerationFromStrings(bool allow) {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (IsDeadCheck(isolate, "v8::Context::AllowCodeGenerationFromStrings()")) {
+    return;
+  }
+  ENTER_V8(isolate);
+  i::Object** ctx = reinterpret_cast<i::Object**>(this);
+  i::Handle<i::Context> context =
+      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+  context->set_allow_code_gen_from_strings(
+      allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
+}
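Paired with V8::SetAllowCodeGenerationFromStringsCallback above, this gives embedders CSP-style control over eval() and the Function constructor: when a context's flag is false, V8 consults the callback before allowing code generation from strings. A rough sketch (the callback signature is assumed from this revision's v8.h):

    // Assumed callback signature: bool (*)(v8::Local<v8::Context>).
    static bool AllowCodeGen(v8::Local<v8::Context> context) {
      return false;  // e.g. consult the embedder's security policy here
    }

    void LockDownContext(v8::Handle<v8::Context> context) {
      v8::V8::SetAllowCodeGenerationFromStringsCallback(AllowCodeGen);
      context->AllowCodeGenerationFromStrings(false);
    }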
+
+
 void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
   i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
 }
@@ -3895,19 +4069,19 @@
 
 
 static Local<External> ExternalNewImpl(void* data) {
-  return Utils::ToLocal(FACTORY->NewProxy(static_cast<i::Address>(data)));
+  return Utils::ToLocal(FACTORY->NewForeign(static_cast<i::Address>(data)));
 }
 
 static void* ExternalValueImpl(i::Handle<i::Object> obj) {
-  return reinterpret_cast<void*>(i::Proxy::cast(*obj)->proxy());
+  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->address());
 }
 
 
 Local<Value> v8::External::Wrap(void* data) {
   i::Isolate* isolate = i::Isolate::Current();
   STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
-  EnsureInitializedForIsolate(isolate, "v8::External::Wrap()");
   LOG_API(isolate, "External::Wrap");
+  EnsureInitializedForIsolate(isolate, "v8::External::Wrap()");
   ENTER_V8(isolate);
 
   v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
@@ -3924,8 +4098,8 @@
   i::Object* value = obj->GetInternalField(index);
   if (value->IsSmi()) {
     return i::Internals::GetExternalPointerFromSmi(value);
-  } else if (value->IsProxy()) {
-    return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
+  } else if (value->IsForeign()) {
+    return reinterpret_cast<void*>(i::Foreign::cast(value)->address());
   } else {
     return NULL;
   }
@@ -3938,7 +4112,7 @@
   void* result;
   if (obj->IsSmi()) {
     result = i::Internals::GetExternalPointerFromSmi(*obj);
-  } else if (obj->IsProxy()) {
+  } else if (obj->IsForeign()) {
     result = ExternalValueImpl(obj);
   } else {
     result = NULL;
@@ -3951,8 +4125,8 @@
 Local<External> v8::External::New(void* data) {
   STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::External::New()");
   LOG_API(isolate, "External::New");
+  EnsureInitializedForIsolate(isolate, "v8::External::New()");
   ENTER_V8(isolate);
   return ExternalNewImpl(data);
 }
@@ -4371,7 +4545,8 @@
 
 
 void V8::IgnoreOutOfMemoryException() {
-  EnterIsolateIfNeeded()->set_ignore_out_of_memory(true);
+  EnterIsolateIfNeeded()->handle_scope_implementer()->set_ignore_out_of_memory(
+      true);
 }
 
 
@@ -4383,7 +4558,7 @@
   i::HandleScope scope(isolate);
   NeanderArray listeners(isolate->factory()->message_listeners());
   NeanderObject obj(2);
-  obj.set(0, *isolate->factory()->NewProxy(FUNCTION_ADDR(that)));
+  obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
   obj.set(1, data.IsEmpty() ?
              isolate->heap()->undefined_value() :
              *Utils::OpenHandle(*data));
@@ -4403,8 +4578,8 @@
     if (listeners.get(i)->IsUndefined()) continue;  // skip deleted ones
 
     NeanderObject listener(i::JSObject::cast(listeners.get(i)));
-    i::Handle<i::Proxy> callback_obj(i::Proxy::cast(listener.get(0)));
-    if (callback_obj->proxy() == FUNCTION_ADDR(that)) {
+    i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
+    if (callback_obj->address() == FUNCTION_ADDR(that)) {
       listeners.set(i, isolate->heap()->undefined_value());
     }
   }
@@ -4695,26 +4870,30 @@
 }
 
 
-String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
+void Isolate::SetData(void* data) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->SetData(data);
+}
+
+void* Isolate::GetData() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  return isolate->GetData();
+}
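Isolate::SetData/GetData expose one pointer-sized slot of embedder data per isolate, useful for mapping a callback back to the embedder object that owns the isolate. A minimal sketch (EmbedderState is a hypothetical embedder type):

    struct EmbedderState { int script_count; };  // hypothetical embedder bookkeeping

    void AttachState(v8::Isolate* isolate, EmbedderState* state) {
      isolate->SetData(state);
    }

    EmbedderState* StateFor(v8::Isolate* isolate) {
      return static_cast<EmbedderState*>(isolate->GetData());
    }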
+
+
+String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
+    : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::String::Utf8Value::Utf8Value()")) return;
-  if (obj.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-    return;
-  }
+  if (obj.IsEmpty()) return;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   TryCatch try_catch;
   Handle<String> str = obj->ToString();
-  if (str.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-  } else {
-    length_ = str->Utf8Length();
-    str_ = i::NewArray<char>(length_ + 1);
-    str->WriteUtf8(str_);
-  }
+  if (str.IsEmpty()) return;
+  length_ = str->Utf8Length();
+  str_ = i::NewArray<char>(length_ + 1);
+  str->WriteUtf8(str_);
 }
 
 
@@ -4723,26 +4902,19 @@
 }
 
 
-String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj) {
+String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
+    : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::String::AsciiValue::AsciiValue()")) return;
-  if (obj.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-    return;
-  }
+  if (obj.IsEmpty()) return;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   TryCatch try_catch;
   Handle<String> str = obj->ToString();
-  if (str.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-  } else {
-    length_ = str->Length();
-    str_ = i::NewArray<char>(length_ + 1);
-    str->WriteAscii(str_);
-  }
+  if (str.IsEmpty()) return;
+  length_ = str->Length();
+  str_ = i::NewArray<char>(length_ + 1);
+  str->WriteAscii(str_);
 }
 
 
@@ -4751,26 +4923,19 @@
 }
 
 
-String::Value::Value(v8::Handle<v8::Value> obj) {
+String::Value::Value(v8::Handle<v8::Value> obj)
+    : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::String::Value::Value()")) return;
-  if (obj.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-    return;
-  }
+  if (obj.IsEmpty()) return;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   TryCatch try_catch;
   Handle<String> str = obj->ToString();
-  if (str.IsEmpty()) {
-    str_ = NULL;
-    length_ = 0;
-  } else {
-    length_ = str->Length();
-    str_ = i::NewArray<uint16_t>(length_ + 1);
-    str->Write(str_);
-  }
+  if (str.IsEmpty()) return;
+  length_ = str->Length();
+  str_ = i::NewArray<uint16_t>(length_ + 1);
+  str->Write(str_);
 }
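With str_ and length_ now initialized in the constructor's initializer list, Utf8Value, AsciiValue and Value consistently report NULL/0 whenever conversion fails (dead VM, empty handle, or ToString throwing), so callers should test the pointer instead of assuming success. A small usage sketch:

    #include <cstdio>
    #include <v8.h>

    void PrintValue(v8::Handle<v8::Value> value) {
      v8::String::Utf8Value utf8(value);
      // *utf8 is NULL if the conversion above failed.
      printf("%s\n", *utf8 != NULL ? *utf8 : "<conversion failed>");
    }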
 
 
@@ -4884,11 +5049,12 @@
   isolate->set_debug_event_callback(that);
 
   i::HandleScope scope(isolate);
-  i::Handle<i::Object> proxy = isolate->factory()->undefined_value();
+  i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
   if (that != NULL) {
-    proxy = isolate->factory()->NewProxy(FUNCTION_ADDR(EventCallbackWrapper));
+    foreign =
+        isolate->factory()->NewForeign(FUNCTION_ADDR(EventCallbackWrapper));
   }
-  isolate->debugger()->SetEventListener(proxy, Utils::OpenHandle(*data));
+  isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
   return true;
 }
 
@@ -4899,12 +5065,11 @@
   ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener2()", return false);
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
-  i::Handle<i::Object> proxy = isolate->factory()->undefined_value();
+  i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
   if (that != NULL) {
-    proxy = isolate->factory()->NewProxy(FUNCTION_ADDR(that));
+    foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that));
   }
-  isolate->debugger()->SetEventListener(proxy,
-                                                      Utils::OpenHandle(*data));
+  isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
   return true;
 }
 
@@ -5619,9 +5784,8 @@
 
 
 char* HandleScopeImplementer::ArchiveThread(char* storage) {
-  Isolate* isolate = Isolate::Current();
   v8::ImplementationUtilities::HandleScopeData* current =
-      isolate->handle_scope_data();
+      isolate_->handle_scope_data();
   handle_scope_data_ = *current;
   memcpy(storage, this, sizeof(*this));
 
@@ -5639,7 +5803,7 @@
 
 char* HandleScopeImplementer::RestoreThread(char* storage) {
   memcpy(this, storage, sizeof(*this));
-  *Isolate::Current()->handle_scope_data() = handle_scope_data_;
+  *isolate_->handle_scope_data() = handle_scope_data_;
   return storage + ArchiveSpacePerThread();
 }
 
@@ -5665,7 +5829,7 @@
 
 void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
   v8::ImplementationUtilities::HandleScopeData* current =
-      Isolate::Current()->handle_scope_data();
+      isolate_->handle_scope_data();
   handle_scope_data_ = *current;
   IterateThis(v);
 }
diff --git a/src/api.h b/src/api.h
index d38a1d5..5846414 100644
--- a/src/api.h
+++ b/src/api.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -115,14 +115,14 @@
 template <typename T> static inline T ToCData(v8::internal::Object* obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
   return reinterpret_cast<T>(
-      reinterpret_cast<intptr_t>(v8::internal::Proxy::cast(obj)->proxy()));
+      reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
 }
 
 
 template <typename T>
 static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
-  return FACTORY->NewProxy(
+  return FACTORY->NewForeign(
       reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
 }
 
@@ -182,7 +182,7 @@
   static inline Local<Array> ToLocal(
       v8::internal::Handle<v8::internal::JSArray> obj);
   static inline Local<External> ToLocal(
-      v8::internal::Handle<v8::internal::Proxy> obj);
+      v8::internal::Handle<v8::internal::Foreign> obj);
   static inline Local<Message> MessageToLocal(
       v8::internal::Handle<v8::internal::Object> obj);
   static inline Local<StackTrace> StackTraceToLocal(
@@ -236,7 +236,7 @@
       OpenHandle(const v8::Signature* sig);
   static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
       OpenHandle(const v8::TypeSwitch* that);
-  static inline v8::internal::Handle<v8::internal::Proxy>
+  static inline v8::internal::Handle<v8::internal::Foreign>
       OpenHandle(const v8::External* that);
 };
 
@@ -273,7 +273,7 @@
 MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
 MAKE_TO_LOCAL(ToLocal, JSObject, Object)
 MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, Proxy, External)
+MAKE_TO_LOCAL(ToLocal, Foreign, External)
 MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
 MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
 MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@@ -311,7 +311,7 @@
 MAKE_OPEN_HANDLE(Function, JSFunction)
 MAKE_OPEN_HANDLE(Message, JSObject)
 MAKE_OPEN_HANDLE(Context, Context)
-MAKE_OPEN_HANDLE(External, Proxy)
+MAKE_OPEN_HANDLE(External, Foreign)
 MAKE_OPEN_HANDLE(StackTrace, JSArray)
 MAKE_OPEN_HANDLE(StackFrame, JSObject)
 
@@ -396,14 +396,16 @@
 // data. In multithreaded V8 programs this data is copied in and out of storage
 // so that the currently executing thread always has its own copy of this
 // data.
-ISOLATED_CLASS HandleScopeImplementer {
+class HandleScopeImplementer {
  public:
 
-  HandleScopeImplementer()
-      : blocks_(0),
+  explicit HandleScopeImplementer(Isolate* isolate)
+      : isolate_(isolate),
+        blocks_(0),
         entered_contexts_(0),
         saved_contexts_(0),
         spare_(NULL),
+        ignore_out_of_memory_(false),
         call_depth_(0) { }
 
   // Threading support for handle data.
@@ -436,6 +438,10 @@
   inline bool HasSavedContexts();
 
   inline List<internal::Object**>* blocks() { return &blocks_; }
+  inline bool ignore_out_of_memory() { return ignore_out_of_memory_; }
+  inline void set_ignore_out_of_memory(bool value) {
+    ignore_out_of_memory_ = value;
+  }
 
  private:
   void ResetAfterArchive() {
@@ -443,6 +449,7 @@
     entered_contexts_.Initialize(0);
     saved_contexts_.Initialize(0);
     spare_ = NULL;
+    ignore_out_of_memory_ = false;
     call_depth_ = 0;
   }
 
@@ -460,12 +467,14 @@
     ASSERT(call_depth_ == 0);
   }
 
+  Isolate* isolate_;
   List<internal::Object**> blocks_;
   // Used as a stack to keep track of entered contexts.
   List<Handle<Object> > entered_contexts_;
   // Used as a stack to keep track of saved contexts.
   List<Context*> saved_contexts_;
   Object** spare_;
+  bool ignore_out_of_memory_;
   int call_depth_;
   // This is only used for threading support.
   v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
diff --git a/src/arguments.h b/src/arguments.h
index a7a30e2..a080581 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -28,6 +28,8 @@
 #ifndef V8_ARGUMENTS_H_
 #define V8_ARGUMENTS_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index fd8e8b5..c7050a7 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -51,24 +51,30 @@
 unsigned CpuFeatures::found_by_runtime_probing_ = 0;
 
 
-#ifdef __arm__
+// Get the CPU features enabled by the build. For cross compilation the
+// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
+// can be defined to enable ARMv7 and VFPv3 instructions when building the
+// snapshot.
 static uint64_t CpuFeaturesImpliedByCompiler() {
   uint64_t answer = 0;
 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
   answer |= 1u << ARMv7;
 #endif  // def CAN_USE_ARMV7_INSTRUCTIONS
+#ifdef CAN_USE_VFP_INSTRUCTIONS
+  answer |= 1u << VFP3 | 1u << ARMv7;
+#endif  // def CAN_USE_VFP_INSTRUCTIONS
+
+#ifdef __arm__
   // If the compiler is allowed to use VFP then we can use VFP too in our code
   // generation even when generating snapshots.  This won't work for cross
   // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
 #if defined(__VFP_FP__) && !defined(__SOFTFP__)
   answer |= 1u << VFP3 | 1u << ARMv7;
 #endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
-#ifdef CAN_USE_VFP_INSTRUCTIONS
-  answer |= 1u << VFP3 | 1u << ARMv7;
-#endif  // def CAN_USE_VFP_INSTRUCTIONS
+#endif  // def __arm__
+
   return answer;
 }
-#endif  // def __arm__
 
 
 void CpuFeatures::Probe() {
@@ -76,6 +82,18 @@
 #ifdef DEBUG
   initialized_ = true;
 #endif
+
+  // Get the features implied by the OS and the compiler settings. This is the
+  // minimal set of features which is also allowed for generated code in the
+  // snapshot.
+  supported_ |= OS::CpuFeaturesImpliedByPlatform();
+  supported_ |= CpuFeaturesImpliedByCompiler();
+
+  if (Serializer::enabled()) {
+    // No probing for features if we might serialize (generate snapshot).
+    return;
+  }
+
 #ifndef __arm__
   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
   // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
@@ -87,13 +105,8 @@
     supported_ |= 1u << ARMv7;
   }
 #else  // def __arm__
-  if (Serializer::enabled()) {
-    supported_ |= OS::CpuFeaturesImpliedByPlatform();
-    supported_ |= CpuFeaturesImpliedByCompiler();
-    return;  // No features if we might serialize.
-  }
-
-  if (OS::ArmCpuHasFeature(VFP3)) {
+  // Probe for additional features not already known to be available.
+  if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
     // This implementation also sets the VFP flags if runtime
     // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
     // 0406B, page A1-6.
@@ -101,7 +114,7 @@
     found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
   }
 
-  if (OS::ArmCpuHasFeature(ARMv7)) {
+  if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
     supported_ |= 1u << ARMv7;
     found_by_runtime_probing_ |= 1u << ARMv7;
   }
@@ -276,9 +289,7 @@
 Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
     : AssemblerBase(arg_isolate),
       positions_recorder_(this),
-      allow_peephole_optimization_(false),
       emit_debug_code_(FLAG_debug_code) {
-  allow_peephole_optimization_ = FLAG_peephole_optimization;
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
@@ -315,6 +326,7 @@
   no_const_pool_before_ = 0;
   last_const_pool_end_ = 0;
   last_bound_pos_ = 0;
+  ast_id_for_reloc_info_ = kNoASTId;
 }
 
 
@@ -1082,20 +1094,6 @@
 void Assembler::add(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
   addrmod1(cond | ADD | s, src1, dst, src2);
-
-  // Eliminate pattern: push(r), pop()
-  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
-  //   add(sp, sp, Operand(kPointerSize));
-  // Both instructions can be eliminated.
-  if (can_peephole_optimize(2) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
-      (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
-    pc_ -= 2 * kInstrSize;
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
-    }
-  }
 }
 
 
@@ -1400,195 +1398,11 @@
     positions_recorder()->WriteRecordedPositions();
   }
   addrmod2(cond | B26 | L, dst, src);
-
-  // Eliminate pattern: push(ry), pop(rx)
-  //   str(ry, MemOperand(sp, 4, NegPreIndex), al)
-  //   ldr(rx, MemOperand(sp, 4, PostIndex), al)
-  // Both instructions can be eliminated if ry = rx.
-  // If ry != rx, a register copy from ry to rx is inserted
-  // after eliminating the push and the pop instructions.
-  if (can_peephole_optimize(2)) {
-    Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
-
-    if (IsPush(push_instr) && IsPop(pop_instr)) {
-      if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
-        // For consecutive push and pop on different registers,
-        // we delete both the push & pop and insert a register move.
-        // push ry, pop rx --> mov rx, ry
-        Register reg_pushed, reg_popped;
-        reg_pushed = GetRd(push_instr);
-        reg_popped = GetRd(pop_instr);
-        pc_ -= 2 * kInstrSize;
-        // Insert a mov instruction, which is better than a pair of push & pop
-        mov(reg_popped, reg_pushed);
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
-                 pc_offset());
-        }
-      } else {
-        // For consecutive push and pop on the same register,
-        // both the push and the pop can be deleted.
-        pc_ -= 2 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
-        }
-      }
-    }
-  }
-
-  if (can_peephole_optimize(2)) {
-    Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
-
-    if ((IsStrRegFpOffset(str_instr) &&
-         IsLdrRegFpOffset(ldr_instr)) ||
-       (IsStrRegFpNegOffset(str_instr) &&
-         IsLdrRegFpNegOffset(ldr_instr))) {
-      if ((ldr_instr & kLdrStrInstrArgumentMask) ==
-            (str_instr & kLdrStrInstrArgumentMask)) {
-        // Pattern: Ldr/str same fp+offset, same register.
-        //
-        // The following:
-        // str rx, [fp, #-12]
-        // ldr rx, [fp, #-12]
-        //
-        // Becomes:
-        // str rx, [fp, #-12]
-
-        pc_ -= 1 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
-        }
-      } else if ((ldr_instr & kLdrStrOffsetMask) ==
-                 (str_instr & kLdrStrOffsetMask)) {
-        // Pattern: Ldr/str same fp+offset, different register.
-        //
-        // The following:
-        // str rx, [fp, #-12]
-        // ldr ry, [fp, #-12]
-        //
-        // Becomes:
-        // str rx, [fp, #-12]
-        // mov ry, rx
-
-        Register reg_stored, reg_loaded;
-        reg_stored = GetRd(str_instr);
-        reg_loaded = GetRd(ldr_instr);
-        pc_ -= 1 * kInstrSize;
-        // Insert a mov instruction, which is better than ldr.
-        mov(reg_loaded, reg_stored);
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
-        }
-      }
-    }
-  }
-
-  if (can_peephole_optimize(3)) {
-    Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
-    Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
-    if (IsPush(mem_write_instr) &&
-        IsPop(mem_read_instr)) {
-      if ((IsLdrRegFpOffset(ldr_instr) ||
-        IsLdrRegFpNegOffset(ldr_instr))) {
-        if (Instruction::RdValue(mem_write_instr) ==
-                                  Instruction::RdValue(mem_read_instr)) {
-          // Pattern: push & pop from/to same register,
-          // with a fp+offset ldr in between
-          //
-          // The following:
-          // str rx, [sp, #-4]!
-          // ldr rz, [fp, #-24]
-          // ldr rx, [sp], #+4
-          //
-          // Becomes:
-          // if(rx == rz)
-          //   delete all
-          // else
-          //   ldr rz, [fp, #-24]
-
-          if (Instruction::RdValue(mem_write_instr) ==
-              Instruction::RdValue(ldr_instr)) {
-            pc_ -= 3 * kInstrSize;
-          } else {
-            pc_ -= 3 * kInstrSize;
-            // Reinsert back the ldr rz.
-            emit(ldr_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
-          }
-        } else {
-          // Pattern: push & pop from/to different registers
-          // with a fp+offset ldr in between
-          //
-          // The following:
-          // str rx, [sp, #-4]!
-          // ldr rz, [fp, #-24]
-          // ldr ry, [sp], #+4
-          //
-          // Becomes:
-          // if(ry == rz)
-          //   mov ry, rx;
-          // else if(rx != rz)
-          //   ldr rz, [fp, #-24]
-          //   mov ry, rx
-          // else if((ry != rz) || (rx == rz)) becomes:
-          //   mov ry, rx
-          //   ldr rz, [fp, #-24]
-
-          Register reg_pushed, reg_popped;
-          if (Instruction::RdValue(mem_read_instr) ==
-              Instruction::RdValue(ldr_instr)) {
-            reg_pushed = GetRd(mem_write_instr);
-            reg_popped = GetRd(mem_read_instr);
-            pc_ -= 3 * kInstrSize;
-            mov(reg_popped, reg_pushed);
-          } else if (Instruction::RdValue(mem_write_instr) !=
-                     Instruction::RdValue(ldr_instr)) {
-            reg_pushed = GetRd(mem_write_instr);
-            reg_popped = GetRd(mem_read_instr);
-            pc_ -= 3 * kInstrSize;
-            emit(ldr_instr);
-            mov(reg_popped, reg_pushed);
-          } else if ((Instruction::RdValue(mem_read_instr) !=
-                      Instruction::RdValue(ldr_instr)) ||
-                     (Instruction::RdValue(mem_write_instr) ==
-                      Instruction::RdValue(ldr_instr))) {
-            reg_pushed = GetRd(mem_write_instr);
-            reg_popped = GetRd(mem_read_instr);
-            pc_ -= 3 * kInstrSize;
-            mov(reg_popped, reg_pushed);
-            emit(ldr_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
-          }
-        }
-      }
-    }
-  }
 }
 
 
 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
   addrmod2(cond | B26, src, dst);
-
-  // Eliminate pattern: pop(), push(r)
-  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
-  // ->  str r, [sp, 0], al
-  if (can_peephole_optimize(2) &&
-     // Pattern.
-     instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
-     instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
-    pc_ -= 2 * kInstrSize;
-    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
-    }
-  }
 }
 
 
@@ -2722,7 +2536,14 @@
       }
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
-    reloc_info_writer.Write(&rinfo);
+    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+      ASSERT(ast_id_for_reloc_info_ != kNoASTId);
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
+      ast_id_for_reloc_info_ = kNoASTId;
+      reloc_info_writer.Write(&reloc_info_with_ast_id);
+    } else {
+      reloc_info_writer.Write(&rinfo);
+    }
   }
 }
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 3f2daab..2ab46b3 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -72,6 +72,7 @@
 struct Register {
   static const int kNumRegisters = 16;
   static const int kNumAllocatableRegisters = 8;
+  static const int kSizeInBytes = 4;
 
   static int ToAllocationIndex(Register reg) {
     ASSERT(reg.code() < kNumAllocatableRegisters);
@@ -1170,6 +1171,10 @@
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot();
 
+  // Record the AST id of the CallIC being compiled, so that it can be placed
+  // in the relocation information.
+  void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
+
   // Record a comment relocation entry that can be used by a disassembler.
   // Use --code-comments to enable.
   void RecordComment(const char* msg);
@@ -1185,12 +1190,6 @@
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
-  bool can_peephole_optimize(int instructions) {
-    if (!allow_peephole_optimization_) return false;
-    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
-    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
-  }
-
   // Read/patch instructions
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   static void instr_at_put(byte* pc, Instr instr) {
@@ -1223,10 +1222,25 @@
   static int GetCmpImmediateRawImmediate(Instr instr);
   static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
 
+  // Buffer size and constant pool distance are checked together at regular
+  // intervals of kBufferCheckInterval emitted bytes
+  static const int kBufferCheckInterval = 1*KB/2;
+  // Constants in pools are accessed via pc relative addressing, which can
+  // reach +/-4KB thereby defining a maximum distance between the instruction
+  // and the accessed constant. We satisfy this constraint by limiting the
+  // distance between pools.
+  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
+  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
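Hoisting kBufferCheckInterval, kMaxDistBetweenPools and kMaxNumPRInfo into the public section exposes the sizing relationship between buffer checks and constant pools. With the usual V8 values (an assumption here: KB = 1024 and kInstrSize = 4) the arithmetic works out as follows:

    const int KB = 1024;
    const int kInstrSize = 4;
    const int kBufferCheckInterval = 1 * KB / 2;                         // 512 bytes
    const int kMaxDistBetweenPools = 4 * KB - 2 * kBufferCheckInterval;  // 3072 bytes
    const int kMaxNumPRInfo = kMaxDistBetweenPools / kInstrSize;         // 768 entries
    // Worst case, every instruction in a 3072-byte window references the pool,
    // so at most 768 pending relocation entries need buffering before emission.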
+
   // Check if is time to emit a constant pool for pending reloc info entries
   void CheckConstPool(bool force_emit, bool require_jump);
 
  protected:
+  // Relocation for a type-recording IC has the AST id added to it.  This
+  // member variable is a way to pass the information from the call site to
+  // the relocation info.
+  unsigned ast_id_for_reloc_info_;
+
   bool emit_debug_code() const { return emit_debug_code_; }
 
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1264,9 +1278,6 @@
   // True if the assembler owns the buffer, false if buffer is external.
   bool own_buffer_;
 
-  // Buffer size and constant pool distance are checked together at regular
-  // intervals of kBufferCheckInterval emitted bytes
-  static const int kBufferCheckInterval = 1*KB/2;
   int next_buffer_check_;  // pc offset of next buffer check
 
   // Code generation
@@ -1299,12 +1310,6 @@
   // regular intervals of kDistBetweenPools bytes
   static const int kDistBetweenPools = 1*KB;
 
-  // Constants in pools are accessed via pc relative addressing, which can
-  // reach +/-4KB thereby defining a maximum distance between the instruction
-  // and the accessed constant. We satisfy this constraint by limiting the
-  // distance between pools.
-  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
-
   // Emission of the constant pool may be blocked in some code sequences.
   int const_pool_blocked_nesting_;  // Block emission if this is not zero.
   int no_const_pool_before_;  // Block emission before this pc offset.
@@ -1322,7 +1327,6 @@
   // stored in a separate buffer until a constant pool is emitted.
   // If every instruction in a long sequence is accessing the pool, we need one
   // pending relocation entry per instruction.
-  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
   RelocInfo prinfo_[kMaxNumPRInfo];  // the buffer of pending relocation info
   int num_prinfo_;  // number of pending reloc info entries in the buffer
 
@@ -1356,7 +1360,6 @@
   friend class BlockConstPoolScope;
 
   PositionsRecorder positions_recorder_;
-  bool allow_peephole_optimization_;
   bool emit_debug_code_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 5235dd3..794b370 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -584,7 +584,7 @@
   __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
   __ EnterInternalFrame();
   __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_JS);
+  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
   __ LeaveInternalFrame();
   __ pop(function);
   __ mov(argument, r0);
@@ -636,6 +636,7 @@
   // Set expected number of arguments to zero (not changing r0).
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ SetCallKind(r5, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
 }
@@ -914,10 +915,11 @@
         masm->isolate()->builtins()->HandleApiCallConstruct();
     ParameterCount expected(0);
     __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION);
+                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
   } else {
     ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Pop the function from the stack.
@@ -1049,7 +1051,8 @@
             RelocInfo::CODE_TARGET);
   } else {
     ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Exit the JS frame and remove the parameters (except function), and return.
@@ -1077,12 +1080,17 @@
 
   // Preserve the function.
   __ push(r1);
+  // Push call kind information.
+  __ push(r5);
 
   // Push the function on the stack as the argument to the runtime function.
   __ push(r1);
   __ CallRuntime(Runtime::kLazyCompile, 1);
   // Calculate the entry point.
   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Restore call kind information.
+  __ pop(r5);
   // Restore saved function.
   __ pop(r1);
 
@@ -1100,12 +1108,17 @@
 
   // Preserve the function.
   __ push(r1);
+  // Push call kind information.
+  __ push(r5);
 
   // Push the function on the stack as the argument to the runtime function.
   __ push(r1);
   __ CallRuntime(Runtime::kLazyRecompile, 1);
   // Calculate the entry point.
   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Restore call kind information.
+  __ pop(r5);
   // Restore saved function.
   __ pop(r1);
 
@@ -1238,8 +1251,13 @@
 
     // Do not transform the receiver for strict mode functions.
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-    __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
-    __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+    __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ tst(r3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                             kSmiTagSize)));
+    __ b(ne, &shift_arguments);
+
+    // Do not transform the receiver for native (Compilerhints already in r3).
+    __ tst(r3, Operand(1 << (SharedFunctionInfo::kES5Native +
                              kSmiTagSize)));
     __ b(ne, &shift_arguments);
 
@@ -1252,17 +1270,17 @@
     __ tst(r2, Operand(kSmiTagMask));
     __ b(eq, &convert_to_object);
 
-    __ LoadRoot(r3, Heap::kNullValueRootIndex);
-    __ cmp(r2, r3);
-    __ b(eq, &use_global_receiver);
     __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
     __ cmp(r2, r3);
     __ b(eq, &use_global_receiver);
+    __ LoadRoot(r3, Heap::kNullValueRootIndex);
+    __ cmp(r2, r3);
+    __ b(eq, &use_global_receiver);
 
+    STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
+    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
     __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
-    __ b(lt, &convert_to_object);
-    __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
-    __ b(le, &shift_arguments);
+    __ b(ge, &shift_arguments);
 
     __ bind(&convert_to_object);
     __ EnterInternalFrame();  // In order to preserve argument count.
@@ -1270,7 +1288,7 @@
     __ push(r0);
 
     __ push(r2);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
     __ mov(r2, r0);
 
     __ pop(r0);
@@ -1340,6 +1358,7 @@
     // Expected number of arguments is 0 for CALL_NON_FUNCTION.
     __ mov(r2, Operand(0, RelocInfo::NONE));
     __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+    __ SetCallKind(r5, CALL_AS_METHOD);
     __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
             RelocInfo::CODE_TARGET);
     __ bind(&function);
@@ -1355,13 +1374,15 @@
          FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
   __ mov(r2, Operand(r2, ASR, kSmiTagSize));
   __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ SetCallKind(r5, CALL_AS_METHOD);
   __ cmp(r2, r0);  // Check formal and actual parameter counts.
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET,
           ne);
 
   ParameterCount expected(0);
-  __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
+  __ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
+                NullCallWrapper(), CALL_AS_METHOD);
 }
 
 
@@ -1378,7 +1399,7 @@
   __ push(r0);
   __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
   __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
  // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1396,7 +1417,7 @@
   __ ldr(r1, MemOperand(fp, kFunctionOffset));
   __ push(r1);
   __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
   // End of stack check.
 
   // Push current limit and index.
@@ -1416,8 +1437,13 @@
   __ ldr(r0, MemOperand(fp, kRecvOffset));
 
   // Do not transform the receiver for strict mode functions.
-  __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
-  __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+  __ ldr(r2, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
+  __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                           kSmiTagSize)));
+  __ b(ne, &push_receiver);
+
+  // Do not transform the receiver for native (Compilerhints already in r2).
+  __ tst(r2, Operand(1 << (SharedFunctionInfo::kES5Native +
                            kSmiTagSize)));
   __ b(ne, &push_receiver);
 
@@ -1433,16 +1459,16 @@
 
   // Check if the receiver is already a JavaScript object.
   // r0: receiver
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
   __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
-  __ b(lt, &call_to_object);
-  __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
-  __ b(le, &push_receiver);
+  __ b(ge, &push_receiver);
 
   // Convert the receiver to a regular object.
   // r0: receiver
   __ bind(&call_to_object);
   __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ b(&push_receiver);
 
   // Use the current global receiver object as the receiver.
@@ -1492,7 +1518,8 @@
   ParameterCount actual(r0);
   __ mov(r0, Operand(r0, ASR, kSmiTagSize));
   __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ InvokeFunction(r1, actual, CALL_FUNCTION);
+  __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
   // Tear down the internal frame and remove function, receiver and args.
   __ LeaveInternalFrame();
@@ -1529,6 +1556,7 @@
   //  -- r1 : function (passed through to callee)
   //  -- r2 : expected number of arguments
   //  -- r3 : code entry to call
+  //  -- r5 : call kind information
   // -----------------------------------
 
   Label invoke, dont_adapt_arguments;
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index fad9339..5e6c0c3 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -55,6 +55,17 @@
                                            Register rhs);
 
 
+// Check if the operand is a heap number.
+static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
+                                   Register scratch1, Register scratch2,
+                                   Label* not_a_heap_number) {
+  __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
+  __ cmp(scratch1, scratch2);
+  __ b(ne, not_a_heap_number);
+}
+
+
 void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in r0.
   Label check_heap_number, call_builtin;
@@ -63,15 +74,12 @@
   __ Ret();
 
   __ bind(&check_heap_number);
-  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-  __ cmp(r1, ip);
-  __ b(ne, &call_builtin);
+  EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
   __ Ret();
 
   __ bind(&call_builtin);
   __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
 
 
@@ -364,136 +372,6 @@
 }
 
 
-class FloatingPointHelper : public AllStatic {
- public:
-
-  enum Destination {
-    kVFPRegisters,
-    kCoreRegisters
-  };
-
-
-  // Loads smis from r0 and r1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
-  // floating point registers VFP3 must be supported. If core registers are
-  // requested when VFP3 is supported d6 and d7 will be scratched.
-  static void LoadSmis(MacroAssembler* masm,
-                       Destination destination,
-                       Register scratch1,
-                       Register scratch2);
-
-  // Loads objects from r0 and r1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
-  // floating point registers VFP3 must be supported. If core registers are
-  // requested when VFP3 is supported d6 and d7 will still be scratched. If
-  // either r0 or r1 is not a number (not smi and not heap number object) the
-  // not_number label is jumped to with r0 and r1 intact.
-  static void LoadOperands(MacroAssembler* masm,
-                           FloatingPointHelper::Destination destination,
-                           Register heap_number_map,
-                           Register scratch1,
-                           Register scratch2,
-                           Label* not_number);
-
-  // Convert the smi or heap number in object to an int32 using the rules
-  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
-  // and brought into the range -2^31 .. +2^31 - 1.
-  static void ConvertNumberToInt32(MacroAssembler* masm,
-                                   Register object,
-                                   Register dst,
-                                   Register heap_number_map,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Register scratch3,
-                                   DwVfpRegister double_scratch,
-                                   Label* not_int32);
-
-  // Load the number from object into double_dst in the double format.
-  // Control will jump to not_int32 if the value cannot be exactly represented
-  // by a 32-bit integer.
-  // Floating point value in the 32-bit integer range that are not exact integer
-  // won't be loaded.
-  static void LoadNumberAsInt32Double(MacroAssembler* masm,
-                                      Register object,
-                                      Destination destination,
-                                      DwVfpRegister double_dst,
-                                      Register dst1,
-                                      Register dst2,
-                                      Register heap_number_map,
-                                      Register scratch1,
-                                      Register scratch2,
-                                      SwVfpRegister single_scratch,
-                                      Label* not_int32);
-
-  // Loads the number from object into dst as a 32-bit integer.
-  // Control will jump to not_int32 if the object cannot be exactly represented
-  // by a 32-bit integer.
-  // Floating point value in the 32-bit integer range that are not exact integer
-  // won't be converted.
-  // scratch3 is not used when VFP3 is supported.
-  static void LoadNumberAsInt32(MacroAssembler* masm,
-                                Register object,
-                                Register dst,
-                                Register heap_number_map,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                DwVfpRegister double_scratch,
-                                Label* not_int32);
-
-  // Generate non VFP3 code to check if a double can be exactly represented by a
-  // 32-bit integer. This does not check for 0 or -0, which need
-  // to be checked for separately.
-  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
-  // through otherwise.
-  // src1 and src2 will be cloberred.
-  //
-  // Expected input:
-  // - src1: higher (exponent) part of the double value.
-  // - src2: lower (mantissa) part of the double value.
-  // Output status:
-  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
-  // - src2: contains 1.
-  // - other registers are clobbered.
-  static void DoubleIs32BitInteger(MacroAssembler* masm,
-                                   Register src1,
-                                   Register src2,
-                                   Register dst,
-                                   Register scratch,
-                                   Label* not_int32);
-
-  // Generates code to call a C function to do a double operation using core
-  // registers. (Used when VFP3 is not supported.)
-  // This code never falls through, but returns with a heap number containing
-  // the result in r0.
-  // Register heapnumber_result must be a heap number in which the
-  // result of the operation will be stored.
-  // Requires the following layout on entry:
-  // r0: Left value (least significant part of mantissa).
-  // r1: Left value (sign, exponent, top of mantissa).
-  // r2: Right value (least significant part of mantissa).
-  // r3: Right value (sign, exponent, top of mantissa).
-  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
-                                          Token::Value op,
-                                          Register heap_number_result,
-                                          Register scratch);
-
- private:
-  static void LoadNumber(MacroAssembler* masm,
-                         FloatingPointHelper::Destination destination,
-                         Register object,
-                         DwVfpRegister dst,
-                         Register dst1,
-                         Register dst2,
-                         Register heap_number_map,
-                         Register scratch1,
-                         Register scratch2,
-                         Label* not_number);
-};
-
-
 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                    FloatingPointHelper::Destination destination,
                                    Register scratch1,
@@ -651,6 +529,80 @@
 }
 
 
+void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
+                                             Register int_scratch,
+                                             Destination destination,
+                                             DwVfpRegister double_dst,
+                                             Register dst1,
+                                             Register dst2,
+                                             Register scratch2,
+                                             SwVfpRegister single_scratch) {
+  ASSERT(!int_scratch.is(scratch2));
+  ASSERT(!int_scratch.is(dst1));
+  ASSERT(!int_scratch.is(dst2));
+
+  Label done;
+
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(single_scratch, int_scratch);
+    __ vcvt_f64_s32(double_dst, single_scratch);
+    if (destination == kCoreRegisters) {
+      __ vmov(dst1, dst2, double_dst);
+    }
+  } else {
+    Label fewer_than_20_useful_bits;
+    // Expected output:
+    // |         dst2            |         dst1            |
+    // | s |   exp   |              mantissa               |
+
+    // Check for zero.
+    __ cmp(int_scratch, Operand(0));
+    __ mov(dst2, int_scratch);
+    __ mov(dst1, int_scratch);
+    __ b(eq, &done);
+
+    // Preload the sign of the value.
+    __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
+    // Get the absolute value of the object (as an unsigned integer).
+    __ rsb(int_scratch, int_scratch, Operand(0), SetCC, mi);
+
+    // Get mantissa[51:20].
+
+    // Get the position of the first set bit.
+    __ CountLeadingZeros(dst1, int_scratch, scratch2);
+    __ rsb(dst1, dst1, Operand(31));
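+    // dst1 now holds the index of the most significant set bit, which is
+    // also the value's unbiased exponent.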
+
+    // Set the exponent.
+    __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+    __ Bfi(dst2, scratch2, scratch2,
+        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+    // Clear the most significant set bit (the implicit mantissa bit).
+    __ mov(scratch2, Operand(1));
+    __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
+
+    __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+    // Get the number of bits to set in the lower part of the mantissa.
+    __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+    __ b(mi, &fewer_than_20_useful_bits);
+    // Set the higher 20 bits of the mantissa.
+    __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
+    __ rsb(scratch2, scratch2, Operand(32));
+    __ mov(dst1, Operand(int_scratch, LSL, scratch2));
+    __ b(&done);
+
+    __ bind(&fewer_than_20_useful_bits);
+    __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+    __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
+    __ orr(dst2, dst2, scratch2);
+    // Set dst1 to 0.
+    __ mov(dst1, Operand(0));
+  }
+  __ bind(&done);
+}
+
+
 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                   Register object,
                                                   Destination destination,
@@ -672,63 +624,8 @@
 
   __ JumpIfNotSmi(object, &obj_is_not_smi);
   __ SmiUntag(scratch1, object);
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-    __ vmov(single_scratch, scratch1);
-    __ vcvt_f64_s32(double_dst, single_scratch);
-    if (destination == kCoreRegisters) {
-      __ vmov(dst1, dst2, double_dst);
-    }
-  } else {
-    Label fewer_than_20_useful_bits;
-    // Expected output:
-    // |         dst2            |         dst1            |
-    // | s |   exp   |              mantissa               |
-
-    // Check for zero.
-    __ cmp(scratch1, Operand(0));
-    __ mov(dst2, scratch1);
-    __ mov(dst1, scratch1);
-    __ b(eq, &done);
-
-    // Preload the sign of the value.
-    __ and_(dst2, scratch1, Operand(HeapNumber::kSignMask), SetCC);
-    // Get the absolute value of the object (as an unsigned integer).
-    __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
-
-    // Get mantisssa[51:20].
-
-    // Get the position of the first set bit.
-    __ CountLeadingZeros(dst1, scratch1, scratch2);
-    __ rsb(dst1, dst1, Operand(31));
-
-    // Set the exponent.
-    __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
-    __ Bfi(dst2, scratch2, scratch2,
-        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
-    // Clear the first non null bit.
-    __ mov(scratch2, Operand(1));
-    __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst1));
-
-    __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
-    // Get the number of bits to set in the lower part of the mantissa.
-    __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
-    __ b(mi, &fewer_than_20_useful_bits);
-    // Set the higher 20 bits of the mantissa.
-    __ orr(dst2, dst2, Operand(scratch1, LSR, scratch2));
-    __ rsb(scratch2, scratch2, Operand(32));
-    __ mov(dst1, Operand(scratch1, LSL, scratch2));
-    __ b(&done);
-
-    __ bind(&fewer_than_20_useful_bits);
-    __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
-    __ mov(scratch2, Operand(scratch1, LSL, scratch2));
-    __ orr(dst2, dst2, scratch2);
-    // Set dst1 to 0.
-    __ mov(dst1, Operand(0));
-  }
-
+  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+                     scratch2, single_scratch);
   __ b(&done);
 
   __ bind(&obj_is_not_smi);
@@ -943,14 +840,25 @@
   // Push the current return address before the C call. Return will be
   // through pop(pc) below.
   __ push(lr);
-  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
+  __ PrepareCallCFunction(0, 2, scratch);
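+  // There are 0 core-register arguments and 2 double arguments. With the
+  // hard-float EABI the doubles are passed in d0/d1 instead of the
+  // r0/r1 and r2/r3 register pairs.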
+  if (masm->use_eabi_hardfloat()) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(d0, r0, r1);
+    __ vmov(d1, r2, r3);
+  }
   // Call C routine that may not cause GC or other trouble.
   __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
-                   4);
+                   0, 2);
   // Store answer in the overwritable heap number. Double returned in
-  // registers r0 and r1.
-  __ Strd(r0, r1, FieldMemOperand(heap_number_result,
-                                  HeapNumber::kValueOffset));
+  // registers r0 and r1 or in d0.
+  if (masm->use_eabi_hardfloat()) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vstr(d0,
+            FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+  } else {
+    __ Strd(r0, r1, FieldMemOperand(heap_number_result,
+                                    HeapNumber::kValueOffset));
+  }
   // Place heap_number_result in r0 and return to the pushed return address.
   __ mov(r0, Operand(heap_number_result));
   __ pop(pc);
@@ -1292,8 +1200,14 @@
     // Call a native function to do a comparison between two non-NaNs.
     // Call C routine that may not cause GC or other trouble.
     __ push(lr);
-    __ PrepareCallCFunction(4, r5);  // Two doubles count as 4 arguments.
-    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+    __ PrepareCallCFunction(0, 2, r5);
+    if (masm->use_eabi_hardfloat()) {
+      CpuFeatures::Scope scope(VFP3);
+      __ vmov(d0, r0, r1);
+      __ vmov(d1, r2, r3);
+    }
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
+                     0, 2);
     __ pop(pc);  // Return.
   }
 }
@@ -1457,7 +1371,7 @@
                   scratch1,
                   Heap::kHeapNumberMapRootIndex,
                   not_found,
-                  true);
+                  DONT_DO_SMI_CHECK);
 
       STATIC_ASSERT(8 == kDoubleSize);
       __ add(scratch1,
@@ -1656,13 +1570,22 @@
   __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
 
   __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
-  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+  if (cc_ == eq) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      lhs_,
                                                      rhs_,
                                                      r2,
                                                      r3,
-                                                     r4,
-                                                     r5);
+                                                     r4);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                       lhs_,
+                                                       rhs_,
+                                                       r2,
+                                                       r3,
+                                                       r4,
+                                                       r5);
+  }
   // Never falls through to here.
 
   __ bind(&slow);
@@ -1687,7 +1610,7 @@
 
   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
-  __ InvokeBuiltin(native, JUMP_JS);
+  __ InvokeBuiltin(native, JUMP_FUNCTION);
 }
 
 
@@ -1695,12 +1618,36 @@
 // The stub returns zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub uses VFP3 instructions.
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  CpuFeatures::Scope scope(VFP3);
 
   Label false_result;
   Label not_heap_number;
   Register scratch = r9.is(tos_) ? r7 : r9;
 
+  // undefined -> false
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(tos_, ip);
+  __ b(eq, &false_result);
+
+  // Boolean -> its value
+  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+  __ cmp(tos_, ip);
+  __ b(eq, &false_result);
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ cmp(tos_, ip);
+  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
+  // return true if the equal condition is satisfied.
+  __ Ret(eq);
+
+  // Smis: 0 -> false, all other -> true
+  __ tst(tos_, tos_);
+  __ b(eq, &false_result);
+  __ tst(tos_, Operand(kSmiTagMask));
+  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
+  // return true if the not equal condition is satisfied.
+  __ Ret(eq);
+
+  // 'null' -> false
   __ LoadRoot(ip, Heap::kNullValueRootIndex);
   __ cmp(tos_, ip);
   __ b(eq, &false_result);
@@ -1710,9 +1657,7 @@
   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   __ cmp(scratch, ip);
   __ b(&not_heap_number, ne);
-
-  __ sub(ip, tos_, Operand(kHeapObjectTag));
-  __ vldr(d1, ip, HeapNumber::kValueOffset);
+  __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
   __ VFPCompareAndSetFlags(d1, 0.0);
   // "tos_" is a register, and contains a non zero value by default.
   // Hence we only need to overwrite "tos_" with zero to return false for
@@ -1723,12 +1668,6 @@
 
   __ bind(&not_heap_number);
 
-  // Check if the value is 'null'.
-  // 'null' => false.
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(tos_, ip);
-  __ b(&false_result, eq);
-
   // It can be an undetectable object.
   // Undetectable => false.
   __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
@@ -1768,15 +1707,310 @@
 }
 
 
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-    TRBinaryOpIC::TypeInfo type_info,
-    TRBinaryOpIC::TypeInfo result_type_info) {
-  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
+  UnaryOpStub stub(key, type_info);
   return stub.GetCode();
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+const char* UnaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+      kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name = NULL;  // Make g++ happy.
+  switch (mode_) {
+    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "UnaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               UnaryOpIC::GetName(operand_type_));
+  return name_;
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operand_type_) {
+    case UnaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case UnaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case UnaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case UnaryOpIC::GENERIC:
+      GenerateGenericStub(masm);
+      break;
+  }
+}
+
+
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  // Prepare to push argument.
+  __ mov(r3, Operand(r0));
+
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+  __ mov(r1, Operand(Smi::FromInt(op_)));
+  __ mov(r0, Operand(Smi::FromInt(operand_type_)));
+
+  __ Push(r3, r2, r1, r0);
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
+                        masm->isolate()),
+      4,
+      1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateSmiStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateSmiStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+  Label non_smi;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+                                     Label* non_smi,
+                                     Label* slow) {
+  __ JumpIfNotSmi(r0, non_smi);
+
+  // The result of negating zero or the smallest negative smi is not a smi.
+  __ bic(ip, r0, Operand(0x80000000), SetCC);
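+  // The bic result is zero only for 0 and 0x80000000 (the smallest
+  // negative smi, tagged).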
+  __ b(eq, slow);
+
+  // Return '0 - value'.
+  __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
+  __ Ret();
+}
+
+
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+                                        Label* non_smi) {
+  __ JumpIfNotSmi(r0, non_smi);
+
+  // Flip bits and revert inverted smi-tag.
+  __ mvn(r0, Operand(r0));
+  __ bic(r0, r0, Operand(kSmiTagMask));
+  __ Ret();
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateHeapNumberStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateHeapNumberStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+  Label non_smi, slow, call_builtin;
+  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+  __ bind(&call_builtin);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+                                            Label* slow) {
+  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
+  // r0 is a heap number.  Get a new heap number in r1.
+  if (mode_ == UNARY_OVERWRITE) {
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+  } else {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(r0);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(r1, Operand(r0));
+    __ pop(r0);
+    __ LeaveInternalFrame();
+
+    __ bind(&heapnumber_allocated);
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+    __ mov(r0, Operand(r1));
+  }
+  __ Ret();
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(
+    MacroAssembler* masm, Label* slow) {
+  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
+  // Convert the heap number in r0 to an untagged integer in r1.
+  __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
+
+  // Do the bitwise operation and check if the result fits in a smi.
+  Label try_float;
+  __ mvn(r1, Operand(r1));
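+  // After adding 0x40000000 the result is negative exactly when r1 lies
+  // outside the smi range [-2^30, 2^30 - 1].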
+  __ add(r2, r1, Operand(0x40000000), SetCC);
+  __ b(mi, &try_float);
+
+  // Tag the result as a smi and we're done.
+  __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+  __ Ret();
+
+  // Try to store the result in a heap number.
+  __ bind(&try_float);
+  if (mode_ == UNARY_NO_OVERWRITE) {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(r0, r2, r3, r6, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(r1);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ pop(r1);
+    __ LeaveInternalFrame();
+
+    __ bind(&heapnumber_allocated);
+  }
+
+  if (CpuFeatures::IsSupported(VFP3)) {
+    // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, r1);
+    __ vcvt_f64_s32(d0, s0);
+    __ sub(r2, r0, Operand(kHeapObjectTag));
+    __ vstr(d0, r2, HeapNumber::kValueOffset);
+    __ Ret();
+  } else {
+    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+    // have to set up a frame.
+    WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+  }
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateGenericStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateGenericStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
+  // Handle the slow case by jumping to the JavaScript builtin.
+  __ push(r0);
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+Handle<Code> GetBinaryOpStub(int key,
+                             BinaryOpIC::TypeInfo type_info,
+                             BinaryOpIC::TypeInfo result_type_info) {
+  BinaryOpStub stub(key, type_info, result_type_info);
+  return stub.GetCode();
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   Label get_result;
 
   __ Push(r1, r0);
@@ -1787,40 +2021,43 @@
   __ Push(r2, r1, r0);
 
   __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
       5,
       1);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
     MacroAssembler* masm) {
   UNIMPLEMENTED();
 }
 
 
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
   switch (operands_type_) {
-    case TRBinaryOpIC::UNINITIALIZED:
+    case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
       break;
-    case TRBinaryOpIC::SMI:
+    case BinaryOpIC::SMI:
       GenerateSmiStub(masm);
       break;
-    case TRBinaryOpIC::INT32:
+    case BinaryOpIC::INT32:
       GenerateInt32Stub(masm);
       break;
-    case TRBinaryOpIC::HEAP_NUMBER:
+    case BinaryOpIC::HEAP_NUMBER:
       GenerateHeapNumberStub(masm);
       break;
-    case TRBinaryOpIC::ODDBALL:
+    case BinaryOpIC::ODDBALL:
       GenerateOddballStub(masm);
       break;
-    case TRBinaryOpIC::STRING:
+    case BinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
+    case BinaryOpIC::STRING:
       GenerateStringStub(masm);
       break;
-    case TRBinaryOpIC::GENERIC:
+    case BinaryOpIC::GENERIC:
       GenerateGeneric(masm);
       break;
     default:
@@ -1829,7 +2066,7 @@
 }
 
 
-const char* TypeRecordingBinaryOpStub::GetName() {
+const char* BinaryOpStub::GetName() {
   if (name_ != NULL) return name_;
   const int kMaxNameLength = 100;
   name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
@@ -1845,16 +2082,15 @@
   }
 
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "TypeRecordingBinaryOpStub_%s_%s_%s",
+               "BinaryOpStub_%s_%s_%s",
                op_name,
                overwrite_name,
-               TRBinaryOpIC::GetName(operands_type_));
+               BinaryOpIC::GetName(operands_type_));
   return name_;
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
-    MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
   Register left = r1;
   Register right = r0;
   Register scratch1 = r7;
@@ -1979,10 +2215,10 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
-                                                    bool smi_operands,
-                                                    Label* not_numbers,
-                                                    Label* gc_required) {
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+                                       bool smi_operands,
+                                       Label* not_numbers,
+                                       Label* gc_required) {
   Register left = r1;
   Register right = r0;
   Register scratch1 = r7;
@@ -2193,7 +2429,8 @@
 // generated. If the result is not a smi and heap number allocation is not
 // requested the code falls through. If number allocation is requested but a
 // heap number cannot be allocated the code jumps to the lable gc_required.
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+    MacroAssembler* masm,
     Label* use_runtime,
     Label* gc_required,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
@@ -2222,11 +2459,11 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label not_smis, call_runtime;
 
-  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
-      result_type_ == TRBinaryOpIC::SMI) {
+  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+      result_type_ == BinaryOpIC::SMI) {
     // Only allow smi results.
     GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
   } else {
@@ -2247,18 +2484,48 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
   // Try to add arguments as strings, otherwise, transition to the generic
-  // TRBinaryOpIC type.
+  // BinaryOpIC type.
   GenerateAddStrings(masm);
   GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = r1;
+  Register right = r0;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::INT32);
 
   Register left = r1;
   Register right = r0;
@@ -2355,7 +2622,7 @@
                              scratch1,
                              scratch2);
 
-          if (result_type_ <= TRBinaryOpIC::INT32) {
+          if (result_type_ <= BinaryOpIC::INT32) {
             // If the ne condition is set, result does
             // not fit in a 32-bit integer.
             __ b(ne, &transition);
@@ -2382,8 +2649,8 @@
           // DIV just falls through to allocating a heap number.
         }
 
-        if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
-                                                : TRBinaryOpIC::INT32) {
+        if (result_type_ >= (op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+                                                : BinaryOpIC::INT32) {
           __ bind(&return_heap_number);
           // We are using vfp registers so r5 is available.
           heap_number_result = r5;
@@ -2492,12 +2759,13 @@
           // The non vfp3 code does not support this special case, so jump to
           // runtime if we don't support it.
           if (CpuFeatures::IsSupported(VFP3)) {
-            __ b(mi,
-                 (result_type_ <= TRBinaryOpIC::INT32) ? &transition
-                                                       : &return_heap_number);
+            __ b(mi, (result_type_ <= BinaryOpIC::INT32)
+                      ? &transition
+                      : &return_heap_number);
           } else {
-            __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
-                                                           : &call_runtime);
+            __ b(mi, (result_type_ <= BinaryOpIC::INT32)
+                      ? &transition
+                      : &call_runtime);
           }
           break;
         case Token::SHL:
@@ -2567,7 +2835,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   Label call_runtime;
 
   if (op_ == Token::ADD) {
@@ -2600,7 +2868,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   Label call_runtime;
   GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
 
@@ -2609,7 +2877,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   Label call_runtime, call_string_add_or_runtime;
 
   GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@@ -2626,7 +2894,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD);
   Label left_not_string, call_runtime;
 
@@ -2657,41 +2925,41 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
   GenerateRegisterArgsPush(masm);
   switch (op_) {
     case Token::ADD:
-      __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
       break;
     case Token::SUB:
-      __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
       break;
     case Token::MUL:
-      __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
       break;
     case Token::DIV:
-      __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
       break;
     case Token::MOD:
-      __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
       break;
     case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
       break;
     case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
       break;
     case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
       break;
     case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
       break;
     case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
       break;
     case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
       break;
     default:
       UNREACHABLE();
@@ -2699,14 +2967,12 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
-    MacroAssembler* masm,
-    Register result,
-    Register heap_number_map,
-    Register scratch1,
-    Register scratch2,
-    Label* gc_required) {
-
+void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+                                                Register result,
+                                                Register heap_number_map,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Label* gc_required) {
   // Code below will scratch result if allocation fails. To keep both arguments
   // intact for the runtime call result cannot be one of these.
   ASSERT(!result.is(r0) && !result.is(r1));
@@ -2733,7 +2999,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
   __ Push(r1, r0);
 }
 
@@ -2771,7 +3037,7 @@
                   r1,
                   Heap::kHeapNumberMapRootIndex,
                   &calculate,
-                  true);
+                  DONT_DO_SMI_CHECK);
       // Input is a HeapNumber. Load it to a double register and store the
       // low and high words into r2, r3.
       __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
@@ -2914,17 +3180,24 @@
   Isolate* isolate = masm->isolate();
 
   __ push(lr);
-  __ PrepareCallCFunction(2, scratch);
-  __ vmov(r0, r1, d2);
+  __ PrepareCallCFunction(0, 1, scratch);
+  if (masm->use_eabi_hardfloat()) {
+    __ vmov(d0, d2);
+  } else {
+    __ vmov(r0, r1, d2);
+  }
   switch (type_) {
     case TranscendentalCache::SIN:
-      __ CallCFunction(ExternalReference::math_sin_double_function(isolate), 2);
+      __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
+          0, 1);
       break;
     case TranscendentalCache::COS:
-      __ CallCFunction(ExternalReference::math_cos_double_function(isolate), 2);
+      __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
+          0, 1);
       break;
     case TranscendentalCache::LOG:
-      __ CallCFunction(ExternalReference::math_log_double_function(isolate), 2);
+      __ CallCFunction(ExternalReference::math_log_double_function(isolate),
+          0, 1);
       break;
     default:
       UNIMPLEMENTED();
@@ -2952,141 +3225,6 @@
 }
 
 
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  Label slow, done;
-
-  Register heap_number_map = r6;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  if (op_ == Token::SUB) {
-    if (include_smi_code_) {
-      // Check whether the value is a smi.
-      Label try_float;
-      __ tst(r0, Operand(kSmiTagMask));
-      __ b(ne, &try_float);
-
-      // Go slow case if the value of the expression is zero
-      // to make sure that we switch between 0 and -0.
-      if (negative_zero_ == kStrictNegativeZero) {
-        // If we have to check for zero, then we can check for the max negative
-        // smi while we are at it.
-        __ bic(ip, r0, Operand(0x80000000), SetCC);
-        __ b(eq, &slow);
-        __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
-        __ Ret();
-      } else {
-        // The value of the expression is a smi and 0 is OK for -0.  Try
-        // optimistic subtraction '0 - value'.
-        __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
-        __ Ret(vc);
-        // We don't have to reverse the optimistic neg since the only case
-        // where we fall through is the minimum negative Smi, which is the case
-        // where the neg leaves the register unchanged.
-        __ jmp(&slow);  // Go slow on max negative Smi.
-      }
-      __ bind(&try_float);
-    } else if (FLAG_debug_code) {
-      __ tst(r0, Operand(kSmiTagMask));
-      __ Assert(ne, "Unexpected smi operand.");
-    }
-
-    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-    __ cmp(r1, heap_number_map);
-    __ b(ne, &slow);
-    // r0 is a heap number.  Get a new heap number in r1.
-    if (overwrite_ == UNARY_OVERWRITE) {
-      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
-      __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-    } else {
-      __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
-      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
-      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-      __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
-      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
-      __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
-      __ mov(r0, Operand(r1));
-    }
-  } else if (op_ == Token::BIT_NOT) {
-    if (include_smi_code_) {
-      Label non_smi;
-      __ JumpIfNotSmi(r0, &non_smi);
-      __ mvn(r0, Operand(r0));
-      // Bit-clear inverted smi-tag.
-      __ bic(r0, r0, Operand(kSmiTagMask));
-      __ Ret();
-      __ bind(&non_smi);
-    } else if (FLAG_debug_code) {
-      __ tst(r0, Operand(kSmiTagMask));
-      __ Assert(ne, "Unexpected smi operand.");
-    }
-
-    // Check if the operand is a heap number.
-    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-    __ cmp(r1, heap_number_map);
-    __ b(ne, &slow);
-
-    // Convert the heap number is r0 to an untagged integer in r1.
-    __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
-
-    // Do the bitwise operation (move negated) and check if the result
-    // fits in a smi.
-    Label try_float;
-    __ mvn(r1, Operand(r1));
-    __ add(r2, r1, Operand(0x40000000), SetCC);
-    __ b(mi, &try_float);
-    __ mov(r0, Operand(r1, LSL, kSmiTagSize));
-    __ b(&done);
-
-    __ bind(&try_float);
-    if (!overwrite_ == UNARY_OVERWRITE) {
-      // Allocate a fresh heap number, but don't overwrite r0 until
-      // we're sure we can do it without going through the slow case
-      // that needs the value in r0.
-      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
-      __ mov(r0, Operand(r2));
-    }
-
-    if (CpuFeatures::IsSupported(VFP3)) {
-      // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
-      CpuFeatures::Scope scope(VFP3);
-      __ vmov(s0, r1);
-      __ vcvt_f64_s32(d0, s0);
-      __ sub(r2, r0, Operand(kHeapObjectTag));
-      __ vstr(d0, r2, HeapNumber::kValueOffset);
-    } else {
-      // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
-      // have to set up a frame.
-      WriteInt32ToHeapNumberStub stub(r1, r0, r2);
-      __ push(lr);
-      __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
-      __ pop(lr);
-    }
-  } else {
-    UNIMPLEMENTED();
-  }
-
-  __ bind(&done);
-  __ Ret();
-
-  // Handle the slow case by jumping to the JavaScript builtin.
-  __ bind(&slow);
-  __ push(r0);
-  switch (op_) {
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
-      break;
-    case Token::BIT_NOT:
-      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void MathPowStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
 
@@ -3141,11 +3279,11 @@
                           heapnumbermap,
                           &call_runtime);
     __ push(lr);
-    __ PrepareCallCFunction(3, scratch);
-    __ mov(r2, exponent);
-    __ vmov(r0, r1, double_base);
+    __ PrepareCallCFunction(1, 1, scratch);
+    __ SetCallCDoubleArguments(double_base, exponent);
     __ CallCFunction(
-        ExternalReference::power_double_int_function(masm->isolate()), 3);
+        ExternalReference::power_double_int_function(masm->isolate()),
+        1, 1);
     __ pop(lr);
     __ GetCFunctionDoubleResult(double_result);
     __ vstr(double_result,
@@ -3171,11 +3309,11 @@
                           heapnumbermap,
                           &call_runtime);
     __ push(lr);
-    __ PrepareCallCFunction(4, scratch);
-    __ vmov(r0, r1, double_base);
-    __ vmov(r2, r3, double_exponent);
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(double_base, double_exponent);
     __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()), 4);
+        ExternalReference::power_double_double_function(masm->isolate()),
+        0, 2);
     __ pop(lr);
     __ GetCFunctionDoubleResult(double_result);
     __ vstr(double_result,
@@ -3219,8 +3357,9 @@
 
   if (do_gc) {
     // Passing r0.
-    __ PrepareCallCFunction(1, r1);
-    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1);
+    __ PrepareCallCFunction(1, 0, r1);
+    __ CallCFunction(ExternalReference::perform_gc_function(isolate),
+        1, 0);
   }
 
   ExternalReference scope_depth =
@@ -3707,7 +3846,7 @@
   __ b(ne, &slow);
 
   // Null is not instance of anything.
-  __ cmp(scratch, Operand(FACTORY->null_value()));
+  __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
   __ b(ne, &object_not_null);
   __ mov(r0, Operand(Smi::FromInt(1)));
   __ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -3730,11 +3869,11 @@
     if (HasArgsInRegisters()) {
       __ Push(r0, r1);
     }
-  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
     __ EnterInternalFrame();
     __ Push(r0, r1);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
     __ LeaveInternalFrame();
     __ cmp(r0, Operand(0));
     __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
@@ -4087,9 +4226,8 @@
 
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
-  // the hole.
-  __ CompareObjectType(r7, r0, r0, CODE_TYPE);
-  __ b(ne, &runtime);
+  // a smi (code flushing support).
+  __ JumpIfSmi(r7, &runtime);
 
   // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
   // r7: code
@@ -4205,7 +4343,7 @@
 
   __ bind(&failure);
   // For failure and exception return null.
-  __ mov(r0, Operand(FACTORY->null_value()));
+  __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
   __ add(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
@@ -4276,6 +4414,8 @@
   const int kMaxInlineLength = 100;
   Label slowcase;
   Label done;
+  Factory* factory = masm->isolate()->factory();
+
   __ ldr(r1, MemOperand(sp, kPointerSize * 2));
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
@@ -4310,7 +4450,7 @@
   // Interleave operations for better latency.
   __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ add(r3, r0, Operand(JSRegExpResult::kSize));
-  __ mov(r4, Operand(FACTORY->empty_fixed_array()));
+  __ mov(r4, Operand(factory->empty_fixed_array()));
   __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
   __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
   __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
@@ -4331,13 +4471,13 @@
   // r5: Number of elements in array, untagged.
 
   // Set map.
-  __ mov(r2, Operand(FACTORY->fixed_array_map()));
+  __ mov(r2, Operand(factory->fixed_array_map()));
   __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
   // Set FixedArray length.
   __ mov(r6, Operand(r5, LSL, kSmiTagSize));
   __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
   // Fill contents of fixed-array with the-hole.
-  __ mov(r2, Operand(FACTORY->the_hole_value()));
+  __ mov(r2, Operand(factory->the_hole_value()));
   __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   // Fill fixed array elements with hole.
   // r0: JSArray, tagged.
@@ -4364,30 +4504,22 @@
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   Label slow;
 
-  // If the receiver might be a value (string, number or boolean) check for this
-  // and box it if it is.
-  if (ReceiverMightBeValue()) {
+  // The receiver might implicitly be the global object. This is
+  // indicated by passing the hole as the receiver to the call
+  // function stub.
+  if (ReceiverMightBeImplicit()) {
+    Label call;
     // Get the receiver from the stack.
     // function, receiver [, arguments]
-    Label receiver_is_value, receiver_is_js_object;
-    __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
-
-    // Check if receiver is a smi (which is a number value).
-    __ JumpIfSmi(r1, &receiver_is_value);
-
-    // Check if the receiver is a valid JS object.
-    __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
-    __ b(ge, &receiver_is_js_object);
-
-    // Call the runtime to box the value.
-    __ bind(&receiver_is_value);
-    __ EnterInternalFrame();
-    __ push(r1);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
-    __ LeaveInternalFrame();
-    __ str(r0, MemOperand(sp, argc_ * kPointerSize));
-
-    __ bind(&receiver_is_js_object);
+    __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
+    // Call as function is indicated with the hole.
+    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+    __ b(ne, &call);
+    // Patch the receiver on the stack with the global receiver object.
+    __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+    __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+    __ bind(&call);
   }
 
   // Get the function to call from the stack.
@@ -4404,7 +4536,23 @@
   // Fast-case: Invoke the function now.
   // r1: pushed function
   ParameterCount actual(argc_);
-  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+
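+  // The hole as the receiver marks an implicit-receiver call, which must be
+  // invoked as a function; otherwise the target is invoked as a method.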
+  if (ReceiverMightBeImplicit()) {
+    Label call_as_function;
+    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+    __ b(eq, &call_as_function);
+    __ InvokeFunction(r1,
+                      actual,
+                      JUMP_FUNCTION,
+                      NullCallWrapper(),
+                      CALL_AS_METHOD);
+    __ bind(&call_as_function);
+  }
+  __ InvokeFunction(r1,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    CALL_AS_FUNCTION);
 
   // Slow-case: Non-function called.
   __ bind(&slow);
@@ -4587,7 +4735,7 @@
               scratch_,
               Heap::kHeapNumberMapRootIndex,
               index_not_number_,
-              true);
+              DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ Push(object_, index_);
   __ push(index_);  // Consumed by runtime conversion function.
@@ -5299,6 +5447,45 @@
 }
 
 
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                                      Register left,
+                                                      Register right,
+                                                      Register scratch1,
+                                                      Register scratch2,
+                                                      Register scratch3) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label strings_not_equal, check_zero_length;
+  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
+  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ cmp(length, scratch2);
+  __ b(eq, &check_zero_length);
+  __ bind(&strings_not_equal);
+  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
+  __ Ret();
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ tst(length, Operand(length));
+  __ b(ne, &compare_chars);
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+
+  // Compare characters.
+  __ bind(&compare_chars);
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, length, scratch2, scratch3,
+                                &strings_not_equal);
+
+  // Characters are equal.
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+}
+
+
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                         Register left,
                                                         Register right,
@@ -5306,7 +5493,7 @@
                                                         Register scratch2,
                                                         Register scratch3,
                                                         Register scratch4) {
-  Label compare_lengths;
+  Label result_not_equal, compare_lengths;
   // Find minimum length and length difference.
   __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
   __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
@@ -5318,46 +5505,56 @@
   __ tst(min_length, Operand(min_length));
   __ b(eq, &compare_lengths);
 
-  // Untag smi.
-  __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
+  // Compare loop.
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, min_length, scratch2, scratch4,
+                                &result_not_equal);
 
-  // Setup registers so that we only need to increment one register
-  // in the loop.
-  __ add(scratch2, min_length,
-         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ add(left, left, Operand(scratch2));
-  __ add(right, right, Operand(scratch2));
-  // Registers left and right points to the min_length character of strings.
-  __ rsb(min_length, min_length, Operand(-1));
-  Register index = min_length;
-  // Index starts at -min_length.
-
-  {
-    // Compare loop.
-    Label loop;
-    __ bind(&loop);
-    // Compare characters.
-    __ add(index, index, Operand(1), SetCC);
-    __ ldrb(scratch2, MemOperand(left, index), ne);
-    __ ldrb(scratch4, MemOperand(right, index), ne);
-    // Skip to compare lengths with eq condition true.
-    __ b(eq, &compare_lengths);
-    __ cmp(scratch2, scratch4);
-    __ b(eq, &loop);
-    // Fallthrough with eq condition false.
-  }
-  // Compare lengths -  strings up to min-length are equal.
+  // Compare lengths - strings up to min-length are equal.
   __ bind(&compare_lengths);
   ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
-  // Use zero length_delta as result.
-  __ mov(r0, Operand(length_delta), SetCC, eq);
-  // Fall through to here if characters compare not-equal.
+  // Use length_delta as result if it's zero.
+  __ mov(r0, Operand(length_delta), SetCC);
+  __ bind(&result_not_equal);
+  // Conditionally update the result based on either length_delta or
+  // the last comparison performed in the loop above.
   __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
   __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
   __ Ret();
 }
 
 
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+    MacroAssembler* masm,
+    Register left,
+    Register right,
+    Register length,
+    Register scratch1,
+    Register scratch2,
+    Label* chars_not_equal) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that the loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiUntag(length);
+  __ add(scratch1, length,
+         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(left, left, Operand(scratch1));
+  __ add(right, right, Operand(scratch1));
+  __ rsb(length, length, Operand(0));
+  Register index = length;  // index = -length;
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ ldrb(scratch1, MemOperand(left, index));
+  __ ldrb(scratch2, MemOperand(right, index));
+  __ cmp(scratch1, scratch2);
+  __ b(ne, chars_not_equal);
+  __ add(index, index, Operand(1), SetCC);
+  __ b(ne, &loop);
+}
+
+
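A minimal standalone sketch, in plain C++ rather than macro assembler, of the
negative-index loop idiom used by GenerateAsciiCharsCompareLoop above (not part
of the patch; the helper name is illustrative): both pointers are biased past
the compared region and the index runs from -length up to 0, so the increment
doubles as the loop-termination test and no separate compare is needed.

    #include <cstddef>

    // Returns true if the first 'length' bytes of 'left' and 'right' match.
    bool AsciiCharsEqual(const char* left, const char* right,
                         std::size_t length) {
      left += length;   // point just past the region to compare
      right += length;
      std::ptrdiff_t index = -static_cast<std::ptrdiff_t>(length);
      while (index != 0) {
        if (left[index] != right[index]) return false;  // chars_not_equal
        ++index;  // reaching zero ends the loop; no extra compare needed
      }
      return true;  // all characters matched
    }
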
 void StringCompareStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -5684,7 +5881,7 @@
 
   if (call_builtin.is_linked()) {
     __ bind(&call_builtin);
-    __ InvokeBuiltin(builtin_id, JUMP_JS);
+    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
   }
 }
 
@@ -5810,6 +6007,109 @@
 }
 
 
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SYMBOLS);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = r1;
+  Register right = r0;
+  Register tmp1 = r2;
+  Register tmp2 = r3;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are symbols.
+  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, tmp1, Operand(tmp2));
+  __ tst(tmp1, Operand(kIsSymbolMask));
+  __ b(eq, &miss);
+
+  // Symbols are compared by identity.
+  __ cmp(left, right);
+  // Make sure r0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(r0));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::STRINGS);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = r1;
+  Register right = r0;
+  Register tmp1 = r2;
+  Register tmp2 = r3;
+  Register tmp3 = r4;
+  Register tmp4 = r5;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ orr(tmp3, tmp1, tmp2);
+  __ tst(tmp3, Operand(kIsNotStringMask));
+  __ b(ne, &miss);
+
+  // Fast check for identical strings.
+  __ cmp(left, right);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
+  __ Ret(eq);
+
+  // Handle not identical strings.
+
+  // Check that both strings are symbols. If they are, we're done
+  // because we already know they are not identical.
+  ASSERT(GetCondition() == eq);
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp3, tmp1, Operand(tmp2));
+  __ tst(tmp3, Operand(kIsSymbolMask));
+  // Make sure r0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(r0));
+  __ Ret(ne);
+
+  // Check that both strings are sequential ASCII.
+  Label runtime;
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+                                                  &runtime);
+
+  // Compare flat ASCII strings. Returns when done.
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2, tmp3);
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ Push(left, right);
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::OBJECTS);
   Label miss;
@@ -5880,6 +6180,239 @@
 }
 
 
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register receiver,
+    Register properties,
+    String* name,
+    Register scratch0) {
+  // If the names of the slots in the range from 1 to kProbes - 1 for the hash
+  // value are not equal to the name and the kProbes-th slot is not used (its
+  // name is the undefined value), the hash table is guaranteed not to contain
+  // the property. This holds even if some slots represent deleted properties
+  // (their names are the null value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // scratch0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = scratch0;
+    // Capacity is smi 2^n.
+    __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
+    __ sub(index, index, Operand(1));
+    __ and_(index, index, Operand(
+        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
+
+    Register entity_name = scratch0;
+    // Having undefined at this place means the name is not contained.
+    ASSERT_EQ(kSmiTagSize, 1);
+    Register tmp = properties;
+    __ add(tmp, properties, Operand(index, LSL, 1));
+    __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+    ASSERT(!tmp.is(entity_name));
+    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+    __ cmp(entity_name, tmp);
+    __ b(eq, done);
+
+    if (i != kInlinedProbes - 1) {
+      // Stop if we find the property.
+      __ cmp(entity_name, Operand(Handle<String>(name)));
+      __ b(eq, miss);
+
+      // Check if the entry name is not a symbol.
+      __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+      __ ldrb(entity_name,
+              FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+      __ tst(entity_name, Operand(kIsSymbolMask));
+      __ b(eq, miss);
+
+      // Restore the properties.
+      __ ldr(properties,
+             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+    }
+  }
+
+  const int spill_mask =
+      (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
+       r2.bit() | r1.bit() | r0.bit());
+
+  __ stm(db_w, sp, spill_mask);
+  __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ mov(r1, Operand(Handle<String>(name)));
+  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
+  __ tst(r0, Operand(r0));
+  __ ldm(ia_w, sp, spill_mask);
+
+  __ b(eq, done);
+  __ b(ne, miss);
+  return result;
+}
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If lookup was successful |scratch2| will be equal to elements + 4 * index.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register elements,
+                                                        Register name,
+                                                        Register scratch1,
+                                                        Register scratch2) {
+  // Assert that name contains a string.
+  if (FLAG_debug_code) __ AbortIfNotString(name);
+
+  // Compute the capacity mask.
+  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
+  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize));  // convert smi to int
+  __ sub(scratch1, scratch1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is
+      // right shifted in the following 'and' instruction.
+      ASSERT(StringDictionary::GetProbeOffset(i) <
+             1 << (32 - String::kHashFieldOffset));
+      __ add(scratch2, scratch2, Operand(
+          StringDictionary::GetProbeOffset(i) << String::kHashShift));
+    }
+    __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // scratch2 = scratch2 * 3.
+    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
+
+    // Check if the key is identical to the name.
+    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
+    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
+    __ cmp(name, Operand(ip));
+    __ b(eq, done);
+  }
+
+  const int spill_mask =
+      (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
+       r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
+      ~(scratch1.bit() | scratch2.bit());
+
+  __ stm(db_w, sp, spill_mask);
+  __ Move(r0, elements);
+  __ Move(r1, name);
+  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ tst(r0, Operand(r0));
+  __ mov(scratch2, Operand(r2));
+  __ ldm(ia_w, sp, spill_mask);
+
+  __ b(ne, done);
+  __ b(eq, miss);
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Registers:
+  //  result: StringDictionary to probe.
+  //  r1: key.
+  //  index_: will hold the index of the entry if the lookup is successful;
+  //          might alias with result_.
+  // Returns:
+  //  result_ is zero if the lookup failed, non-zero otherwise.
+
+  Register result = r0;
+  Register dictionary = r0;
+  Register key = r1;
+  Register index = r2;
+  Register mask = r3;
+  Register hash = r4;
+  Register undefined = r5;
+  Register entry_key = r6;
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
+  __ mov(mask, Operand(mask, ASR, kSmiTagSize));
+  __ sub(mask, mask, Operand(1));
+
+  __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    // Capacity is smi 2^n.
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is
+      // right shifted in the following 'and' instruction.
+      ASSERT(StringDictionary::GetProbeOffset(i) <
+             1 << (32 - String::kHashFieldOffset));
+      __ add(index, hash, Operand(
+          StringDictionary::GetProbeOffset(i) << String::kHashShift));
+    } else {
+      __ mov(index, Operand(hash));
+    }
+    __ and_(index, mask, Operand(index, LSR, String::kHashShift));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
+
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ add(index, dictionary, Operand(index, LSL, 2));
+    __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+    // Having undefined at this place means the name is not contained.
+    __ cmp(entry_key, Operand(undefined));
+    __ b(eq, &not_in_dictionary);
+
+    // Stop if we find the property.
+    __ cmp(entry_key, Operand(key));
+    __ b(eq, &in_dictionary);
+
+    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+      // Check if the entry name is not a symbol.
+      __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+      __ ldrb(entry_key,
+              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+      __ tst(entry_key, Operand(kIsSymbolMask));
+      __ b(eq, &maybe_in_dictionary);
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing a negative lookup, then probing failure should be
+  // treated as a lookup success. For a positive lookup, probing failure
+  // should be treated as a lookup failure.
+  if (mode_ == POSITIVE_LOOKUP) {
+    __ mov(result, Operand(0));
+    __ Ret();
+  }
+
+  __ bind(&in_dictionary);
+  __ mov(result, Operand(1));
+  __ Ret();
+
+  __ bind(&not_in_dictionary);
+  __ mov(result, Operand(0));
+  __ Ret();
+}
+
+
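A minimal standalone sketch (not part of the patch; names are illustrative) of
the probing scheme the stub above implements, following its comments: probe i
inspects slot (hash + i + i*i) & mask, where the capacity is a power of two and
each entry occupies StringDictionary::kEntrySize (3) pointers, so the probed
slot is scaled by 3 to obtain the element index.

    #include <cstddef>
    #include <cstdint>

    // Index of the first pointer of the probed entry in the elements array.
    std::size_t ProbedEntryIndex(std::uint32_t hash, std::uint32_t capacity,
                                 std::uint32_t probe) {
      const std::uint32_t kEntrySize = 3;                 // key, value, details
      std::uint32_t mask = capacity - 1;                  // capacity is 2^n
      std::uint32_t slot = (hash + probe + probe * probe) & mask;
      return static_cast<std::size_t>(slot) * kEntrySize; // scale by entry size
    }
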
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index d82afc7..fb05cd2 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -71,22 +71,108 @@
 };
 
 
-class TypeRecordingBinaryOpStub: public CodeStub {
+class UnaryOpStub: public CodeStub {
  public:
-  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+  UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
       : op_(op),
         mode_(mode),
-        operands_type_(TRBinaryOpIC::UNINITIALIZED),
-        result_type_(TRBinaryOpIC::UNINITIALIZED),
+        operand_type_(UnaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+  }
+
+  UnaryOpStub(
+      int key,
+      UnaryOpIC::TypeInfo operand_type)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        operand_type_(operand_type),
+        name_(NULL) {
+  }
+
+ private:
+  Token::Value op_;
+  UnaryOverwriteMode mode_;
+
+  // Operand type information determined at runtime.
+  UnaryOpIC::TypeInfo operand_type_;
+
+  char* name_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("UnaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           UnaryOpIC::GetName(operand_type_));
+  }
+#endif
+
+  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+  class OpBits: public BitField<Token::Value, 1, 7> {};
+  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+  Major MajorKey() { return UnaryOp; }
+  int MinorKey() {
+    return ModeBits::encode(mode_)
+           | OpBits::encode(op_)
+           | OperandTypeInfoBits::encode(operand_type_);
+  }
+
+  // Note: A lot of the helper functions below will vanish when we use virtual
+  // functions instead of switches more often.
+  void Generate(MacroAssembler* masm);
+
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateSmiStubSub(MacroAssembler* masm);
+  void GenerateSmiStubBitNot(MacroAssembler* masm);
+  void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
+  void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateHeapNumberStubSub(MacroAssembler* masm);
+  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateGenericStubSub(MacroAssembler* masm);
+  void GenerateGenericStubBitNot(MacroAssembler* masm);
+  void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return UnaryOpIC::ToState(operand_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_unary_op_type(operand_type_);
+  }
+};
+
+
+class BinaryOpStub: public CodeStub {
+ public:
+  BinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(BinaryOpIC::UNINITIALIZED),
+        result_type_(BinaryOpIC::UNINITIALIZED),
         name_(NULL) {
     use_vfp3_ = CpuFeatures::IsSupported(VFP3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  TypeRecordingBinaryOpStub(
+  BinaryOpStub(
       int key,
-      TRBinaryOpIC::TypeInfo operands_type,
-      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      BinaryOpIC::TypeInfo operands_type,
+      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         use_vfp3_(VFP3Bits::decode(key)),
@@ -105,8 +191,8 @@
   bool use_vfp3_;
 
   // Operand type information determined at runtime.
-  TRBinaryOpIC::TypeInfo operands_type_;
-  TRBinaryOpIC::TypeInfo result_type_;
+  BinaryOpIC::TypeInfo operands_type_;
+  BinaryOpIC::TypeInfo result_type_;
 
   char* name_;
 
@@ -114,12 +200,12 @@
 
 #ifdef DEBUG
   void Print() {
-    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+    PrintF("BinaryOpStub %d (op %s), "
            "(mode %d, runtime_type_info %s)\n",
            MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
-           TRBinaryOpIC::GetName(operands_type_));
+           BinaryOpIC::GetName(operands_type_));
   }
 #endif
 
@@ -127,10 +213,10 @@
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class VFP3Bits: public BitField<bool, 9, 1> {};
-  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
-  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
 
-  Major MajorKey() { return TypeRecordingBinaryOp; }
+  Major MajorKey() { return BinaryOp; }
   int MinorKey() {
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
@@ -158,6 +244,7 @@
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
   void GenerateAddStrings(MacroAssembler* masm);
   void GenerateCallRuntime(MacroAssembler* masm);
@@ -172,15 +259,15 @@
   void GenerateTypeTransition(MacroAssembler* masm);
   void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
 
-  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
 
   virtual InlineCacheState GetICState() {
-    return TRBinaryOpIC::ToState(operands_type_);
+    return BinaryOpIC::ToState(operands_type_);
   }
 
   virtual void FinishCode(Code* code) {
-    code->set_type_recording_binary_op_type(operands_type_);
-    code->set_type_recording_binary_op_result_type(result_type_);
+    code->set_binary_op_type(operands_type_);
+    code->set_binary_op_result_type(result_type_);
   }
 
   friend class CodeGenerator;
@@ -240,8 +327,7 @@
  public:
   StringCompareStub() { }
 
-  // Compare two flat ASCII strings and returns result in r0.
-  // Does not use the stack.
+  // Compares two flat ASCII strings and returns result in r0.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                               Register left,
                                               Register right,
@@ -250,11 +336,27 @@
                                               Register scratch3,
                                               Register scratch4);
 
- private:
-  Major MajorKey() { return StringCompare; }
-  int MinorKey() { return 0; }
+  // Compares two flat ASCII strings for equality and returns result
+  // in r0.
+  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3);
 
-  void Generate(MacroAssembler* masm);
+ private:
+  virtual Major MajorKey() { return StringCompare; }
+  virtual int MinorKey() { return 0; }
+  virtual void Generate(MacroAssembler* masm);
+
+  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register length,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Label* chars_not_equal);
 };
 
 
@@ -367,6 +469,205 @@
 };
 
 
+class FloatingPointHelper : public AllStatic {
+ public:
+
+  enum Destination {
+    kVFPRegisters,
+    kCoreRegisters
+  };
+
+
+  // Loads smis from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
+  // destination is floating point registers, VFP3 must be supported. If core
+  // registers are requested when VFP3 is supported, d6 and d7 will be
+  // scratched.
+  static void LoadSmis(MacroAssembler* masm,
+                       Destination destination,
+                       Register scratch1,
+                       Register scratch2);
+
+  // Loads objects from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
+  // destination is floating point registers, VFP3 must be supported. If core
+  // registers are requested when VFP3 is supported, d6 and d7 will still be
+  // scratched. If either r0 or r1 is not a number (not a smi and not a heap
+  // number object) the not_number label is jumped to with r0 and r1 intact.
+  static void LoadOperands(MacroAssembler* masm,
+                           FloatingPointHelper::Destination destination,
+                           Register heap_number_map,
+                           Register scratch1,
+                           Register scratch2,
+                           Label* not_number);
+
+  // Converts the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript section 9.5: the value is
+  // truncated and brought into the range -2^31 .. 2^31 - 1.
+  static void ConvertNumberToInt32(MacroAssembler* masm,
+                                   Register object,
+                                   Register dst,
+                                   Register heap_number_map,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   DwVfpRegister double_scratch,
+                                   Label* not_int32);
+
+  // Converts the integer (untagged smi) in |int_scratch| to a double, storing
+  // the result either in |double_dst| or |dst2:dst1|, depending on
+  // |destination|.
+  // Warning: The value in |int_scratch| will be changed in the process!
+  static void ConvertIntToDouble(MacroAssembler* masm,
+                                 Register int_scratch,
+                                 Destination destination,
+                                 DwVfpRegister double_dst,
+                                 Register dst1,
+                                 Register dst2,
+                                 Register scratch2,
+                                 SwVfpRegister single_scratch);
+
+  // Load the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+  static void LoadNumberAsInt32Double(MacroAssembler* masm,
+                                      Register object,
+                                      Destination destination,
+                                      DwVfpRegister double_dst,
+                                      Register dst1,
+                                      Register dst2,
+                                      Register heap_number_map,
+                                      Register scratch1,
+                                      Register scratch2,
+                                      SwVfpRegister single_scratch,
+                                      Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+  // scratch3 is not used when VFP3 is supported.
+  static void LoadNumberAsInt32(MacroAssembler* masm,
+                                Register object,
+                                Register dst,
+                                Register heap_number_map,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                DwVfpRegister double_scratch,
+                                Label* not_int32);
+
+  // Generates non-VFP3 code to check if a double can be exactly represented
+  // by a 32-bit integer. This does not check for 0 or -0, which need
+  // to be checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+  // through otherwise.
+  // src1 and src2 will be clobbered.
+  //
+  // Expected input:
+  // - src1: higher (exponent) part of the double value.
+  // - src2: lower (mantissa) part of the double value.
+  // Output status:
+  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
+  // - src2: contains 1.
+  // - other registers are clobbered.
+  static void DoubleIs32BitInteger(MacroAssembler* masm,
+                                   Register src1,
+                                   Register src2,
+                                   Register dst,
+                                   Register scratch,
+                                   Label* not_int32);
+
+  // Generates code to call a C function to do a double operation using core
+  // registers. (Used when VFP3 is not supported.)
+  // This code never falls through, but returns with a heap number containing
+  // the result in r0.
+  // Register heap_number_result must be a heap number in which the
+  // result of the operation will be stored.
+  // Requires the following layout on entry:
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+                                          Token::Value op,
+                                          Register heap_number_result,
+                                          Register scratch);
+
+ private:
+  static void LoadNumber(MacroAssembler* masm,
+                         FloatingPointHelper::Destination destination,
+                         Register object,
+                         DwVfpRegister dst,
+                         Register dst1,
+                         Register dst2,
+                         Register heap_number_map,
+                         Register scratch1,
+                         Register scratch2,
+                         Label* not_number);
+};
+
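Two tiny reference implementations (not part of the patch; purely illustrative)
of the numeric rules described in the FloatingPointHelper comments above: the
ToInt32 truncation of ECMAScript section 9.5 used by ConvertNumberToInt32, and
the "exactly representable as a 32-bit integer" condition checked by
DoubleIs32BitInteger (which, as in the stub, leaves 0 and -0 to a separate
check).

    #include <cmath>
    #include <cstdint>

    // ECMAScript ToInt32: truncate toward zero, then wrap modulo 2^32 into
    // the range -2^31 .. 2^31 - 1.
    std::int32_t ToInt32Reference(double value) {
      if (std::isnan(value) || std::isinf(value) || value == 0) return 0;
      double t = std::trunc(value);
      double m = std::fmod(t, 4294967296.0);       // into (-2^32, 2^32)
      if (m < 0) m += 4294967296.0;                // into [0, 2^32)
      if (m >= 2147483648.0) m -= 4294967296.0;    // into [-2^31, 2^31)
      return static_cast<std::int32_t>(m);
    }

    // True if 'value' has no fractional part and fits in a 32-bit integer.
    bool IsExact32BitInteger(double value) {
      if (value < -2147483648.0 || value > 2147483647.0) return false;
      return value == std::trunc(value);
    }
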
+
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+  void Generate(MacroAssembler* masm);
+
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register receiver,
+      Register properties,
+      String* name,
+      Register scratch0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register elements,
+                                     Register name,
+                                     Register r0,
+                                     Register r1);
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("StringDictionaryLookupStub\n");
+  }
+#endif
+
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+  int MinorKey() {
+    return LookupModeBits::encode(mode_);
+  }
+
+  class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+  LookupMode mode_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 6c82c12..5b62d82 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -552,13 +552,21 @@
   const int kDoubleRegsSize =
       kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
 
-  // Save all general purpose registers before messing with them.
-  __ sub(sp, sp, Operand(kDoubleRegsSize));
-  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
-    DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
-    int offset = i * kDoubleSize;
-    __ vstr(vfp_reg, sp, offset);
+  // Save all VFP registers before messing with them.
+  DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
+  DwVfpRegister last =
+      DwVfpRegister::FromAllocationIndex(
+          DwVfpRegister::kNumAllocatableRegisters - 1);
+  ASSERT(last.code() > first.code());
+  ASSERT((last.code() - first.code()) ==
+      (DwVfpRegister::kNumAllocatableRegisters - 1));
+#ifdef DEBUG
+  for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
+    ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
+           (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
   }
+#endif
+  __ vstm(db_w, sp, first, last);
 
   // Push all 16 registers (needed to populate FrameDescription::registers_).
   __ stm(db_w, sp, restored_regs  | sp.bit() | lr.bit() | pc.bit());
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index a3775b5..d4bd81c 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -502,13 +502,16 @@
         ASSERT(STRING_STARTS_WITH(format, "memop"));
         if (instr->HasL()) {
           Print("ldr");
-        } else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) {
-          if (instr->Bits(7, 4) == 0xf) {
-            Print("strd");
-          } else {
-            Print("ldrd");
-          }
         } else {
+          if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) &&
+              (instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) {
+            if (instr->Bit(5) == 1) {
+              Print("strd");
+            } else {
+              Print("ldrd");
+            }
+            return 5;
+          }
           Print("str");
         }
         return 5;
@@ -1086,10 +1089,10 @@
         }
       } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
         // vabs
-        Format(instr, "vabs'cond 'Dd, 'Dm");
+        Format(instr, "vabs.f64'cond 'Dd, 'Dm");
       } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
         // vneg
-        Format(instr, "vneg'cond 'Dd, 'Dm");
+        Format(instr, "vneg.f64'cond 'Dd, 'Dm");
       } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
         DecodeVCVTBetweenDoubleAndSingle(instr);
       } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 871b453..6116513 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -46,6 +46,12 @@
 #define __ ACCESS_MASM(masm_)
 
 
+static unsigned GetPropertyId(Property* property) {
+  if (property->is_synthetic()) return AstNode::kNoNumber;
+  return property->id();
+}
+
+
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
@@ -133,6 +139,20 @@
   }
 #endif
 
+  // Strict mode functions need to replace the receiver with undefined
+  // when called as functions (without an explicit receiver
+  // object). r5 is zero for method calls and non-zero for function
+  // calls.
+  if (info->is_strict_mode()) {
+    Label ok;
+    __ cmp(r5, Operand(0));
+    __ b(eq, &ok);
+    int receiver_offset = scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ str(r2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+
   int locals_count = scope()->num_stack_slots();
 
   __ Push(lr, fp, cp, r1);
@@ -562,23 +582,6 @@
                                Label* if_false,
                                Label* fall_through) {
   if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-    // Emit the inlined tests assumed by the stub.
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(result_register(), ip);
-    __ b(eq, if_false);
-    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-    __ cmp(result_register(), ip);
-    __ b(eq, if_true);
-    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-    __ cmp(result_register(), ip);
-    __ b(eq, if_false);
-    STATIC_ASSERT(kSmiTag == 0);
-    __ tst(result_register(), result_register());
-    __ b(eq, if_false);
-    __ JumpIfSmi(result_register(), if_true);
-
-    // Call the ToBoolean stub for all other cases.
     ToBooleanStub stub(result_register());
     __ CallStub(&stub);
     __ tst(result_register(), result_register());
@@ -590,8 +593,6 @@
     __ LoadRoot(ip, Heap::kFalseValueRootIndex);
     __ cmp(r0, ip);
   }
-
-  // The stub returns nonzero for true.
   Split(ne, if_true, if_false, fall_through);
 }
 
@@ -759,23 +760,22 @@
     }
 
   } else if (prop != NULL) {
-    if (function != NULL || mode == Variable::CONST) {
-      // We are declaring a function or constant that rewrites to a
-      // property.  Use (keyed) IC to set the initial value.  We
-      // cannot visit the rewrite because it's shared and we risk
-      // recording duplicate AST IDs for bailouts from optimized code.
+    // A const declaration aliasing a parameter is an illegal redeclaration.
+    ASSERT(mode != Variable::CONST);
+    if (function != NULL) {
+      // We are declaring a function that rewrites to a property.
+      // Use (keyed) IC to set the initial value.  We cannot visit the
+      // rewrite because it's shared and we risk recording duplicate AST
+      // IDs for bailouts from optimized code.
       ASSERT(prop->obj()->AsVariableProxy() != NULL);
       { AccumulatorValueContext for_object(this);
         EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
       }
-      if (function != NULL) {
-        __ push(r0);
-        VisitForAccumulatorValue(function);
-        __ pop(r2);
-      } else {
-        __ mov(r2, r0);
-        __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
-      }
+
+      __ push(r0);
+      VisitForAccumulatorValue(function);
+      __ pop(r2);
+
       ASSERT(prop->key()->AsLiteral() != NULL &&
              prop->key()->AsLiteral()->handle()->IsSmi());
       __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
@@ -783,7 +783,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
       // Value in r0 is ignored (declarations are statements).
     }
   }
@@ -857,7 +857,8 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    EmitCallIC(ic, &patch_site);
+    EmitCallIC(ic, &patch_site, clause->CompareId());
+
     __ cmp(r0, Operand(0));
     __ b(ne, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
@@ -915,7 +916,7 @@
   __ b(hs, &done_convert);
   __ bind(&convert);
   __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ bind(&done_convert);
   __ push(r0);
 
@@ -943,9 +944,8 @@
   // check for an enum cache.  Leave the map in r2 for the subsequent
   // prototype load.
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOffset));
-  __ cmp(r3, empty_descriptor_array_value);
-  __ b(eq, &call_runtime);
+  __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(r3, &call_runtime);
 
   // Check that there is an enum cache in the non-empty instance
   // descriptors (r3).  This is the case if the next enumeration
@@ -990,7 +990,7 @@
 
   // We got a map in register r0. Get the enumeration cache from it.
   __ bind(&use_cache);
-  __ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(r0, r1);
   __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
   __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
@@ -1039,7 +1039,7 @@
   // just skip it.
   __ push(r1);  // Enumerable.
   __ push(r3);  // Current entry.
-  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
   __ mov(r3, Operand(r0), SetCC);
   __ b(eq, loop_statement.continue_target());
 
@@ -1109,6 +1109,67 @@
 }
 
 
+void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
+    Slot* slot,
+    TypeofState typeof_state,
+    Label* slow) {
+  Register current = cp;
+  Register next = r1;
+  Register temp = r2;
+
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+        __ tst(temp, temp);
+        __ b(ne, slow);
+      }
+      // Load next context in chain.
+      __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
+      __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+      // Walk the rest of the chain without clobbering cp.
+      current = next;
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
+  if (s->is_eval_scope()) {
+    Label loop, fast;
+    if (!current.is(next)) {
+      __ Move(next, current);
+    }
+    __ bind(&loop);
+    // Terminate at global context.
+    __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+    __ cmp(temp, ip);
+    __ b(eq, &fast);
+    // Check that extension is NULL.
+    __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+    __ tst(temp, temp);
+    __ b(ne, slow);
+    // Load next context in chain.
+    __ ldr(next, ContextOperand(next, Context::CLOSURE_INDEX));
+    __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+    __ b(&loop);
+    __ bind(&fast);
+  }
+
+  __ ldr(r0, GlobalObjectOperand());
+  __ mov(r2, Operand(slot->var()->name()));
+  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+      ? RelocInfo::CODE_TARGET
+      : RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  EmitCallIC(ic, mode, AstNode::kNoNumber);
+}
+
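A simplified sketch (not part of the patch; the Ctx struct and function are
hypothetical stand-ins, not V8 API) of the run-time condition the code emitted
by EmitLoadGlobalSlotCheckExtensions enforces: the fast, IC-based global load
is only valid if no context on the way to the global context carries an
extension object (which an eval call could use to shadow the variable);
otherwise the emitted code branches to the slow path.

    #include <cstddef>

    struct Ctx {
      Ctx* previous;    // enclosing context, NULL for the global context
      void* extension;  // extension object installed by eval/with, or NULL
      bool is_global;   // true for the global context
    };

    // Returns true if the fast global load may be used.
    bool FastGlobalLoadAllowed(const Ctx* current) {
      while (current != NULL && !current->is_global) {
        if (current->extension != NULL) return false;  // take the slow path
        current = current->previous;
      }
      return true;
    }
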
+
 MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
     Slot* slot,
     Label* slow) {
@@ -1187,7 +1248,7 @@
           __ mov(r0, Operand(key_literal->handle()));
           Handle<Code> ic =
               isolate()->builtins()->KeyedLoadIC_Initialize();
-          EmitCallIC(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
           __ jmp(done);
         }
       }
@@ -1196,67 +1257,6 @@
 }
 
 
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow) {
-  Register current = cp;
-  Register next = r1;
-  Register temp = r2;
-
-  Scope* s = scope();
-  while (s != NULL) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
-        __ tst(temp, temp);
-        __ b(ne, slow);
-      }
-      // Load next context in chain.
-      __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
-      __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
-      // Walk the rest of the chain without clobbering cp.
-      current = next;
-    }
-    // If no outer scope calls eval, we do not need to check more
-    // context extensions.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
-    s = s->outer_scope();
-  }
-
-  if (s->is_eval_scope()) {
-    Label loop, fast;
-    if (!current.is(next)) {
-      __ Move(next, current);
-    }
-    __ bind(&loop);
-    // Terminate at global context.
-    __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
-    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
-    __ cmp(temp, ip);
-    __ b(eq, &fast);
-    // Check that extension is NULL.
-    __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
-    __ tst(temp, temp);
-    __ b(ne, slow);
-    // Load next context in chain.
-    __ ldr(next, ContextOperand(next, Context::CLOSURE_INDEX));
-    __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
-    __ b(&loop);
-    __ bind(&fast);
-  }
-
-  __ ldr(r0, GlobalObjectOperand());
-  __ mov(r2, Operand(slot->var()->name()));
-  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
-      ? RelocInfo::CODE_TARGET
-      : RelocInfo::CODE_TARGET_CONTEXT;
-  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  EmitCallIC(ic, mode);
-}
-
-
 void FullCodeGenerator::EmitVariableLoad(Variable* var) {
   // Four cases: non-this global variables, lookup slots, all other
   // types of slots, and parameters that rewrite to explicit property
@@ -1271,7 +1271,7 @@
     __ ldr(r0, GlobalObjectOperand());
     __ mov(r2, Operand(var->name()));
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
     context()->Plug(r0);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1330,7 +1330,7 @@
 
     // Call keyed load IC. It has arguments key and receiver in r0 and r1.
     Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
     context()->Plug(r0);
   }
 }
@@ -1438,8 +1438,10 @@
             VisitForAccumulatorValue(value);
             __ mov(r2, Operand(key->handle()));
             __ ldr(r1, MemOperand(sp));
-            Handle<Code> ic = isolate()->builtins()->StoreIC_Initialize();
-            EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            Handle<Code> ic = is_strict_mode()
+                ? isolate()->builtins()->StoreIC_Initialize_Strict()
+                : isolate()->builtins()->StoreIC_Initialize();
+            EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1651,13 +1653,13 @@
     SetSourcePosition(expr->position() + 1);
     AccumulatorValueContext context(this);
     if (ShouldInlineSmiCase(op)) {
-      EmitInlineSmiBinaryOp(expr,
+      EmitInlineSmiBinaryOp(expr->binary_operation(),
                             op,
                             mode,
                             expr->target(),
                             expr->value());
     } else {
-      EmitBinaryOp(op, mode);
+      EmitBinaryOp(expr->binary_operation(), op, mode);
     }
 
     // Deoptimization point in case the binary operation may have side effects.
@@ -1693,7 +1695,7 @@
   __ mov(r2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name r0 and r2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
@@ -1701,11 +1703,11 @@
   SetSourcePosition(prop->position());
   // Call keyed load IC. It has arguments key and receiver in r0 and r1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
                                               Expression* left_expr,
@@ -1727,14 +1729,14 @@
   patch_site.EmitJumpIfSmi(scratch1, &smi_case);
 
   __ bind(&stub_call);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
+  BinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site, expr->id());
   __ jmp(&done);
 
   __ bind(&smi_case);
   // Smi case. This code works the same way as the smi-smi case in the type
   // recording binary operation stub, see
-  // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments.
+  // BinaryOpStub::GenerateSmiSmiOperation for comments.
   switch (op) {
     case Token::SAR:
       __ b(&stub_call);
@@ -1804,11 +1806,12 @@
 }
 
 
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+                                     Token::Value op,
                                      OverwriteMode mode) {
   __ pop(r1);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), NULL);
+  BinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), NULL, expr->id());
   context()->Plug(r0);
 }
 
@@ -1848,7 +1851,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1871,7 +1874,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
       break;
     }
   }
@@ -1897,7 +1900,7 @@
     Handle<Code> ic = is_strict_mode()
         ? isolate()->builtins()->StoreIC_Initialize_Strict()
         : isolate()->builtins()->StoreIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
 
   } else if (op == Token::INIT_CONST) {
     // Like var declarations, const declarations are hoisted to function
@@ -2006,7 +2009,7 @@
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2052,7 +2055,7 @@
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2103,8 +2106,8 @@
   // Call the IC initialization code.
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic =
-      isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-  EmitCallIC(ic, mode);
+      isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+  EmitCallIC(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2113,8 +2116,7 @@
 
 
 void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key,
-                                            RelocInfo::Mode mode) {
+                                            Expression* key) {
   // Load the key.
   VisitForAccumulatorValue(key);
 
@@ -2139,7 +2141,7 @@
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
   __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
-  EmitCallIC(ic, mode);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2147,7 +2149,7 @@
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2159,7 +2161,7 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arg_count, in_loop, flags);
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2255,7 +2257,7 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2305,7 +2307,10 @@
       __ bind(&call);
     }
 
-    EmitCallWithStub(expr);
+    // The receiver is either the global receiver or an object found
+    // by LoadContextSlot. That object could be the hole if the
+    // receiver is implicitly the global object.
+    EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
   } else if (fun->AsProperty() != NULL) {
     // Call to an object property.
     Property* prop = fun->AsProperty();
@@ -2319,7 +2324,7 @@
     } else {
       // Call to a keyed property.
       // For a synthetic property use keyed load IC followed by function call,
-      // for a regular property use keyed CallIC.
+      // for a regular property use keyed EmitCallIC.
       if (prop->is_synthetic()) {
         // Do not visit the object and key subexpressions (they are shared
         // by all occurrences of the same rewritten parameter).
@@ -2337,16 +2342,16 @@
         SetSourcePosition(prop->position());
 
         Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-        EmitCallIC(ic, RelocInfo::CODE_TARGET);
+        EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
         __ ldr(r1, GlobalObjectOperand());
         __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
         __ Push(r0, r1);  // Function, receiver.
-        EmitCallWithStub(expr);
+        EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
       } else {
         { PreservePositionScope scope(masm()->positions_recorder());
           VisitForStackValue(prop->obj());
         }
-        EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
+        EmitKeyedCallWithIC(expr, prop->key());
       }
     }
   } else {
@@ -2358,7 +2363,7 @@
     __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
     __ push(r1);
     // Emit function call.
-    EmitCallWithStub(expr);
+    EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
   }
 
 #ifdef DEBUG
@@ -2548,7 +2553,7 @@
   // Look for valueOf symbol in the descriptor array, and indicate false if
   // found. The type is not checked, so if it is a transition it is a false
   // negative.
-  __ ldr(r4, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(r1, r4);
   __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
   // r4: descriptor array
   // r3: length of descriptor array
@@ -3161,17 +3166,17 @@
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
-  int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForStackValue(args->at(0));  // Receiver.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i + 1));
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
   }
-  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
+  VisitForAccumulatorValue(args->last());  // Function.
 
-  // InvokeFunction requires function in r1. Move it in there.
-  if (!result_register().is(r1)) __ mov(r1, result_register());
+  // InvokeFunction requires the function in r1. Move it in there.
+  __ mov(r1, result_register());
   ParameterCount count(arg_count);
-  __ InvokeFunction(r1, count, CALL_FUNCTION);
+  __ InvokeFunction(r1, count, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   context()->Plug(r0);
 }
@@ -3656,9 +3661,12 @@
   if (expr->is_jsruntime()) {
     // Call the JS runtime function.
     __ mov(r2, Operand(expr->name()));
+    RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
-        isolate()->stub_cache()->ComputeCallInitialize(arg_count, NOT_IN_LOOP);
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+        isolate()->stub_cache()->ComputeCallInitialize(arg_count,
+                                                       NOT_IN_LOOP,
+                                                       mode);
+    EmitCallIC(ic, mode, expr->id());
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3686,7 +3694,7 @@
           VisitForStackValue(prop->key());
           __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
           __ push(r1);
-          __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
           context()->Plug(r0);
         }
       } else if (var != NULL) {
@@ -3698,7 +3706,7 @@
           __ mov(r1, Operand(var->name()));
           __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
           __ Push(r2, r1, r0);
-          __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
           context()->Plug(r0);
         } else if (var->AsSlot() != NULL &&
                    var->AsSlot()->type() != Slot::LOOKUP) {
@@ -3775,48 +3783,13 @@
       break;
     }
 
-    case Token::SUB: {
-      Comment cmt(masm_, "[ UnaryOperation (SUB)");
-      bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode overwrite =
-          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
-      // GenericUnaryOpStub expects the argument to be in the
-      // accumulator register r0.
-      VisitForAccumulatorValue(expr->expression());
-      __ CallStub(&stub);
-      context()->Plug(r0);
+    case Token::SUB:
+      EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
       break;
-    }
 
-    case Token::BIT_NOT: {
-      Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
-      // The generic unary operation stub expects the argument to be
-      // in the accumulator register r0.
-      VisitForAccumulatorValue(expr->expression());
-      Label done;
-      bool inline_smi_code = ShouldInlineSmiCase(expr->op());
-      if (inline_smi_code) {
-        Label call_stub;
-        __ JumpIfNotSmi(r0, &call_stub);
-        __ mvn(r0, Operand(r0));
-        // Bit-clear inverted smi-tag.
-        __ bic(r0, r0, Operand(kSmiTagMask));
-        __ b(&done);
-        __ bind(&call_stub);
-      }
-      bool overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOpFlags flags = inline_smi_code
-          ? NO_UNARY_SMI_CODE_IN_STUB
-          : NO_UNARY_FLAGS;
-      UnaryOverwriteMode mode =
-          overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
-      __ CallStub(&stub);
-      __ bind(&done);
-      context()->Plug(r0);
+    case Token::BIT_NOT:
+      EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
       break;
-    }
 
     default:
       UNREACHABLE();
@@ -3824,6 +3797,23 @@
 }
 
 
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+                                           const char* comment) {
+  // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+  Comment cmt(masm_, comment);
+  bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+  UnaryOverwriteMode overwrite =
+      can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+  UnaryOpStub stub(expr->op(), overwrite);
+  // UnaryOpStub expects the argument to be in the
+  // accumulator register r0.
+  VisitForAccumulatorValue(expr->expression());
+  SetSourcePosition(expr->position());
+  EmitCallIC(stub.GetCode(), NULL, expr->id());
+  context()->Plug(r0);
+}
+
+
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -3936,8 +3926,8 @@
   // Record position before stub call.
   SetSourcePosition(expr->position());
 
-  TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
-  EmitCallIC(stub.GetCode(), &patch_site);
+  BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+  EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
   __ bind(&done);
 
   // Store the value returned in r0.
@@ -3968,7 +3958,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3985,7 +3975,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4011,7 +4001,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(r0);
   } else if (proxy != NULL &&
@@ -4144,7 +4134,7 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      __ InvokeBuiltin(Builtins::IN, CALL_JS);
+      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
       PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
       __ cmp(r0, ip);
@@ -4214,7 +4204,7 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      EmitCallIC(ic, &patch_site);
+      EmitCallIC(ic, &patch_site, expr->id());
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ cmp(r0, Operand(0));
       Split(cond, if_true, if_false, fall_through);
@@ -4276,7 +4266,9 @@
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+                                   RelocInfo::Mode mode,
+                                   unsigned ast_id) {
   ASSERT(mode == RelocInfo::CODE_TARGET ||
          mode == RelocInfo::CODE_TARGET_CONTEXT);
   Counters* counters = isolate()->counters();
@@ -4295,12 +4287,19 @@
     default:
       break;
   }
-
-  __ Call(ic, mode);
+  if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
+    __ Call(ic, mode);
+  } else {
+    ASSERT(mode == RelocInfo::CODE_TARGET);
+    mode = RelocInfo::CODE_TARGET_WITH_ID;
+    __ CallWithAstId(ic, mode, ast_id);
+  }
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+                                   JumpPatchSite* patch_site,
+                                   unsigned ast_id) {
   Counters* counters = isolate()->counters();
   switch (ic->kind()) {
     case Code::LOAD_IC:
@@ -4318,7 +4317,11 @@
       break;
   }
 
-  __ Call(ic, RelocInfo::CODE_TARGET);
+  if (ast_id == kNoASTId) {
+    __ Call(ic, RelocInfo::CODE_TARGET);
+  } else {
+    __ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+  }
   if (patch_site != NULL && patch_site->is_bound()) {
     patch_site->EmitPatchInfo();
   } else {
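Note: the two EmitCallIC overloads above differ only in whether an AST id accompanies the call site; when one is supplied, the call is emitted with CODE_TARGET_WITH_ID relocation so later passes can map the call back to its AST node. A minimal standalone sketch of that "call site tagged with an id" idea follows; RelocTable and RecordCall are illustrative names, not part of the V8 API.

    #include <cassert>
    #include <map>

    const unsigned kNoId = 0xffffffffu;  // Sentinel: no AST node attached.

    // Toy relocation table mapping emitted call offsets to AST node ids.
    struct RelocTable {
      std::map<int, unsigned> ast_id_at_offset;

      void RecordCall(int pc_offset, unsigned ast_id) {
        // Only calls carrying an id get the extra entry; plain calls are
        // recorded as ordinary code targets (not modelled here).
        if (ast_id != kNoId) ast_id_at_offset[pc_offset] = ast_id;
      }
    };

    int main() {
      RelocTable table;
      table.RecordCall(16, 7);      // Call emitted with an AST id.
      table.RecordCall(32, kNoId);  // Plain call, nothing extra recorded.
      assert(table.ast_id_at_offset.count(16) == 1);
      assert(table.ast_id_at_offset.count(32) == 0);
      return 0;
    }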
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 8acf7c2..2123163 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -105,65 +105,6 @@
 }
 
 
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
-                                           Label* miss,
-                                           Label* done,
-                                           Register elements,
-                                           Register name,
-                                           Register scratch1,
-                                           Register scratch2) {
-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
-
-  // Compute the capacity mask.
-  const int kCapacityOffset = StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
-  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize));  // convert smi to int
-  __ sub(scratch1, scratch1, Operand(1));
-
-  const int kElementsStartOffset = StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  static const int kProbes = 4;
-  for (int i = 0; i < kProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
-    if (i > 0) {
-      // Add the probe offset (i + i * i) left shifted to avoid right shifting
-      // the hash in a separate instruction. The value hash + i + i * i is right
-      // shifted in the following and instruction.
-      ASSERT(StringDictionary::GetProbeOffset(i) <
-             1 << (32 - String::kHashFieldOffset));
-      __ add(scratch2, scratch2, Operand(
-          StringDictionary::GetProbeOffset(i) << String::kHashShift));
-    }
-    __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
-
-    // Scale the index by multiplying by the element size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    // scratch2 = scratch2 * 3.
-    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
-    // Check if the key is identical to the name.
-    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
-    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
-    __ cmp(name, Operand(ip));
-    if (i != kProbes - 1) {
-      __ b(eq, done);
-    } else {
-      __ b(ne, miss);
-    }
-  }
-}
-
-
 // Helper function used from LoadIC/CallIC GenerateNormal.
 //
 // elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -191,13 +132,13 @@
   Label done;
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss,
-                                 &done,
-                                 elements,
-                                 name,
-                                 scratch1,
-                                 scratch2);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch1,
+                                                     scratch2);
 
   // If probing finds an entry check that the value is a normal
   // property.
@@ -240,13 +181,13 @@
   Label done;
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss,
-                                 &done,
-                                 elements,
-                                 name,
-                                 scratch1,
-                                 scratch2);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch1,
+                                                     scratch2);
 
   // If probing finds an entry in the dictionary check that the value
   // is a normal property that is not read only.
@@ -538,7 +479,8 @@
 // The generated code falls through if both probes miss.
 static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
                                           int argc,
-                                          Code::Kind kind) {
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r1    : receiver
   //  -- r2    : name
@@ -549,7 +491,7 @@
   Code::Flags flags = Code::ComputeFlags(kind,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(
@@ -615,7 +557,8 @@
 
   // Invoke the function.
   ParameterCount actual(argc);
-  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+  __ InvokeFunction(r1, actual, JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 }
 
 
@@ -641,7 +584,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -693,22 +639,33 @@
   }
 
   // Invoke the function.
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   ParameterCount actual(argc);
-  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+  __ InvokeFunction(r1,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    call_kind);
 }
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
 }
 
 
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -716,8 +673,8 @@
 
   // Get the receiver of the function from the stack into r1.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
-  GenerateMiss(masm, argc);
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+  GenerateMiss(masm, argc, extra_ic_state);
 }
 
 
@@ -728,7 +685,7 @@
   // -----------------------------------
 
   GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
 }
 
 
@@ -738,7 +695,7 @@
   //  -- lr    : return address
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
 }
 
 
@@ -824,7 +781,10 @@
 
   __ bind(&lookup_monomorphic_cache);
   __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
-  GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+  GenerateMonomorphicCacheProbe(masm,
+                                argc,
+                                Code::KEYED_CALL_IC,
+                                Code::kNoExtraICState);
   // Fall through on miss.
 
   __ bind(&slow_call);
@@ -926,222 +886,8 @@
   __ TailCallExternalReference(ref, 2, 1);
 }
 
-// Returns the code marker, or the 0 if the code is not marked.
-static inline int InlinedICSiteMarker(Address address,
-                                      Address* inline_end_address) {
-  if (V8::UseCrankshaft()) return false;
 
-  // If the instruction after the call site is not the pseudo instruction nop1
-  // then this is not related to an inlined in-object property load. The nop1
-  // instruction is located just after the call to the IC in the deferred code
-  // handling the miss in the inlined code. After the nop1 instruction there is
-  // a branch instruction for jumping back from the deferred code.
-  Address address_after_call = address + Assembler::kCallTargetAddressOffset;
-  Instr instr_after_call = Assembler::instr_at(address_after_call);
-  int code_marker = MacroAssembler::GetCodeMarker(instr_after_call);
-
-  // A negative result means the code is not marked.
-  if (code_marker <= 0) return 0;
-
-  Address address_after_nop = address_after_call + Assembler::kInstrSize;
-  Instr instr_after_nop = Assembler::instr_at(address_after_nop);
-  // There may be some reg-reg move and frame merging code to skip over before
-  // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
-  // code.
-  while (!Assembler::IsBranch(instr_after_nop)) {
-    address_after_nop += Assembler::kInstrSize;
-    instr_after_nop = Assembler::instr_at(address_after_nop);
-  }
-
-  // Find the end of the inlined code for handling the load.
-  int b_offset =
-      Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
-  ASSERT(b_offset < 0);  // Jumping back from deferred code.
-  *inline_end_address = address_after_nop + b_offset;
-
-  return code_marker;
-}
-
-
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Find the end of the inlined code for handling the load if this is an
-  // inlined IC call site.
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
-  // The immediate must be representable in 12 bits.
-  ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
-  Address ldr_property_instr_address =
-      inline_end_address - Assembler::kInstrSize;
-  ASSERT(Assembler::IsLdrRegisterImmediate(
-      Assembler::instr_at(ldr_property_instr_address)));
-  Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
-  ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
-      ldr_property_instr, offset - kHeapObjectTag);
-  Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
-
-  // Indicate that code has changed.
-  CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
-
-  // Patch the map check.
-  // For PROPERTY_ACCESS_INLINED, the load map instruction is generated
-  // 4 instructions before the end of the inlined code.
-  // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
-  int ldr_map_offset = -4;
-  Address ldr_map_instr_address =
-      inline_end_address + ldr_map_offset * Assembler::kInstrSize;
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  return true;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  // Find the end of the inlined code for handling the contextual load if
-  // this is inlined IC call site.
-  Address inline_end_address = 0;
-  int marker = InlinedICSiteMarker(address, &inline_end_address);
-  if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) ||
-        (marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) {
-    return false;
-  }
-  // On ARM we don't rely on the is_dont_delete argument as the hint is already
-  // embedded in the code marker.
-  bool marker_is_dont_delete =
-      marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE;
-
-  // These are the offsets from the end of the inlined code.
-  // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
-  int ldr_map_offset = marker_is_dont_delete ? -5: -8;
-  int ldr_cell_offset = marker_is_dont_delete ? -2: -5;
-  if (FLAG_debug_code && marker_is_dont_delete) {
-    // Three extra instructions were generated to check for the_hole_value.
-    ldr_map_offset -= 3;
-    ldr_cell_offset -= 3;
-  }
-  Address ldr_map_instr_address =
-      inline_end_address + ldr_map_offset * Assembler::kInstrSize;
-  Address ldr_cell_instr_address =
-      inline_end_address + ldr_cell_offset * Assembler::kInstrSize;
-
-  // Patch the map check.
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  // Patch the cell address.
-  Assembler::set_target_address_at(ldr_cell_instr_address,
-                                   reinterpret_cast<Address>(cell));
-
-  return true;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Find the end of the inlined code for the store if there is an
-  // inlined version of the store.
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Compute the address of the map load instruction.
-  Address ldr_map_instr_address =
-      inline_end_address -
-      (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
-       Assembler::kInstrSize);
-
-  // Update the offsets if initializing the inlined store. No reason
-  // to update the offsets when clearing the inlined version because
-  // it will bail out in the map check.
-  if (map != HEAP->null_value()) {
-    // Patch the offset in the actual store instruction.
-    Address str_property_instr_address =
-        ldr_map_instr_address + 3 * Assembler::kInstrSize;
-    Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
-    ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
-    str_property_instr = Assembler::SetStrRegisterImmediateOffset(
-        str_property_instr, offset - kHeapObjectTag);
-    Assembler::instr_at_put(str_property_instr_address, str_property_instr);
-
-    // Patch the offset in the add instruction that is part of the
-    // write barrier.
-    Address add_offset_instr_address =
-        str_property_instr_address + Assembler::kInstrSize;
-    Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
-    ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
-    add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
-        add_offset_instr, offset - kHeapObjectTag);
-    Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
-
-    // Indicate that code has changed.
-    CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
-  }
-
-  // Patch the map check.
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-
-  return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Patch the map check.
-  Address ldr_map_instr_address =
-      inline_end_address -
-      (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
-      Assembler::kInstrSize);
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  return true;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Find the end of the inlined code for handling the store if this is an
-  // inlined IC call site.
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Patch the map check.
-  Address ldr_map_instr_address =
-      inline_end_address -
-      (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
-      Assembler::kInstrSize);
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  return true;
-}
-
-
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
   //  -- r0     : key
@@ -1153,8 +899,11 @@
 
   __ Push(r1, r0);
 
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+  // Perform tail call to the entry.
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
+      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
   __ TailCallExternalReference(ref, 2, 1);
 }
 
@@ -1345,7 +1094,7 @@
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
@@ -1385,11 +1134,11 @@
       1);
 
   __ bind(&slow);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ---------- S t a t e --------------
   //  -- r0     : value
   //  -- r1     : key
@@ -1400,8 +1149,29 @@
   // Push receiver, key and value for runtime call.
   __ Push(r2, r1, r0);
 
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
+  //  -- lr     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(r2, r1, r0);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
   ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }
 
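Note: the force_generic flag threaded through the keyed-load and keyed-store miss handlers above only chooses which runtime entry the stub tail-calls; the rest of the code path is shared. A standalone sketch of that dispatch pattern, with illustrative function names rather than the real IC utility entries:

    #include <cstdio>

    // Stand-ins for the two runtime entries a keyed-load miss can reach.
    void KeyedLoadMiss()             { std::puts("normal miss: may patch the IC"); }
    void KeyedLoadMissForceGeneric() { std::puts("forced miss: use the generic stub"); }

    // Mirrors the shape of GenerateMiss: one flag picks the target up front.
    void GenerateMissSketch(bool force_generic) {
      void (*target)() = force_generic ? KeyedLoadMissForceGeneric : KeyedLoadMiss;
      target();  // In the real stub this is a tail call to an ExternalReference.
    }

    int main() {
      GenerateMissSketch(false);
      GenerateMissSketch(true);
      return 0;
    }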
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index efd226e..e32cd0c 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -61,22 +61,21 @@
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs must use a fixed register.
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
   }
   for (TempIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -151,7 +150,7 @@
 }
 
 
-void LGap::PrintDataTo(StringStream* stream) const {
+void LGap::PrintDataTo(StringStream* stream) {
   for (int i = 0; i < 4; i++) {
     stream->Add("(");
     if (parallel_moves_[i] != NULL) {
@@ -238,6 +237,13 @@
 }
 
 
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -301,6 +307,13 @@
 }
 
 
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[r2] #%d / ", arity());
 }
@@ -449,7 +462,7 @@
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LGap* gap = new LGap(block);
+  LInstructionGap* gap = new LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -852,22 +865,20 @@
 
   // Shift operations can only deoptimize if we do a logical shift
   // by 0 and the result cannot be truncated to int32.
-  bool can_deopt = (op == Token::SHR && constant_value == 0);
-  if (can_deopt) {
-    bool can_truncate = true;
-    for (int i = 0; i < instr->uses()->length(); i++) {
-      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
-        can_truncate = false;
+  bool may_deopt = (op == Token::SHR && constant_value == 0);
+  bool does_deopt = false;
+  if (may_deopt) {
+    for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+      if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+        does_deopt = true;
         break;
       }
     }
-    can_deopt = !can_truncate;
   }
 
   LInstruction* result =
-      DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
-  if (can_deopt) AssignEnvironment(result);
-  return result;
+      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+  return does_deopt ? AssignEnvironment(result) : result;
 }
 
 
@@ -1011,6 +1022,8 @@
                                           outer);
   int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
     HValue* value = hydrogen_env->values()->at(i);
     LOperand* op = NULL;
     if (value->IsArgumentsObject()) {
@@ -1037,97 +1050,97 @@
 
 LInstruction* LChunkBuilder::DoTest(HTest* instr) {
   HValue* v = instr->value();
-  if (v->EmitAtUses()) {
-    if (v->IsClassOfTest()) {
-      HClassOfTest* compare = HClassOfTest::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
-                                       TempRegister());
-    } else if (v->IsCompare()) {
-      HCompare* compare = HCompare::cast(v);
-      Token::Value op = compare->token();
-      HValue* left = compare->left();
-      HValue* right = compare->right();
-      Representation r = compare->GetInputRepresentation();
-      if (r.IsInteger32()) {
-        ASSERT(left->representation().IsInteger32());
-        ASSERT(right->representation().IsInteger32());
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseRegisterAtStart(right));
-      } else if (r.IsDouble()) {
-        ASSERT(left->representation().IsDouble());
-        ASSERT(right->representation().IsDouble());
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseRegisterAtStart(right));
-      } else {
-        ASSERT(left->representation().IsTagged());
-        ASSERT(right->representation().IsTagged());
-        bool reversed = op == Token::GT || op == Token::LTE;
-        LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
-        LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
-        LInstruction* result = new LCmpTAndBranch(left_operand,
-                                                  right_operand);
-        return MarkAsCall(result, instr);
-      }
-    } else if (v->IsIsSmi()) {
-      HIsSmi* compare = HIsSmi::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LIsSmiAndBranch(Use(compare->value()));
-    } else if (v->IsHasInstanceType()) {
-      HHasInstanceType* compare = HHasInstanceType::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-      return new LHasInstanceTypeAndBranch(
-          UseRegisterAtStart(compare->value()));
-    } else if (v->IsHasCachedArrayIndex()) {
-      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LHasCachedArrayIndexAndBranch(
-          UseRegisterAtStart(compare->value()));
-    } else if (v->IsIsNull()) {
-      HIsNull* compare = HIsNull::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
-    } else if (v->IsIsObject()) {
-      HIsObject* compare = HIsObject::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      LOperand* temp = TempRegister();
-      return new LIsObjectAndBranch(UseRegister(compare->value()), temp);
-    } else if (v->IsCompareJSObjectEq()) {
-      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
-      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
-                                         UseRegisterAtStart(compare->right()));
-    } else if (v->IsInstanceOf()) {
-      HInstanceOf* instance_of = HInstanceOf::cast(v);
-      LInstruction* result =
-          new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
-                                   UseFixed(instance_of->right(), r1));
-      return MarkAsCall(result, instr);
-    } else if (v->IsTypeofIs()) {
-      HTypeofIs* typeof_is = HTypeofIs::cast(v);
-      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
-    } else if (v->IsIsConstructCall()) {
-      return new LIsConstructCallAndBranch(TempRegister());
+  if (!v->EmitAtUses()) {
+    return new LBranch(UseRegisterAtStart(v));
+  } else if (v->IsClassOfTest()) {
+    HClassOfTest* compare = HClassOfTest::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+                                     TempRegister());
+  } else if (v->IsCompare()) {
+    HCompare* compare = HCompare::cast(v);
+    Token::Value op = compare->token();
+    HValue* left = compare->left();
+    HValue* right = compare->right();
+    Representation r = compare->GetInputRepresentation();
+    if (r.IsInteger32()) {
+      ASSERT(left->representation().IsInteger32());
+      ASSERT(right->representation().IsInteger32());
+      return new LCmpIDAndBranch(UseRegisterAtStart(left),
+                                 UseRegisterAtStart(right));
+    } else if (r.IsDouble()) {
+      ASSERT(left->representation().IsDouble());
+      ASSERT(right->representation().IsDouble());
+      return new LCmpIDAndBranch(UseRegisterAtStart(left),
+                                 UseRegisterAtStart(right));
     } else {
-      if (v->IsConstant()) {
-        if (HConstant::cast(v)->ToBoolean()) {
-          return new LGoto(instr->FirstSuccessor()->block_id());
-        } else {
-          return new LGoto(instr->SecondSuccessor()->block_id());
-        }
-      }
-      Abort("Undefined compare before branch");
-      return NULL;
+      ASSERT(left->representation().IsTagged());
+      ASSERT(right->representation().IsTagged());
+      bool reversed = op == Token::GT || op == Token::LTE;
+      LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
+      LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
+      LInstruction* result = new LCmpTAndBranch(left_operand, right_operand);
+      return MarkAsCall(result, instr);
     }
+  } else if (v->IsIsSmi()) {
+    HIsSmi* compare = HIsSmi::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LIsSmiAndBranch(Use(compare->value()));
+  } else if (v->IsIsUndetectable()) {
+    HIsUndetectable* compare = HIsUndetectable::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
+                                        TempRegister());
+  } else if (v->IsHasInstanceType()) {
+    HHasInstanceType* compare = HHasInstanceType::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
+  } else if (v->IsHasCachedArrayIndex()) {
+    HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LHasCachedArrayIndexAndBranch(
+        UseRegisterAtStart(compare->value()));
+  } else if (v->IsIsNull()) {
+    HIsNull* compare = HIsNull::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
+  } else if (v->IsIsObject()) {
+    HIsObject* compare = HIsObject::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    LOperand* temp = TempRegister();
+    return new LIsObjectAndBranch(UseRegister(compare->value()), temp);
+  } else if (v->IsCompareJSObjectEq()) {
+    HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+    return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+                                       UseRegisterAtStart(compare->right()));
+  } else if (v->IsCompareSymbolEq()) {
+    HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
+    return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
+                                     UseRegisterAtStart(compare->right()));
+  } else if (v->IsInstanceOf()) {
+    HInstanceOf* instance_of = HInstanceOf::cast(v);
+    LInstruction* result =
+        new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
+                                 UseFixed(instance_of->right(), r1));
+    return MarkAsCall(result, instr);
+  } else if (v->IsTypeofIs()) {
+    HTypeofIs* typeof_is = HTypeofIs::cast(v);
+    return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+  } else if (v->IsIsConstructCall()) {
+    return new LIsConstructCallAndBranch(TempRegister());
+  } else if (v->IsConstant()) {
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+        ? instr->FirstSuccessor()
+        : instr->SecondSuccessor();
+    return new LGoto(successor->block_id());
+  } else {
+    Abort("Undefined compare before branch");
+    return NULL;
   }
-  return new LBranch(UseRegisterAtStart(v));
 }
 
 
+
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1183,7 +1196,7 @@
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
 }
 
 
@@ -1212,6 +1225,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), r1);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(function);
+  return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
   if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1493,6 +1514,15 @@
 }
 
 
+LInstruction* LChunkBuilder::DoCompareSymbolEq(
+    HCompareSymbolEq* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
+  return DefineAsRegister(result);
+}
+
+
 LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1517,6 +1547,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LIsUndetectable(value));
+}
+
+
 LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1595,6 +1633,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
@@ -1703,6 +1749,24 @@
 }
 
 
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d1)));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new LClampIToUint8(reg));
+  } else {
+    ASSERT(input_rep.IsTagged());
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve d1 explicitly.
+    LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d1));
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   return new LReturn(UseFixed(instr->value(), r0));
 }
@@ -1842,11 +1906,14 @@
     HLoadKeyedSpecializedArrayElement* instr) {
   ExternalArrayType array_type = instr->array_type();
   Representation representation(instr->representation());
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ASSERT(
+      (representation.IsInteger32() && (array_type != kExternalFloatArray &&
+                                        array_type != kExternalDoubleArray)) ||
+      (representation.IsDouble() && (array_type == kExternalFloatArray ||
+                                     array_type == kExternalDoubleArray)));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* key = UseRegister(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
       new LLoadKeyedSpecializedArrayElement(external_pointer, key);
   LInstruction* load_instr = DefineAsRegister(result);
@@ -1890,8 +1957,11 @@
     HStoreKeyedSpecializedArrayElement* instr) {
   Representation representation(instr->value()->representation());
   ExternalArrayType array_type = instr->array_type();
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ASSERT(
+      (representation.IsInteger32() && (array_type != kExternalFloatArray &&
+                                        array_type != kExternalDoubleArray)) ||
+      (representation.IsDouble() && (array_type == kExternalFloatArray ||
+                                     array_type == kExternalDoubleArray)));
   ASSERT(instr->external_pointer()->representation().IsExternal());
   ASSERT(instr->key()->representation().IsInteger32());
 
@@ -1901,7 +1971,7 @@
   LOperand* val = val_is_temp_register
       ? UseTempRegister(instr->value())
       : UseRegister(instr->value());
-  LOperand* key = UseRegister(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());
 
   return new LStoreKeyedSpecializedArrayElement(external_pointer,
                                                 key,
@@ -1946,6 +2016,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), r0), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseRegister(instr->string());
   LOperand* index = UseRegisterOrConstant(instr->index());
@@ -2106,8 +2183,9 @@
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
                                                instr->function(),
-                                               false,
-                                               undefined);
+                                               HEnvironment::LITHIUM,
+                                               undefined,
+                                               instr->call_kind());
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2121,4 +2199,12 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* object = UseRegisterAtStart(instr->object());
+  LIn* result = new LIn(key, object);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
 } }  // namespace v8::internal
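Note: the does_deopt logic in DoShift above rests on the observation that a logical shift right by zero can leave the top bit set, and such a value has no int32 representation, so the result can only be kept as an int32 if every use truncates it. A small self-contained check of that arithmetic (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Logical (unsigned) shift right, as JavaScript's >>> operator behaves.
    uint32_t ShiftRightLogical(uint32_t value, int shift_count) {
      return value >> (shift_count & 0x1f);
    }

    int main() {
      // A non-zero shift amount always clears the sign bit, so the result
      // fits in an int32.
      assert(ShiftRightLogical(0x80000000u, 1) <= uint32_t(INT32_MAX));
      // A shift by zero leaves the value unchanged; 0x80000000 is outside
      // the int32 range, which is the case that forces a deoptimization
      // when some use does not truncate its input to int32.
      assert(ShiftRightLogical(0x80000000u, 0) > uint32_t(INT32_MAX));
      return 0;
    }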
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index e1c65d2..73c4a87 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -73,6 +73,9 @@
   V(CheckMap)                                   \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
+  V(ClampDToUint8)                              \
+  V(ClampIToUint8)                              \
+  V(ClampTToUint8)                              \
   V(ClassOfTest)                                \
   V(ClassOfTestAndBranch)                       \
   V(CmpID)                                      \
@@ -80,6 +83,8 @@
   V(CmpJSObjectEq)                              \
   V(CmpJSObjectEqAndBranch)                     \
   V(CmpMapAndBranch)                            \
+  V(CmpSymbolEq)                                \
+  V(CmpSymbolEqAndBranch)                       \
   V(CmpT)                                       \
   V(CmpTAndBranch)                              \
   V(ConstantD)                                  \
@@ -93,7 +98,6 @@
   V(ExternalArrayLength)                        \
   V(FixedArrayLength)                           \
   V(FunctionLiteral)                            \
-  V(Gap)                                        \
   V(GetCachedArrayIndex)                        \
   V(GlobalObject)                               \
   V(GlobalReceiver)                             \
@@ -102,16 +106,23 @@
   V(HasCachedArrayIndexAndBranch)               \
   V(HasInstanceType)                            \
   V(HasInstanceTypeAndBranch)                   \
+  V(In)                                         \
   V(InstanceOf)                                 \
   V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
+  V(InstructionGap)                             \
   V(Integer32ToDouble)                          \
+  V(InvokeFunction)                             \
+  V(IsConstructCall)                            \
+  V(IsConstructCallAndBranch)                   \
   V(IsNull)                                     \
   V(IsNullAndBranch)                            \
   V(IsObject)                                   \
   V(IsObjectAndBranch)                          \
   V(IsSmi)                                      \
   V(IsSmiAndBranch)                             \
+  V(IsUndetectable)                             \
+  V(IsUndetectableAndBranch)                    \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -152,6 +163,7 @@
   V(StoreKeyedSpecializedArrayElement)          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
   V(StringCharCodeAt)                           \
   V(StringCharFromCode)                         \
   V(StringLength)                               \
@@ -162,27 +174,21 @@
   V(Typeof)                                     \
   V(TypeofIs)                                   \
   V(TypeofIsAndBranch)                          \
-  V(IsConstructCall)                            \
-  V(IsConstructCallAndBranch)                   \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
   V(ValueOf)
 
 
-#define DECLARE_INSTRUCTION(type)                \
-  virtual bool Is##type() const { return true; } \
-  static L##type* cast(LInstruction* instr) {    \
-    ASSERT(instr->Is##type());                   \
-    return reinterpret_cast<L##type*>(instr);    \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual Opcode opcode() const { return LInstruction::k##type; } \
+  virtual void CompileToNative(LCodeGen* generator);              \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                     \
+    ASSERT(instr->Is##type());                                    \
+    return reinterpret_cast<L##type*>(instr);                     \
   }
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
-  virtual void CompileToNative(LCodeGen* generator);        \
-  virtual const char* Mnemonic() const { return mnemonic; } \
-  DECLARE_INSTRUCTION(type)
-
-
 #define DECLARE_HYDROGEN_ACCESSOR(type)     \
   H##type* hydrogen() const {               \
     return H##type::cast(hydrogen_value()); \
@@ -204,10 +210,25 @@
   virtual void PrintDataTo(StringStream* stream) = 0;
   virtual void PrintOutputOperandTo(StringStream* stream) = 0;
 
-  // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
-  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
 
   virtual bool IsControl() const { return false; }
   virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
@@ -334,8 +355,13 @@
     parallel_moves_[AFTER] = NULL;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
-  virtual void PrintDataTo(StringStream* stream) const;
+  // Can't use the DECLARE-macro here because of sub-classes.
+  virtual bool IsGap() const { return true; }
+  virtual void PrintDataTo(StringStream* stream);
+  static LGap* cast(LInstruction* instr) {
+    ASSERT(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
 
   bool IsRedundant() const;
 
@@ -365,6 +391,14 @@
 };
 
 
+class LInstructionGap: public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
 class LGoto: public LTemplateInstruction<0, 0, 0> {
  public:
   LGoto(int block_id, bool include_stack_check = false)
@@ -453,7 +487,6 @@
 template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
-  DECLARE_INSTRUCTION(ControlInstruction)
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -655,6 +688,28 @@
 };
 
 
+class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCmpSymbolEq(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
+};
+
+
+class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
+};
+
+
 class LIsNull: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LIsNull(LOperand* value) {
@@ -728,6 +783,31 @@
 };
 
 
+class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LIsUndetectable(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+  explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LHasInstanceType(LOperand* value) {
@@ -1104,6 +1184,7 @@
 
   Token::Value op() const { return op_; }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1120,6 +1201,7 @@
     inputs_[1] = right;
   }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1412,6 +1494,23 @@
 };
 
 
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInvokeFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* function() { return inputs_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallKeyed(LOperand* key) {
@@ -1707,6 +1806,22 @@
 };
 
 
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+};
+
+
+
 class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringCharCodeAt(LOperand* string, LOperand* index) {
@@ -1816,6 +1931,44 @@
 };
 
 
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampDToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
 class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@@ -1958,6 +2111,20 @@
 };
 
 
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LIn(LOperand* key, LOperand* object) {
+    inputs_[0] = key;
+    inputs_[1] = object;
+  }
+
+  LOperand* key() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(In, "in")
+};
+
+
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
@@ -2175,7 +2342,6 @@
 };
 
 #undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 3dcd427..d25ca49 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -46,7 +46,7 @@
         deoptimization_index_(deoptimization_index) { }
   virtual ~SafepointGenerator() { }
 
-  virtual void BeforeCall(int call_size) {
+  virtual void BeforeCall(int call_size) const {
     ASSERT(call_size >= 0);
     // Ensure that we have enough space after the previous safepoint position
     // for the generated code there.
@@ -63,7 +63,7 @@
     }
   }
 
-  virtual void AfterCall() {
+  virtual void AfterCall() const {
     codegen_->RecordSafepoint(pointers_, deoptimization_index_);
   }
 
@@ -85,13 +85,14 @@
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
+      GenerateDeoptJumpTable() &&
       GenerateSafepointTable();
 }
 
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
-  code->set_stack_slots(StackSlotCount());
+  code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
   Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -145,11 +146,25 @@
   // fp: Caller's frame pointer.
   // lr: Caller's pc.
 
+  // Strict mode functions need to replace the receiver with undefined
+  // when called as functions (without an explicit receiver
+  // object). r5 is zero for method calls and non-zero for function
+  // calls.
+  if (info_->is_strict_mode()) {
+    Label ok;
+    __ cmp(r5, Operand(0));
+    __ b(eq, &ok);
+    int receiver_offset = scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ str(r2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+
   __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
   __ add(fp, sp, Operand(2 * kPointerSize));  // Adjust FP to point to saved FP.
 
   // Reserve space for the stack slots needed by the code.
-  int slots = StackSlotCount();
+  int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
       __ mov(r0, Operand(slots));
@@ -249,13 +264,43 @@
     __ jmp(code->exit());
   }
 
-  // Force constant pool emission at the end of deferred code to make
-  // sure that no constant pools are emitted after the official end of
-  // the instruction sequence.
+  // Force constant pool emission at the end of the deferred code to make
+  // sure that no constant pools are emitted after it.
   masm()->CheckConstPool(true, false);
 
-  // Deferred code is the last part of the instruction sequence. Mark
-  // the generated code as done unless we bailed out.
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+  // Check that the jump table is accessible from everywhere in the function
+  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
+  // immediate of a branch instruction.
+  // To simplify, we consider the code size from the first instruction to the
+  // end of the jump table. We also don't consider the pc load delta.
+  // Each entry in the jump table generates one instruction and inlines one
+  // 32-bit data word after it.
+  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
+      deopt_jump_table_.length() * 2)) {
+    Abort("Generated code is too large");
+  }
+
+  // Block the constant pool emission during the jump table emission.
+  __ BlockConstPoolFor(deopt_jump_table_.length());
+  __ RecordComment("[ Deoptimisation jump table");
+  Label table_start;
+  __ bind(&table_start);
+  for (int i = 0; i < deopt_jump_table_.length(); i++) {
+    __ bind(&deopt_jump_table_[i].label);
+    __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
+    __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+  }
+  ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
+      deopt_jump_table_.length() * 2);
+  __ RecordComment("]");
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
   if (!is_aborted()) status_ = DONE;
   return !is_aborted();
 }
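The 24-bit reachability constraint above is easier to follow outside the assembler. The following is a minimal standalone sketch, not V8 code: it assumes 4-byte ARM instructions and a branch range of +/-2^23 instructions, and the helper name and constants are invented for illustration.

#include <cstdint>
#include <cstdio>

namespace {

const int kInstrSize = 4;   // every ARM instruction is 4 bytes
const int kEntrySlots = 2;  // each table entry: one ldr + one inline 32-bit word

// True if a branch emitted anywhere before the table can still reach the last
// entry with a 24-bit signed offset counted in instructions.
bool JumpTableIsReachable(int code_bytes_before_table, int table_entries) {
  int64_t instr_offset =
      code_bytes_before_table / kInstrSize + table_entries * kEntrySlots;
  return instr_offset >= -(1 << 23) && instr_offset < (1 << 23);
}

}  // namespace

int main() {
  std::printf("%d\n", JumpTableIsReachable(1 << 20, 128));    // 1: 1 MB of code fits
  std::printf("%d\n", JumpTableIsReachable(32 << 20, 1024));  // 0: 32 MB is out of range
}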
@@ -263,7 +308,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
-  safepoints_.Emit(masm(), StackSlotCount());
+  safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
@@ -459,7 +504,7 @@
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsArgument()) {
     ASSERT(is_tagged);
-    int src_index = StackSlotCount() + op->index();
+    int src_index = GetStackSlotCount() + op->index();
     translation->StoreStackSlot(src_index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -500,7 +545,7 @@
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
-  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+  if (code->kind() == Code::BINARY_OP_IC ||
       code->kind() == Code::COMPARE_IC) {
     __ nop();
   }
@@ -602,19 +647,18 @@
     return;
   }
 
+  if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
+
   if (cc == al) {
-    if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
-    if (FLAG_trap_on_deopt) {
-      Label done;
-      __ b(&done, NegateCondition(cc));
-      __ stop("trap_on_deopt");
-      __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-      __ bind(&done);
-    } else {
-      __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc);
+    // We often have several deopts to the same entry, reuse the last
+    // jump entry if this is the case.
+    if (deopt_jump_table_.is_empty() ||
+        (deopt_jump_table_.last().address != entry)) {
+      deopt_jump_table_.Add(JumpTableEntry(entry));
     }
+    __ b(cc, &deopt_jump_table_.last().label);
   }
 }
 
@@ -746,7 +790,7 @@
   }
   __ bind(label->label());
   current_block_ = label->block_id();
-  LCodeGen::DoGap(label);
+  DoGap(label);
 }
 
 
@@ -772,6 +816,11 @@
 }
 
 
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
 void LCodeGen::DoParameter(LParameter* instr) {
   // Nothing to do.
 }
@@ -1067,7 +1116,7 @@
     __ mov(r0, right);
     __ mov(r1, left);
   }
-  TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT);
+  BinaryOpStub stub(op, OVERWRITE_LEFT);
   __ CallStub(&stub);
   RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
                                          0,
@@ -1349,11 +1398,11 @@
       // Save r0-r3 on the stack.
       __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
 
-      __ PrepareCallCFunction(4, scratch0());
-      __ vmov(r0, r1, left);
-      __ vmov(r2, r3, right);
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ SetCallCDoubleArguments(left, right);
       __ CallCFunction(
-          ExternalReference::double_fp_operation(Token::MOD, isolate()), 4);
+          ExternalReference::double_fp_operation(Token::MOD, isolate()),
+          0, 2);
       // Move the result in the double result register.
       __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
 
@@ -1373,7 +1422,7 @@
   ASSERT(ToRegister(instr->InputAt(1)).is(r0));
   ASSERT(ToRegister(instr->result()).is(r0));
 
-  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ nop();  // Signals no inlined code.
 }
@@ -1625,6 +1674,28 @@
 }
 
 
+void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  Register right = ToRegister(instr->InputAt(1));
+  Register result = ToRegister(instr->result());
+
+  __ cmp(left, Operand(right));
+  __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
+}
+
+
+void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  Register right = ToRegister(instr->InputAt(1));
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  __ cmp(left, Operand(right));
+  EmitBranch(true_block, false_block, eq);
+}
+
+
 void LCodeGen::DoIsNull(LIsNull* instr) {
   Register reg = ToRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
@@ -1779,6 +1850,40 @@
 }
 
 
+void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  Label false_label, done;
+  __ JumpIfSmi(input, &false_label);
+  __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ ldrb(result, FieldMemOperand(result, Map::kBitFieldOffset));
+  __ tst(result, Operand(1 << Map::kIsUndetectable));
+  __ b(eq, &false_label);
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  __ jmp(&done);
+  __ bind(&false_label);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ tst(temp, Operand(1 << Map::kIsUndetectable));
+  EmitBranch(true_block, false_block, ne);
+}
+
+
 static InstanceType TestType(HHasInstanceType* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -2190,7 +2295,7 @@
     __ push(r0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  int32_t sp_delta = (ParameterCount() + 1) * kPointerSize;
+  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
   __ mov(sp, fp);
   __ ldm(ia_w, sp, fp.bit() | lr.bit());
   __ add(sp, sp, Operand(sp_delta));
@@ -2289,23 +2394,29 @@
 }
 
 
-void LCodeGen::EmitLoadField(Register result,
-                             Register object,
-                             Handle<Map> type,
-                             Handle<String> name) {
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+                                               Register object,
+                                               Handle<Map> type,
+                                               Handle<String> name) {
   LookupResult lookup;
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsProperty() && lookup.type() == FIELD);
-  int index = lookup.GetLocalFieldIndexFromMap(*type);
-  int offset = index * kPointerSize;
-  if (index < 0) {
-    // Negative property indices are in-object properties, indexed
-    // from the end of the fixed part of the object.
-    __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
+  ASSERT(lookup.IsProperty() &&
+         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+  if (lookup.type() == FIELD) {
+    int index = lookup.GetLocalFieldIndexFromMap(*type);
+    int offset = index * kPointerSize;
+    if (index < 0) {
+      // Negative property indices are in-object properties, indexed
+      // from the end of the fixed part of the object.
+      __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
+    } else {
+      // Non-negative property indices are in the properties array.
+      __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+      __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+    }
   } else {
-    // Non-negative property indices are in the properties array.
-    __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-    __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
+    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
   }
 }
 
@@ -2329,7 +2440,7 @@
       Label next;
       __ cmp(scratch, Operand(map));
       __ b(ne, &next);
-      EmitLoadField(result, object, map, name);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ b(&done);
       __ bind(&next);
     }
@@ -2338,7 +2449,7 @@
     if (instr->hydrogen()->need_generic()) {
       Label generic;
       __ b(ne, &generic);
-      EmitLoadField(result, object, map, name);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ b(&done);
       __ bind(&generic);
       __ mov(r2, Operand(name));
@@ -2346,7 +2457,7 @@
       CallCode(ic, RelocInfo::CODE_TARGET, instr);
     } else {
       DeoptimizeIf(ne, instr->environment());
-      EmitLoadField(result, object, map, name);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
     }
     __ bind(&done);
   }
@@ -2472,44 +2583,67 @@
   __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
 
   // Check for the hole value.
-  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-  __ cmp(result, scratch);
-  DeoptimizeIf(eq, instr->environment());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, scratch);
+    DeoptimizeIf(eq, instr->environment());
+  }
 }
 
 
 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
   Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
+  Register key = no_reg;
   ExternalArrayType array_type = instr->array_type();
-  if (array_type == kExternalFloatArray) {
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ExternalArrayTypeToShiftSize(array_type);
+
+  if (array_type == kExternalFloatArray || array_type == kExternalDoubleArray) {
     CpuFeatures::Scope scope(VFP3);
     DwVfpRegister result(ToDoubleRegister(instr->result()));
-    __ add(scratch0(), external_pointer, Operand(key, LSL, 2));
-    __ vldr(result.low(), scratch0(), 0);
-    __ vcvt_f64_f32(result, result.low());
+    Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
+                                    : Operand(key, LSL, shift_size));
+    __ add(scratch0(), external_pointer, operand);
+    if (array_type == kExternalFloatArray) {
+      __ vldr(result.low(), scratch0(), 0);
+      __ vcvt_f64_f32(result, result.low());
+    } else {  // i.e. array_type == kExternalDoubleArray
+      __ vldr(result, scratch0(), 0);
+    }
   } else {
     Register result(ToRegister(instr->result()));
+    MemOperand mem_operand(key_is_constant
+        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+        : MemOperand(external_pointer, key, LSL, shift_size));
     switch (array_type) {
       case kExternalByteArray:
-        __ ldrsb(result, MemOperand(external_pointer, key));
+        __ ldrsb(result, mem_operand);
         break;
       case kExternalUnsignedByteArray:
       case kExternalPixelArray:
-        __ ldrb(result, MemOperand(external_pointer, key));
+        __ ldrb(result, mem_operand);
         break;
       case kExternalShortArray:
-        __ ldrsh(result, MemOperand(external_pointer, key, LSL, 1));
+        __ ldrsh(result, mem_operand);
         break;
       case kExternalUnsignedShortArray:
-        __ ldrh(result, MemOperand(external_pointer, key, LSL, 1));
+        __ ldrh(result, mem_operand);
         break;
       case kExternalIntArray:
-        __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+        __ ldr(result, mem_operand);
         break;
       case kExternalUnsignedIntArray:
-        __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+        __ ldr(result, mem_operand);
         __ cmp(result, Operand(0x80000000));
         // TODO(danno): we could be more clever here, perhaps having a special
         // version of the stub that detects if the overflow case actually
@@ -2517,6 +2651,7 @@
         DeoptimizeIf(cs, instr->environment());
         break;
       case kExternalFloatArray:
+      case kExternalDoubleArray:
         UNREACHABLE();
         break;
     }
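The constant-key path above folds the element offset into the address as key << shift_size. A small sketch, not V8 code, of that addressing rule; the element-type enum and shift values are assumptions made for this example only.

#include <cassert>
#include <cstdint>

enum ExampleArrayType { kByte, kShort, kInt, kFloat, kDouble };

// Assumed log2 of the element width, standing in for ExternalArrayTypeToShiftSize.
int ShiftSizeFor(ExampleArrayType type) {
  switch (type) {
    case kByte:   return 0;  // 1-byte elements
    case kShort:  return 1;  // 2-byte elements
    case kInt:
    case kFloat:  return 2;  // 4-byte elements
    case kDouble: return 3;  // 8-byte elements
  }
  return 0;
}

// Same arithmetic as the constant_key * (1 << shift_size) operands above.
uint32_t ConstantKeyOffset(int constant_key, ExampleArrayType type) {
  assert((constant_key & 0xF0000000) == 0);  // same guard as the Abort above
  return static_cast<uint32_t>(constant_key) << ShiftSizeFor(type);
}

int main() {
  assert(ConstantKeyOffset(10, kDouble) == 80);  // element 10 of a double array
  assert(ConstantKeyOffset(10, kShort) == 20);
  return 0;
}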
@@ -2582,6 +2717,9 @@
   ASSERT(function.is(r1));  // Required by InvokeFunction.
   ASSERT(ToRegister(instr->result()).is(r0));
 
+  // TODO(1412): This is not correct if the called function is a
+  // strict mode function or a native.
+  //
   // If the receiver is null or undefined, we have to pass the global object
   // as a receiver.
   Label global_object, receiver_ok;
@@ -2601,6 +2739,8 @@
 
   __ bind(&global_object);
   __ ldr(receiver, GlobalObjectOperand());
+  __ ldr(receiver,
+         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
 
   // Copy the arguments to this function possibly from the
@@ -2640,7 +2780,8 @@
   // The number of arguments is stored in receiver which is r0, as expected
   // by InvokeFunction.
   v8::internal::ParameterCount actual(receiver);
-  __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+  __ InvokeFunction(function, actual, CALL_FUNCTION,
+                    safepoint_generator, CALL_AS_METHOD);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
@@ -2687,7 +2828,8 @@
 
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
-                                 LInstruction* instr) {
+                                 LInstruction* instr,
+                                 CallKind call_kind) {
   // Change context if needed.
   bool change_context =
       (info()->closure()->context() != function->context()) ||
@@ -2707,6 +2849,7 @@
   RecordPosition(pointers->position());
 
   // Invoke function.
+  __ SetCallKind(r5, call_kind);
   __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
   __ Call(ip);
 
@@ -2721,7 +2864,10 @@
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
   __ mov(r1, Operand(instr->function()));
-  CallKnownFunction(instr->function(), instr->arity(), instr);
+  CallKnownFunction(instr->function(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_METHOD);
 }
 
 
@@ -2871,9 +3017,49 @@
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
-  Register scratch1 = scratch0();
-  Register scratch2 = result;
-  __ EmitVFPTruncate(kRoundToNearest,
+  Register scratch1 = result;
+  Register scratch2 = scratch0();
+  Label done, check_sign_on_zero;
+
+  // Extract exponent bits.
+  __ vmov(scratch1, input.high());
+  __ ubfx(scratch2,
+          scratch1,
+          HeapNumber::kExponentShift,
+          HeapNumber::kExponentBits);
+
+  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+  __ cmp(scratch2, Operand(HeapNumber::kExponentBias - 2));
+  __ mov(result, Operand(0), LeaveCC, le);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ b(le, &check_sign_on_zero);
+  } else {
+    __ b(le, &done);
+  }
+
+  // The following conversion will not work with numbers
+  // outside of ]-2^32, 2^32[.
+  __ cmp(scratch2, Operand(HeapNumber::kExponentBias + 32));
+  DeoptimizeIf(ge, instr->environment());
+
+  // Save the original sign for later comparison.
+  __ and_(scratch2, scratch1, Operand(HeapNumber::kSignMask));
+
+  __ vmov(double_scratch0(), 0.5);
+  __ vadd(input, input, double_scratch0());
+
+  // Check sign of the result: if the sign changed, the input
+  // value was in ]-0.5, 0[ and the result should be -0.
+  __ vmov(scratch1, input.high());
+  __ eor(scratch1, scratch1, Operand(scratch2), SetCC);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(mi, instr->environment());
+  } else {
+    __ mov(result, Operand(0), LeaveCC, mi);
+    __ b(mi, &done);
+  }
+
+  __ EmitVFPTruncate(kRoundToMinusInf,
                      double_scratch0().low(),
                      input,
                      scratch1,
@@ -2883,14 +3069,14 @@
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // Test for -0.
-    Label done;
     __ cmp(result, Operand(0));
     __ b(ne, &done);
+    __ bind(&check_sign_on_zero);
     __ vmov(scratch1, input.high());
     __ tst(scratch1, Operand(HeapNumber::kSignMask));
     DeoptimizeIf(ne, instr->environment());
-    __ bind(&done);
   }
+  __ bind(&done);
 }
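The new DoMathRound sequence rounds to nearest by adding 0.5 and truncating toward minus infinity, with the small-magnitude and minus-zero cases peeled off first. A scalar sketch, not V8 code, of the same strategy:

#include <cmath>
#include <cstdio>

// minus_zero reports the case the optimized code either deoptimizes on or
// materializes as 0, depending on kBailoutOnMinusZero.
double RoundLikeAbove(double x, bool* minus_zero) {
  *minus_zero = false;
  if (x > -0.5 && x < 0.5) {    // the exponent check above: |x| < 0.5
    *minus_zero = std::signbit(x);
    return 0.0;
  }
  double shifted = x + 0.5;
  if (std::signbit(shifted) != std::signbit(x)) {  // sign changed: x was in [-0.5, 0[
    *minus_zero = true;
    return 0.0;
  }
  return std::floor(shifted);   // the kRoundToMinusInf truncation
}

int main() {
  bool mz;
  std::printf("%g %g %g\n", RoundLikeAbove(2.5, &mz),
              RoundLikeAbove(-2.5, &mz), RoundLikeAbove(-0.25, &mz));  // 3 -2 0
}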
 
 
@@ -2925,19 +3111,18 @@
   Representation exponent_type = instr->hydrogen()->right()->representation();
   if (exponent_type.IsDouble()) {
     // Prepare arguments and call C function.
-    __ PrepareCallCFunction(4, scratch);
-    __ vmov(r0, r1, ToDoubleRegister(left));
-    __ vmov(r2, r3, ToDoubleRegister(right));
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left),
+                               ToDoubleRegister(right));
     __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 4);
+        ExternalReference::power_double_double_function(isolate()), 0, 2);
   } else if (exponent_type.IsInteger32()) {
     ASSERT(ToRegister(right).is(r0));
     // Prepare arguments and call C function.
-    __ PrepareCallCFunction(4, scratch);
-    __ mov(r2, ToRegister(right));
-    __ vmov(r0, r1, ToDoubleRegister(left));
+    __ PrepareCallCFunction(1, 1, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
     __ CallCFunction(
-        ExternalReference::power_double_int_function(isolate()), 4);
+        ExternalReference::power_double_int_function(isolate()), 1, 1);
   } else {
     ASSERT(exponent_type.IsTagged());
     ASSERT(instr->hydrogen()->left()->representation().IsDouble());
@@ -2967,11 +3152,10 @@
 
     // Prepare arguments and call C function.
     __ bind(&call);
-    __ PrepareCallCFunction(4, scratch);
-    __ vmov(r0, r1, ToDoubleRegister(left));
-    __ vmov(r2, r3, result_reg);
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
     __ CallCFunction(
-        ExternalReference::power_double_double_function(isolate()), 4);
+        ExternalReference::power_double_double_function(isolate()), 0, 2);
   }
   // Store the result in the result register.
   __ GetCFunctionDoubleResult(result_reg);
@@ -3035,6 +3219,21 @@
 }
 
 
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(r1));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator generator(this, pointers, env->deoptimization_index());
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
 
@@ -3050,10 +3249,11 @@
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
-      arity, NOT_IN_LOOP);
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
   __ mov(r2, Operand(instr->name()));
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, mode, instr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
@@ -3063,7 +3263,7 @@
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ Drop(1);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3074,10 +3274,11 @@
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic =
-      isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP);
+      isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
   __ mov(r2, Operand(instr->name()));
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+  CallCode(ic, mode, instr);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
@@ -3085,7 +3286,7 @@
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
   __ mov(r1, Operand(instr->target()));
-  CallKnownFunction(instr->target(), instr->arity(), instr);
+  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
 
@@ -3185,35 +3386,53 @@
     LStoreKeyedSpecializedArrayElement* instr) {
 
   Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
+  Register key = no_reg;
   ExternalArrayType array_type = instr->array_type();
-  if (array_type == kExternalFloatArray) {
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort("array index constant value too big.");
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int shift_size = ExternalArrayTypeToShiftSize(array_type);
+
+  if (array_type == kExternalFloatArray || array_type == kExternalDoubleArray) {
     CpuFeatures::Scope scope(VFP3);
     DwVfpRegister value(ToDoubleRegister(instr->value()));
-    __ add(scratch0(), external_pointer, Operand(key, LSL, 2));
-    __ vcvt_f32_f64(double_scratch0().low(), value);
-    __ vstr(double_scratch0().low(), scratch0(), 0);
+    Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
+                                    : Operand(key, LSL, shift_size));
+    __ add(scratch0(), external_pointer, operand);
+    if (array_type == kExternalFloatArray) {
+      __ vcvt_f32_f64(double_scratch0().low(), value);
+      __ vstr(double_scratch0().low(), scratch0(), 0);
+    } else {  // i.e. array_type == kExternalDoubleArray
+      __ vstr(value, scratch0(), 0);
+    }
   } else {
     Register value(ToRegister(instr->value()));
+    MemOperand mem_operand(key_is_constant
+        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+        : MemOperand(external_pointer, key, LSL, shift_size));
     switch (array_type) {
       case kExternalPixelArray:
-        // Clamp the value to [0..255].
-        __ Usat(value, 8, Operand(value));
-        __ strb(value, MemOperand(external_pointer, key));
-        break;
       case kExternalByteArray:
       case kExternalUnsignedByteArray:
-        __ strb(value, MemOperand(external_pointer, key));
+        __ strb(value, mem_operand);
         break;
       case kExternalShortArray:
       case kExternalUnsignedShortArray:
-        __ strh(value, MemOperand(external_pointer, key, LSL, 1));
+        __ strh(value, mem_operand);
         break;
       case kExternalIntArray:
       case kExternalUnsignedIntArray:
-        __ str(value, MemOperand(external_pointer, key, LSL, 2));
+        __ str(value, mem_operand);
         break;
       case kExternalFloatArray:
+      case kExternalDoubleArray:
         UNREACHABLE();
         break;
     }
@@ -3233,6 +3452,14 @@
 }
 
 
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  __ push(ToRegister(instr->left()));
+  __ push(ToRegister(instr->right()));
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
@@ -3561,10 +3788,13 @@
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   if (instr->needs_check()) {
-    __ tst(ToRegister(input), Operand(kSmiTagMask));
-    DeoptimizeIf(ne, instr->environment());
+    ASSERT(kHeapObjectTag == 1);
+    // If the input is a HeapObject, SmiUntag will set the carry flag.
+    __ SmiUntag(ToRegister(input), SetCC);
+    DeoptimizeIf(cs, instr->environment());
+  } else {
+    __ SmiUntag(ToRegister(input));
   }
-  __ SmiUntag(ToRegister(input));
 }
 
 
@@ -3641,6 +3871,12 @@
 
   Label done;
 
+  // The input was optimistically untagged; revert it.
+  // The carry flag is set when we reach this deferred code as we just executed
+  // SmiUntag(heap_object, SetCC)
+  ASSERT(kHeapObjectTag == 1);
+  __ adc(input_reg, input_reg, Operand(input_reg));
+
   // Heap number map check.
   __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
@@ -3713,13 +3949,12 @@
 
   DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
 
-  // Smi check.
-  __ tst(input_reg, Operand(kSmiTagMask));
-  __ b(ne, deferred->entry());
-
-  // Smi to int32 conversion
-  __ SmiUntag(input_reg);  // Untag smi.
-
+  // Optimistically untag the input.
+  // If the input is a HeapObject, SmiUntag will set the carry flag.
+  __ SmiUntag(input_reg, SetCC);
+  // Branch to deferred code if the input was tagged.
+  // The deferred code will take care of restoring the tag.
+  __ b(cs, deferred->entry());
   __ bind(deferred->exit());
 }
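The optimistic untagging above relies on V8's pointer tagging: smis keep their value in the upper 31 bits with a 0 tag bit, heap object pointers have tag bit 1. Shifting right by one with SetCC moves the old tag bit into the carry flag, and adc(reg, reg, reg) in the deferred path computes 2 * reg + carry, which restores the tagged word. A minimal sketch, not V8 code, assuming that tagging scheme:

#include <cassert>
#include <cstdint>

struct UntagResult {
  int32_t value;  // the shifted word (the smi value when carry is false)
  bool carry;     // the old low bit, i.e. the heap-object tag
};

// Mirrors SmiUntag(reg, SetCC): arithmetic shift right by one, shifted-out
// bit captured as the carry flag.
UntagResult SmiUntagSetCC(int32_t tagged) {
  return { tagged >> 1, (tagged & 1) != 0 };
}

// Mirrors adc(reg, reg, reg): 2 * value + carry rebuilds the original word
// so the deferred (slow) path sees the untouched tagged value.
int32_t RestoreTag(const UntagResult& r) {
  return r.value * 2 + (r.carry ? 1 : 0);
}

int main() {
  int32_t smi = 7 << 1;           // tagged smi 7
  int32_t heap_ptr = 0x1000 | 1;  // tagged heap object pointer
  assert(!SmiUntagSetCC(smi).carry && SmiUntagSetCC(smi).value == 7);
  assert(SmiUntagSetCC(heap_ptr).carry);
  assert(RestoreTag(SmiUntagSetCC(heap_ptr)) == heap_ptr);
}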
 
@@ -3792,22 +4027,41 @@
 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register scratch = scratch0();
-  InstanceType first = instr->hydrogen()->first();
-  InstanceType last = instr->hydrogen()->last();
 
   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  __ cmp(scratch, Operand(first));
 
-  // If there is only one type in the interval check for equality.
-  if (first == last) {
-    DeoptimizeIf(ne, instr->environment());
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ cmp(scratch, Operand(first));
+
+    // If there is only one type in the interval check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr->environment());
+    } else {
+      DeoptimizeIf(lo, instr->environment());
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmp(scratch, Operand(last));
+        DeoptimizeIf(hi, instr->environment());
+      }
+    }
   } else {
-    DeoptimizeIf(lo, instr->environment());
-    // Omit check for the last type.
-    if (last != LAST_TYPE) {
-      __ cmp(scratch, Operand(last));
-      DeoptimizeIf(hi, instr->environment());
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (IsPowerOf2(mask)) {
+      ASSERT(tag == 0 || IsPowerOf2(tag));
+      __ tst(scratch, Operand(mask));
+      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+    } else {
+      __ and_(scratch, scratch, Operand(mask));
+      __ cmp(scratch, Operand(tag));
+      DeoptimizeIf(ne, instr->environment());
     }
   }
 }
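DoCheckInstanceType now emits two shapes of non-interval check: the generic (instance_type & mask) == tag compare, and a single tst when the mask has one bit and the tag is either zero or that bit. A sketch, not V8 code, showing that the shortcut is equivalent:

#include <cassert>
#include <cstdint>

bool IsPowerOf2(uint8_t x) { return x != 0 && (x & (x - 1)) == 0; }

bool PassesMaskAndTag(uint8_t instance_type, uint8_t mask, uint8_t tag) {
  if (IsPowerOf2(mask) && (tag == 0 || tag == mask)) {
    bool bit_clear = (instance_type & mask) == 0;  // what the tst computes
    return tag == 0 ? bit_clear : !bit_clear;      // deopt on the opposite condition
  }
  return (instance_type & mask) == tag;            // the and_ + cmp path
}

int main() {
  // With a single-bit mask both forms must agree for every input byte.
  for (int t = 0; t < 256; ++t) {
    assert(PassesMaskAndTag(t, 0x10, 0x10) == ((t & 0x10) == 0x10));
    assert(PassesMaskAndTag(t, 0x10, 0x00) == ((t & 0x10) == 0x00));
  }
}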
@@ -3832,6 +4086,59 @@
 }
 
 
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for heap number
+  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(factory()->heap_number_map()));
+  __ b(eq, &heap_number);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ cmp(input_reg, Operand(factory()->undefined_value()));
+  DeoptimizeIf(ne, instr->environment());
+  __ mov(result_reg, Operand(0));
+  __ jmp(&done);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ vldr(double_scratch0(), FieldMemOperand(input_reg,
+                                             HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+  __ jmp(&done);
+
+  // smi
+  __ bind(&is_smi);
+  __ SmiUntag(result_reg, input_reg);
+  __ ClampUint8(result_reg, result_reg);
+
+  __ bind(&done);
+}
+
+
 void LCodeGen::LoadHeapObject(Register result,
                               Handle<HeapObject> object) {
   if (heap()->InNewSpace(*object)) {
@@ -4187,7 +4494,23 @@
   SafepointGenerator safepoint_generator(this,
                                          pointers,
                                          env->deoptimization_index());
-  __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+  Register obj = ToRegister(instr->object());
+  Register key = ToRegister(instr->key());
+  __ Push(key, obj);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator safepoint_generator(this,
+                                         pointers,
+                                         env->deoptimization_index());
+  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
 }
 
 
@@ -4220,6 +4543,8 @@
 }
 
 
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 092e7b7..8253c17 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -51,6 +51,7 @@
         current_instruction_(-1),
         instructions_(chunk->instructions()),
         deoptimizations_(4),
+        deopt_jump_table_(4),
         deoptimization_literals_(8),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -115,6 +116,7 @@
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
 
   // Emit frame translation commands for an environment.
   void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -158,8 +160,8 @@
                        Register temporary,
                        Register temporary2);
 
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -171,6 +173,7 @@
   bool GeneratePrologue();
   bool GenerateBody();
   bool GenerateDeferredCode();
+  bool GenerateDeoptJumpTable();
   bool GenerateSafepointTable();
 
   enum SafepointMode {
@@ -206,7 +209,8 @@
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
-                         LInstruction* instr);
+                         LInstruction* instr,
+                         CallKind call_kind);
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
@@ -284,10 +288,18 @@
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp1, Register temp2);
 
-  void EmitLoadField(Register result,
-                     Register object,
-                     Handle<Map> type,
-                     Handle<String> name);
+  void EmitLoadFieldOrConstantFunction(Register result,
+                                       Register object,
+                                       Handle<Map> type,
+                                       Handle<String> name);
+
+  struct JumpTableEntry {
+    explicit inline JumpTableEntry(Address entry)
+        : label(),
+          address(entry) { }
+    Label label;
+    Address address;
+  };
 
   LChunk* const chunk_;
   MacroAssembler* const masm_;
@@ -297,6 +309,7 @@
   int current_instruction_;
   const ZoneList<LInstruction*>* instructions_;
   ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<JumpTableEntry> deopt_jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 6a095d3..c227b13 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -83,7 +83,7 @@
 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond) {
 #if USE_BX
-  mov(ip, Operand(target, rmode), LeaveCC, cond);
+  mov(ip, Operand(target, rmode));
   bx(ip, cond);
 #else
   mov(pc, Operand(target, rmode), LeaveCC, cond);
@@ -148,8 +148,9 @@
 }
 
 
-void MacroAssembler::Call(
-    intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+void MacroAssembler::Call(intptr_t target,
+                          RelocInfo::Mode rmode,
+                          Condition cond) {
   // Block constant pool for the call instruction sequence.
   BlockConstPoolScope block_const_pool(this);
 #ifdef DEBUG
@@ -167,7 +168,7 @@
   // we have to do it explicitly.
   positions_recorder()->WriteRecordedPositions();
 
-  mov(ip, Operand(target, rmode), LeaveCC, cond);
+  mov(ip, Operand(target, rmode));
   blx(ip, cond);
 
   ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
@@ -214,8 +215,31 @@
 }
 
 
-void MacroAssembler::Call(
-    Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
+void MacroAssembler::CallWithAstId(Handle<Code> code,
+                                   RelocInfo::Mode rmode,
+                                   unsigned ast_id,
+                                   Condition cond) {
+#ifdef DEBUG
+  int pre_position = pc_offset();
+#endif
+
+  ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
+  ASSERT(ast_id != kNoASTId);
+  ASSERT(ast_id_for_reloc_info_ == kNoASTId);
+  ast_id_for_reloc_info_ = ast_id;
+  // 'code' is always generated ARM code, never THUMB code
+  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+
+#ifdef DEBUG
+  int post_position = pc_offset();
+  CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
+#endif
+}
+
+
+void MacroAssembler::Call(Handle<Code> code,
+                          RelocInfo::Mode rmode,
+                          Condition cond) {
 #ifdef DEBUG
   int pre_position = pc_offset();
 #endif
@@ -286,6 +310,15 @@
 }
 
 
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+  ASSERT(CpuFeatures::IsSupported(VFP3));
+  CpuFeatures::Scope scope(VFP3);
+  if (!dst.is(src)) {
+    vmov(dst, src);
+  }
+}
+
+
 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                          Condition cond) {
   if (!src2.is_reg() &&
@@ -839,7 +872,25 @@
 }
 
 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
-  vmov(dst, r0, r1);
+  if (use_eabi_hardfloat()) {
+    Move(dst, d0);
+  } else {
+    vmov(dst, r0, r1);
+  }
+}
+
+
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+  // This macro takes the dst register to make the code more readable
+  // at the call sites. However, the dst register has to be r5 to
+  // follow the calling convention which requires the call type to be
+  // in r5.
+  ASSERT(dst.is(r5));
+  if (call_kind == CALL_AS_FUNCTION) {
+    mov(dst, Operand(Smi::FromInt(1)));
+  } else {
+    mov(dst, Operand(Smi::FromInt(0)));
+  }
 }
 
 
@@ -849,7 +900,8 @@
                                     Register code_reg,
                                     Label* done,
                                     InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   bool definitely_matches = false;
   Label regular_invoke;
 
@@ -904,13 +956,13 @@
     Handle<Code> adaptor =
         isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (flag == CALL_FUNCTION) {
-      if (call_wrapper != NULL) {
-        call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
-      }
+      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+      SetCallKind(r5, call_kind);
       Call(adaptor, RelocInfo::CODE_TARGET);
-      if (call_wrapper != NULL) call_wrapper->AfterCall();
+      call_wrapper.AfterCall();
       b(done);
     } else {
+      SetCallKind(r5, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
     }
     bind(&regular_invoke);
@@ -922,17 +974,20 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
-                                CallWrapper* call_wrapper) {
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
   Label done;
 
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
-                 call_wrapper);
+                 call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
-    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(r5, call_kind);
     Call(code);
-    if (call_wrapper != NULL) call_wrapper->AfterCall();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(r5, call_kind);
     Jump(code);
   }
 
@@ -946,13 +1001,17 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                CallKind call_kind) {
   Label done;
 
-  InvokePrologue(expected, actual, code, no_reg, &done, flag);
+  InvokePrologue(expected, actual, code, no_reg, &done, flag,
+                 NullCallWrapper(), call_kind);
   if (flag == CALL_FUNCTION) {
+    SetCallKind(r5, call_kind);
     Call(code, rmode);
   } else {
+    SetCallKind(r5, call_kind);
     Jump(code, rmode);
   }
 
@@ -965,7 +1024,8 @@
 void MacroAssembler::InvokeFunction(Register fun,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   // Contract with called JS functions requires that function is passed in r1.
   ASSERT(fun.is(r1));
 
@@ -982,13 +1042,14 @@
       FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(expected_reg);
-  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    CallKind call_kind) {
   ASSERT(function->is_compiled());
 
   // Get the function and setup the context.
@@ -1003,9 +1064,9 @@
     // code field in the function to allow recompilation to take effect
     // without changing any of the call sites.
     ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-    InvokeCode(r3, expected, actual, flag);
+    InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
   } else {
-    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
   }
 }
 
@@ -1620,8 +1681,8 @@
                               Register scratch,
                               Handle<Map> map,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -1635,8 +1696,8 @@
                               Register scratch,
                               Heap::RootListIndex index,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -1646,6 +1707,23 @@
 }
 
 
+void MacroAssembler::DispatchMap(Register obj,
+                                 Register scratch,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  mov(ip, Operand(map));
+  cmp(scratch, ip);
+  Jump(success, RelocInfo::CODE_TARGET, eq);
+  bind(&fail);
+}
+
+
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
@@ -1699,6 +1777,17 @@
 }
 
 
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
+  return result;
+}
+
+
 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
   ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
@@ -1711,7 +1800,7 @@
   { MaybeObject* maybe_result = stub->TryGetCode();
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
   return result;
 }
 
@@ -2276,15 +2365,17 @@
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeJSFlags flags,
-                                   CallWrapper* call_wrapper) {
+                                   InvokeFlag flag,
+                                   const CallWrapper& call_wrapper) {
   GetBuiltinEntry(r2, id);
-  if (flags == CALL_JS) {
-    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(r2));
+    SetCallKind(r5, CALL_AS_METHOD);
     Call(r2);
-    if (call_wrapper != NULL) call_wrapper->AfterCall();
+    call_wrapper.AfterCall();
   } else {
-    ASSERT(flags == JUMP_JS);
+    ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(r5, CALL_AS_METHOD);
     Jump(r2);
   }
 }
@@ -2475,7 +2566,7 @@
   ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
     b(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
@@ -2794,12 +2885,36 @@
 
 static const int kRegisterPassedArguments = 4;
 
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
-  int frame_alignment = ActivationFrameAlignment();
 
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+                                              int num_double_arguments) {
+  int stack_passed_words = 0;
+  if (use_eabi_hardfloat()) {
+    // In the hard floating point calling convention, we can use
+    // all double registers to pass doubles.
+    if (num_double_arguments > DoubleRegister::kNumRegisters) {
+      stack_passed_words +=
+          2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+    }
+  } else {
+    // In the soft floating point calling convention, every double
+    // argument is passed using two registers.
+    num_reg_arguments += 2 * num_double_arguments;
+  }
   // Up to four simple arguments are passed in registers r0..r3.
-  int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
-                               0 : num_arguments - kRegisterPassedArguments;
+  if (num_reg_arguments > kRegisterPassedArguments) {
+    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+  }
+  return stack_passed_words;
+}
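CalculateStackPassedWords counts how many argument words spill to the stack under the two ARM EABI variants: with hard-float, doubles travel in VFP registers and only the overflow spills; with soft-float, each double consumes a pair of core registers. A standalone sketch, not V8 code; the register counts (4 core registers, 16 double registers) are assumptions for the example.

#include <cstdio>

const int kCoreArgRegisters = 4;     // r0-r3
const int kDoubleArgRegisters = 16;  // d0-d15 under hard-float

int StackPassedWords(int reg_args, int double_args, bool hardfloat) {
  int words = 0;
  if (hardfloat) {
    if (double_args > kDoubleArgRegisters)               // only the overflow spills,
      words += 2 * (double_args - kDoubleArgRegisters);  // two words per double
  } else {
    reg_args += 2 * double_args;                         // each double uses a register pair
  }
  if (reg_args > kCoreArgRegisters)
    words += reg_args - kCoreArgRegisters;
  return words;
}

int main() {
  // The PrepareCallCFunction(0, 2, ...) call sites above: two doubles, no
  // integer arguments, so nothing is passed on the stack under either convention.
  std::printf("%d %d\n", StackPassedWords(0, 2, true), StackPassedWords(0, 2, false));
}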
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          int num_double_arguments,
+                                          Register scratch) {
+  int frame_alignment = ActivationFrameAlignment();
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
   if (frame_alignment > kPointerSize) {
     // Make stack end at alignment and make room for num_arguments - 4 words
     // and the original value of sp.
@@ -2814,25 +2929,92 @@
 }
 
 
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          Register scratch) {
+  PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+  if (use_eabi_hardfloat()) {
+    Move(d0, dreg);
+  } else {
+    vmov(r0, r1, dreg);
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
+                                             DoubleRegister dreg2) {
+  if (use_eabi_hardfloat()) {
+    if (dreg2.is(d0)) {
+      ASSERT(!dreg1.is(d1));
+      Move(d1, dreg2);
+      Move(d0, dreg1);
+    } else {
+      Move(d0, dreg1);
+      Move(d1, dreg2);
+    }
+  } else {
+    vmov(r0, r1, dreg1);
+    vmov(r2, r3, dreg2);
+  }
+}
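The two-double overload above has to watch for aliasing: if the second source already lives in d0, writing the first argument into d0 first would clobber it, so the moves are issued in the opposite order. A sketch of that ordering concern, not V8 code, with plain doubles standing in for VFP registers:

#include <cassert>

struct Regs { double d0, d1; };  // stand-ins for the first two argument registers

void PassTwoDoubles(Regs* r, const double* src1, const double* src2) {
  if (src2 == &r->d0) {   // second argument is already in d0
    r->d1 = *src2;        // move it out of the way first
    r->d0 = *src1;
  } else {
    r->d0 = *src1;
    r->d1 = *src2;
  }
}

int main() {
  Regs r = {2.0, 0.0};    // 2.0 is already "in d0"
  double other = 5.0;
  PassTwoDoubles(&r, &other, &r.d0);
  assert(r.d0 == 5.0 && r.d1 == 2.0);  // nothing was clobbered
}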
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+                                             Register reg) {
+  if (use_eabi_hardfloat()) {
+    Move(d0, dreg);
+    Move(r0, reg);
+  } else {
+    Move(r2, reg);
+    vmov(r0, r1, dreg);
+  }
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(no_reg,
+                      function,
+                      ip,
+                      num_reg_arguments,
+                      num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   Register scratch,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function,
+                      ExternalReference::the_hole_value_location(isolate()),
+                      scratch,
+                      num_reg_arguments,
+                      num_double_arguments);
+}
+
+
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
-  CallCFunctionHelper(no_reg, function, ip, num_arguments);
+  CallCFunction(function, num_arguments, 0);
 }
 
+
 void MacroAssembler::CallCFunction(Register function,
                                    Register scratch,
                                    int num_arguments) {
-  CallCFunctionHelper(function,
-                      ExternalReference::the_hole_value_location(isolate()),
-                      scratch,
-                      num_arguments);
+  CallCFunction(function, scratch, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
                                          ExternalReference function_reference,
                                          Register scratch,
-                                         int num_arguments) {
+                                         int num_reg_arguments,
+                                         int num_double_arguments) {
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -2861,9 +3043,9 @@
     function = scratch;
   }
   Call(function);
-  int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
-                               0 : num_arguments - kRegisterPassedArguments;
-  if (OS::ActivationFrameAlignment() > kPointerSize) {
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
+  if (ActivationFrameAlignment() > kPointerSize) {
     ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
   } else {
     add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
@@ -2891,6 +3073,55 @@
 }
 
 
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+  Usat(output_reg, 8, Operand(input_reg));
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+                                        DoubleRegister input_reg,
+                                        DoubleRegister temp_double_reg) {
+  Label above_zero;
+  Label done;
+  Label in_bounds;
+
+  vmov(temp_double_reg, 0.0);
+  VFPCompareAndSetFlags(input_reg, temp_double_reg);
+  b(gt, &above_zero);
+
+  // Double value is less than zero, NaN or Inf, return 0.
+  mov(result_reg, Operand(0));
+  b(al, &done);
+
+  // Double value is >= 255, return 255.
+  bind(&above_zero);
+  vmov(temp_double_reg, 255.0);
+  VFPCompareAndSetFlags(input_reg, temp_double_reg);
+  b(le, &in_bounds);
+  mov(result_reg, Operand(255));
+  b(al, &done);
+
+  // In 0-255 range, round and truncate.
+  bind(&in_bounds);
+  vmov(temp_double_reg, 0.5);
+  vadd(temp_double_reg, input_reg, temp_double_reg);
+  vcvt_u32_f64(s0, temp_double_reg);
+  vmov(result_reg, s0);
+  bind(&done);
+}
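ClampDoubleToUint8 maps NaN and anything at or below zero to 0, anything above 255 to 255, and rounds the rest half-up before truncating, which is what the vcvt_u32_f64 conversion of value + 0.5 produces. A scalar sketch, not V8 code, of the same mapping:

#include <cassert>
#include <cmath>
#include <cstdint>

uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;              // negatives, -0 and NaN all land here
  if (value > 255.0) return 255;
  return static_cast<uint8_t>(value + 0.5);  // round half up, then truncate
}

int main() {
  assert(ClampDoubleToUint8(-3.5) == 0);
  assert(ClampDoubleToUint8(std::nan("")) == 0);
  assert(ClampDoubleToUint8(127.5) == 128);
  assert(ClampDoubleToUint8(1000.0) == 255);
}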
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  ldr(descriptors,
+      FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
+  Label not_smi;
+  JumpIfNotSmi(descriptors, &not_smi);
+  mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
+  bind(&not_smi);
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 8d817c0..1e2c9f4 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,13 +29,11 @@
 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
 
 #include "assembler.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
 
-// Forward declaration.
-class CallWrapper;
-
 // ----------------------------------------------------------------------------
 // Static helper functions
 
@@ -55,12 +53,6 @@
 const Register cp = { 8 };  // JavaScript context pointer
 const Register roots = { 10 };  // Roots array pointer.
 
-enum InvokeJSFlags {
-  CALL_JS,
-  JUMP_JS
-};
-
-
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
   // No special flags.
@@ -107,7 +99,13 @@
   static int CallSize(Handle<Code> code,
                       RelocInfo::Mode rmode,
                       Condition cond = al);
-  void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(Handle<Code> code,
+            RelocInfo::Mode rmode,
+            Condition cond = al);
+  void CallWithAstId(Handle<Code> code,
+            RelocInfo::Mode rmode,
+            unsigned ast_id,
+            Condition cond = al);
   void Ret(Condition cond = al);
 
   // Emit code to discard a non-negative number of pointer-sized elements
@@ -144,9 +142,12 @@
             Condition cond = al);
 
   void Call(Label* target);
+
+  // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Handle<Object> value);
-  // May do nothing if the registers are identical.
   void Move(Register dst, Register src);
+  void Move(DoubleRegister dst, DoubleRegister src);
+
   // Jumps to the label at the index given by the Smi in "index".
   void SmiJumpTable(Register index, Vector<Label*> targets);
   // Load an object from the root table.
@@ -347,29 +348,38 @@
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Set up call kind marking in the destination register. The method takes
+  // the register as an explicit first parameter to make the code more
+  // readable at the call sites.
+  void SetCallKind(Register dst, CallKind kind);
+
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag,
-                  CallWrapper* call_wrapper = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  CallKind call_kind);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      CallWrapper* call_wrapper = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      CallKind call_kind);
 
   void IsObjectJSObjectType(Register heap_object,
                             Register map,
@@ -577,13 +587,24 @@
                 Register scratch,
                 Handle<Map> map,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
 
   void CheckMap(Register obj,
                 Register scratch,
                 Heap::RootListIndex index,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object)
+  void DispatchMap(Register obj,
+                   Register scratch,
+                   Handle<Map> map,
+                   Handle<Code> success,
+                   SmiCheckType smi_check_type);
 
 
   // Compare the object in a register to a value from the root list.
@@ -704,6 +725,11 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);
 
+  // Call a code stub and return the code object called.  Try to generate
+  // the code if necessary.  Do not perform a GC but instead return a retry
+  // after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
+
   // Call a code stub.
   void TailCallStub(CodeStub* stub, Condition cond = al);
 
@@ -742,15 +768,32 @@
                        int num_arguments,
                        int result_size);
 
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, non-register arguments must be stored in
   // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
-  // are word sized.
+  // are word sized. If double arguments are used, this function assumes that
+  // all double arguments are stored before core registers; otherwise the
+  // correct alignment of the double values is not guaranteed.
   // Some compilers/platforms require the stack to be aligned when calling
   // C++ code.
   // Needs a scratch register to do some arithmetic. This register will be
   // trashed.
-  void PrepareCallCFunction(int num_arguments, Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            int num_double_registers,
+                            Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            Register scratch);
+
+  // There are two ways of passing double arguments on ARM, depending on
+  // whether soft or hard floating point ABI is used. These functions
+  // abstract parameter passing for the three different ways we call
+  // C functions from generated code.
+  void SetCallCDoubleArguments(DoubleRegister dreg);
+  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
+  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
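The word count that PrepareCallCFunction has to reserve follows directly from the ARM calling convention: with hard-float EABI, doubles travel in VFP registers, while with soft-float each double occupies a pair of core registers and therefore competes with the word arguments for r0-r3. A rough standalone sketch of that arithmetic, assuming four core argument registers and eight VFP double registers (a plausible reading of CalculateStackPassedWords, not V8's implementation):

#include <cstdio>

// Assumed ABI constants for this sketch: r0-r3 for word arguments and,
// on hard-float EABI, d0-d7 for double arguments.
static const int kCoreArgRegs = 4;
static const int kDoubleArgRegs = 8;

// Words that spill to the stack for a C call with the given argument counts.
static int StackPassedWords(int reg_args, int double_args, bool hardfloat) {
  int words = 0;
  if (hardfloat) {
    if (double_args > kDoubleArgRegs)
      words += 2 * (double_args - kDoubleArgRegs);
  } else {
    // Soft-float: each double is passed in two core registers.
    reg_args += 2 * double_args;
  }
  if (reg_args > kCoreArgRegs) words += reg_args - kCoreArgRegs;
  return words;
}

int main() {
  // 2 word args + 3 doubles under soft-float -> 8 register slots -> 4 words
  // end up on the stack.
  std::printf("%d\n", StackPassedWords(2, 3, false));
  return 0;
}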
 
   // Calls a C function and cleans up the space for arguments allocated
   // by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -759,6 +802,12 @@
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
   void CallCFunction(Register function, Register scratch, int num_arguments);
+  void CallCFunction(ExternalReference function,
+                     int num_reg_arguments,
+                     int num_double_arguments);
+  void CallCFunction(Register function, Register scratch,
+                     int num_reg_arguments,
+                     int num_double_arguments);
 
   void GetCFunctionDoubleResult(const DoubleRegister dst);
 
@@ -777,8 +826,8 @@
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeJSFlags flags,
-                     CallWrapper* call_wrapper = NULL);
+                     InvokeFlag flag,
+                     const CallWrapper& call_wrapper = NullCallWrapper());
 
   // Store the code object for the given builtin in the target register and
   // setup the function in r1.
@@ -825,6 +874,15 @@
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
 
+  // EABI variant for double arguments in use.
+  bool use_eabi_hardfloat() {
+#if USE_EABI_HARDFLOAT
+    return true;
+#else
+    return false;
+#endif
+  }
+
   // ---------------------------------------------------------------------------
   // Number utilities
 
@@ -952,17 +1010,29 @@
                                  Register result);
 
 
+  void ClampUint8(Register output_reg, Register input_reg);
+
+  void ClampDoubleToUint8(Register result_reg,
+                          DoubleRegister input_reg,
+                          DoubleRegister temp_double_reg);
+
+
+  void LoadInstanceDescriptors(Register map, Register descriptors);
+
  private:
   void CallCFunctionHelper(Register function,
                            ExternalReference function_reference,
                            Register scratch,
-                           int num_arguments);
+                           int num_reg_arguments,
+                           int num_double_arguments);
 
   void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
   static int CallSize(intptr_t target,
                       RelocInfo::Mode rmode,
                       Condition cond = al);
-  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(intptr_t target,
+            RelocInfo::Mode rmode,
+            Condition cond = al);
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
@@ -971,7 +1041,8 @@
                       Register code_reg,
                       Label* done,
                       InvokeFlag flag,
-                      CallWrapper* call_wrapper = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1032,21 +1103,6 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class CallWrapper {
- public:
-  CallWrapper() { }
-  virtual ~CallWrapper() { }
-  // Called just before emitting a call. Argument is the size of the generated
-  // call code.
-  virtual void BeforeCall(int call_size) = 0;
-  // Called just after emitting a call, i.e., at the return site for the call.
-  virtual void AfterCall() = 0;
-};
-
-
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index da554c2..6af5355 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -719,20 +719,21 @@
 }
 
 
-void Simulator::Initialize() {
-  if (Isolate::Current()->simulator_initialized()) return;
-  Isolate::Current()->set_simulator_initialized(true);
-  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+void Simulator::Initialize(Isolate* isolate) {
+  if (isolate->simulator_initialized()) return;
+  isolate->set_simulator_initialized(true);
+  ::v8::internal::ExternalReference::set_redirector(isolate,
+                                                    &RedirectExternalReference);
 }
 
 
-Simulator::Simulator() : isolate_(Isolate::Current()) {
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
     i_cache_ = new v8::internal::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
-  Initialize();
+  Initialize(isolate);
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
   size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
@@ -848,17 +849,13 @@
 // Get the active Simulator for the current thread.
 Simulator* Simulator::current(Isolate* isolate) {
   v8::internal::Isolate::PerIsolateThreadData* isolate_data =
-      Isolate::CurrentPerIsolateThreadData();
-  if (isolate_data == NULL) {
-    Isolate::EnterDefaultIsolate();
-    isolate_data = Isolate::CurrentPerIsolateThreadData();
-  }
+      isolate->FindOrAllocatePerThreadDataForThisThread();
   ASSERT(isolate_data != NULL);
 
   Simulator* sim = isolate_data->simulator();
   if (sim == NULL) {
     // TODO(146): delete the simulator object when a thread/isolate goes away.
-    sim = new Simulator();
+    sim = new Simulator(isolate);
     isolate_data->set_simulator(sim);
   }
   return sim;
@@ -1009,26 +1006,74 @@
 }
 
 
-// For use in calls that take two double values, constructed from r0, r1, r2
-// and r3.
+// For use in calls that take two double values, constructed either
+// from r0-r3 or d0 and d1.
 void Simulator::GetFpArgs(double* x, double* y) {
-  // We use a char buffer to get around the strict-aliasing rules which
-  // otherwise allow the compiler to optimize away the copy.
-  char buffer[2 * sizeof(registers_[0])];
-  // Registers 0 and 1 -> x.
-  memcpy(buffer, registers_, sizeof(buffer));
-  memcpy(x, buffer, sizeof(buffer));
-  // Registers 2 and 3 -> y.
-  memcpy(buffer, registers_ + 2, sizeof(buffer));
-  memcpy(y, buffer, sizeof(buffer));
+  if (use_eabi_hardfloat()) {
+    *x = vfp_register[0];
+    *y = vfp_register[1];
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    // Registers 0 and 1 -> x.
+    memcpy(buffer, registers_, sizeof(*x));
+    memcpy(x, buffer, sizeof(*x));
+    // Registers 2 and 3 -> y.
+    memcpy(buffer, registers_ + 2, sizeof(*y));
+    memcpy(y, buffer, sizeof(*y));
+  }
+}
+
+// For use in calls that take one double value, constructed either
+// from r0 and r1 or d0.
+void Simulator::GetFpArgs(double* x) {
+  if (use_eabi_hardfloat()) {
+    *x = vfp_register[0];
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    // Registers 0 and 1 -> x.
+    memcpy(buffer, registers_, sizeof(*x));
+    memcpy(x, buffer, sizeof(*x));
+  }
 }
 
 
+// For use in calls that take one double value, constructed either
+// from r0 and r1 or d0, and one integer value.
+void Simulator::GetFpArgs(double* x, int32_t* y) {
+  if (use_eabi_hardfloat()) {
+    *x = vfp_register[0];
+    *y = registers_[1];
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    // Registers 0 and 1 -> x.
+    memcpy(buffer, registers_, sizeof(*x));
+    memcpy(x, buffer, sizeof(*x));
+    // Register 2 -> y.
+    memcpy(buffer, registers_ + 2, sizeof(*y));
+    memcpy(y, buffer, sizeof(*y));
+  }
+}
+
+
+// The return value is either in r0/r1 or d0.
 void Simulator::SetFpResult(const double& result) {
-  char buffer[2 * sizeof(registers_[0])];
-  memcpy(buffer, &result, sizeof(buffer));
-  // result -> registers 0 and 1.
-  memcpy(registers_, buffer, sizeof(buffer));
+  if (use_eabi_hardfloat()) {
+    char buffer[2 * sizeof(vfp_register[0])];
+    memcpy(buffer, &result, sizeof(buffer));
+    // Copy result to d0.
+    memcpy(vfp_register, buffer, sizeof(buffer));
+  } else {
+    char buffer[2 * sizeof(registers_[0])];
+    memcpy(buffer, &result, sizeof(buffer));
+    // Copy result to r0 and r1.
+    memcpy(registers_, buffer, sizeof(buffer));
+  }
 }
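In the soft-float case a double argument or result is simply the bit pattern spread across two 32-bit core registers; copying through a char buffer, as the simulator does, keeps the reinterpretation legal under strict aliasing. A small standalone illustration of that idea (plain C++, not simulator code):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Pretend regs[0]/regs[1] are r0/r1 holding the two halves of a double,
  // as they would under the soft-float calling convention.
  int32_t regs[2];
  double value = 1.5;
  std::memcpy(regs, &value, sizeof(value));        // "write r0/r1"

  double read_back;
  std::memcpy(&read_back, regs, sizeof(read_back));  // "read r0/r1"
  std::printf("%f\n", read_back);                    // prints 1.500000
  return 0;
}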
 
 
@@ -1282,12 +1327,13 @@
 
 
 // Calculate C flag value for additions.
-bool Simulator::CarryFrom(int32_t left, int32_t right) {
+bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
   uint32_t uleft = static_cast<uint32_t>(left);
   uint32_t uright = static_cast<uint32_t>(right);
   uint32_t urest  = 0xffffffffU - uleft;
 
-  return (uright > urest);
+  return (uright > urest) ||
+         (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
 }
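The extended predicate has to report a carry not only for left + right but also for the extra +1 when the incoming carry is set, which is why it checks both (uright + 1) > urest and uright > (urest - 1). A tiny standalone cross-check of the same condition against 64-bit arithmetic (illustrative only):

#include <cstdint>
#include <cstdio>

// Carry-out of a 32-bit addition with an optional carry-in, written the
// same way as the simulator code above.
static bool CarryFrom(int32_t left, int32_t right, int32_t carry) {
  uint32_t uleft = static_cast<uint32_t>(left);
  uint32_t uright = static_cast<uint32_t>(right);
  uint32_t urest = 0xffffffffU - uleft;
  return (uright > urest) ||
         (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
}

int main() {
  int32_t l = -1, r = 0, c = 1;  // 0xffffffff + 0 + 1 overflows 32 bits.
  uint64_t wide = uint64_t(uint32_t(l)) + uint32_t(r) + uint32_t(c);
  std::printf("%d %d\n", CarryFrom(l, r, c), wide > 0xffffffffULL);  // 1 1
  return 0;
}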
 
 
@@ -1684,27 +1730,92 @@
       int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
       int32_t arg4 = stack_pointer[0];
       int32_t arg5 = stack_pointer[1];
+      bool fp_call =
+         (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+      if (use_eabi_hardfloat()) {
+        // With the hard floating point calling convention, double
+        // arguments are passed in VFP registers. Fetch the arguments
+        // from there and call the builtin using soft floating point
+        // convention.
+        switch (redirection->type()) {
+        case ExternalReference::BUILTIN_FP_FP_CALL:
+        case ExternalReference::BUILTIN_COMPARE_CALL:
+          arg0 = vfp_register[0];
+          arg1 = vfp_register[1];
+          arg2 = vfp_register[2];
+          arg3 = vfp_register[3];
+          break;
+        case ExternalReference::BUILTIN_FP_CALL:
+          arg0 = vfp_register[0];
+          arg1 = vfp_register[1];
+          break;
+        case ExternalReference::BUILTIN_FP_INT_CALL:
+          arg0 = vfp_register[0];
+          arg1 = vfp_register[1];
+          arg2 = get_register(0);
+          break;
+        default:
+          break;
+        }
+      }
       // This is dodgy but it works because the C entry stubs are never moved.
       // See comment in codegen-arm.cc and bug 1242173.
       int32_t saved_lr = get_register(lr);
       intptr_t external =
           reinterpret_cast<intptr_t>(redirection->external_function());
-      if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
-        SimulatorRuntimeFPCall target =
-            reinterpret_cast<SimulatorRuntimeFPCall>(external);
+      if (fp_call) {
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
-          double x, y;
-          GetFpArgs(&x, &y);
-          PrintF("Call to host function at %p with args %f, %f",
-                 FUNCTION_ADDR(target), x, y);
+          SimulatorRuntimeFPCall target =
+              reinterpret_cast<SimulatorRuntimeFPCall>(external);
+          double dval0, dval1;
+          int32_t ival;
+          switch (redirection->type()) {
+          case ExternalReference::BUILTIN_FP_FP_CALL:
+          case ExternalReference::BUILTIN_COMPARE_CALL:
+            GetFpArgs(&dval0, &dval1);
+            PrintF("Call to host function at %p with args %f, %f",
+                FUNCTION_ADDR(target), dval0, dval1);
+            break;
+          case ExternalReference::BUILTIN_FP_CALL:
+            GetFpArgs(&dval0);
+            PrintF("Call to host function at %p with arg %f",
+                FUNCTION_ADDR(target), dval0);
+            break;
+          case ExternalReference::BUILTIN_FP_INT_CALL:
+            GetFpArgs(&dval0, &ival);
+            PrintF("Call to host function at %p with args %f, %d",
+                FUNCTION_ADDR(target), dval0, ival);
+            break;
+          default:
+            UNREACHABLE();
+            break;
+          }
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
           PrintF("\n");
         }
         CHECK(stack_aligned);
-        double result = target(arg0, arg1, arg2, arg3);
-        SetFpResult(result);
+        if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+          SimulatorRuntimeFPCall target =
+              reinterpret_cast<SimulatorRuntimeFPCall>(external);
+          double result = target(arg0, arg1, arg2, arg3);
+          SetFpResult(result);
+        } else {
+          SimulatorRuntimeCall target =
+              reinterpret_cast<SimulatorRuntimeCall>(external);
+          int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+          int32_t lo_res = static_cast<int32_t>(result);
+          int32_t hi_res = static_cast<int32_t>(result >> 32);
+          if (::v8::internal::FLAG_trace_sim) {
+            PrintF("Returned %08x\n", lo_res);
+          }
+          set_register(r0, lo_res);
+          set_register(r1, hi_res);
+        }
       } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
         SimulatorRuntimeDirectApiCall target =
             reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
@@ -2209,8 +2320,15 @@
       }
 
       case ADC: {
-        Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
-        Format(instr, "adc'cond's 'rd, 'rn, 'imm");
+        // Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
+        // Format(instr, "adc'cond's 'rd, 'rn, 'imm");
+        alu_out = rn_val + shifter_operand + GetCarry();
+        set_register(rd, alu_out);
+        if (instr->HasS()) {
+          SetNZFlags(alu_out);
+          SetCFlag(CarryFrom(rn_val, shifter_operand, GetCarry()));
+          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
+        }
         break;
       }
 
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index a16cae5..391ef69 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -68,7 +68,9 @@
 // just use the C stack limit.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+                                            uintptr_t c_limit) {
+    USE(isolate);
     return c_limit;
   }
 
@@ -143,7 +145,7 @@
     num_d_registers = 16
   };
 
-  Simulator();
+  explicit Simulator(Isolate* isolate);
   ~Simulator();
 
   // The currently executing Simulator instance. Potentially there can be one
@@ -179,7 +181,7 @@
   void Execute();
 
   // Call on program start.
-  static void Initialize();
+  static void Initialize(Isolate* isolate);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -200,6 +202,15 @@
   // below (bad_lr, end_sim_pc).
   bool has_bad_pc() const;
 
+  // EABI variant for double arguments in use.
+  bool use_eabi_hardfloat() {
+#if USE_EABI_HARDFLOAT
+    return true;
+#else
+    return false;
+#endif
+  }
+
  private:
   enum special_values {
     // Known bad pc value to ensure that the simulator does not execute
@@ -223,13 +234,17 @@
   void SetNZFlags(int32_t val);
   void SetCFlag(bool val);
   void SetVFlag(bool val);
-  bool CarryFrom(int32_t left, int32_t right);
+  bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
   bool BorrowFrom(int32_t left, int32_t right);
   bool OverflowFrom(int32_t alu_out,
                     int32_t left,
                     int32_t right,
                     bool addition);
 
+  inline int GetCarry() {
+    return c_flag_ ? 1 : 0;
+  }
+
   // Support for VFP.
   void Compute_FPSCR_Flags(double val1, double val2);
   void Copy_FPSCR_to_APSR();
@@ -306,9 +321,10 @@
       void* external_function,
       v8::internal::ExternalReference::Type type);
 
-  // For use in calls that take two double values, constructed from r0, r1, r2
-  // and r3.
+  // For use in calls that take double value arguments.
   void GetFpArgs(double* x, double* y);
+  void GetFpArgs(double* x);
+  void GetFpArgs(double* x, int32_t* y);
   void SetFpResult(const double& result);
   void TrashCallerSaveRegisters();
 
@@ -394,8 +410,9 @@
 // trouble down the line.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
-    return Simulator::current(Isolate::Current())->StackLimit();
+  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+                                            uintptr_t c_limit) {
+    return Simulator::current(isolate)->StackLimit();
   }
 
   static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 47d675b..be8b7d6 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -95,12 +95,13 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                             Label* miss_label,
-                                             Register receiver,
-                                             String* name,
-                                             Register scratch0,
-                                             Register scratch1) {
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss_label,
+    Register receiver,
+    String* name,
+    Register scratch0,
+    Register scratch1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -136,71 +137,21 @@
   // Restore the temporarily used register.
   __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
 
-  // Compute the capacity mask.
-  const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
 
-  // Generate an unrolled loop that performs a few probes before
-  // giving up.
-  static const int kProbes = 4;
-  const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
+  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+      masm,
+      miss_label,
+      &done,
+      receiver,
+      properties,
+      name,
+      scratch1);
+  if (result->IsFailure()) return result;
 
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
-  // not equal to the name and kProbes-th slot is not used (its name is the
-  // undefined value), it guarantees the hash table doesn't contain the
-  // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
-  for (int i = 0; i < kProbes; i++) {
-    // scratch0 points to properties hash.
-    // Compute the masked index: (hash + i + i * i) & mask.
-    Register index = scratch1;
-    // Capacity is smi 2^n.
-    __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
-    __ sub(index, index, Operand(1));
-    __ and_(index, index, Operand(
-        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
-
-    Register entity_name = scratch1;
-    // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    Register tmp = properties;
-    __ add(tmp, properties, Operand(index, LSL, 1));
-    __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
-    ASSERT(!tmp.is(entity_name));
-    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
-    __ cmp(entity_name, tmp);
-    if (i != kProbes - 1) {
-      __ b(eq, &done);
-
-      // Stop if found the property.
-      __ cmp(entity_name, Operand(Handle<String>(name)));
-      __ b(eq, miss_label);
-
-      // Check if the entry name is not a symbol.
-      __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
-      __ ldrb(entity_name,
-              FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-      __ tst(entity_name, Operand(kIsSymbolMask));
-      __ b(eq, miss_label);
-
-      // Restore the properties.
-      __ ldr(properties,
-             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-    } else {
-      // Give up probing if still not found the undefined value.
-      __ b(ne, miss_label);
-    }
-  }
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  return result;
 }
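The probing scheme the removed loop implemented (and that the shared stub now performs) walks a power-of-two dictionary at offsets of the form hash + i + i*i, so a fixed number of probes either finds the name, finds a non-symbol key, or proves absence by reaching an undefined slot. A minimal standalone sketch of that probe sequence (illustrative, not the StringDictionary code):

#include <cstdio>

// Quadratic probing over a power-of-two table, as in the removed comment:
// the i-th probe lands at (hash + i + i*i) & (capacity - 1).
static int ProbeIndex(unsigned hash, int i, int capacity) {
  return static_cast<int>((hash + i + i * i) & (capacity - 1));
}

int main() {
  const int kCapacity = 8;     // Must be a power of two for the mask to work.
  const unsigned kHash = 13;
  for (int i = 0; i < 4; ++i) {   // The removed loop used four probes.
    std::printf("probe %d -> slot %d\n", i, ProbeIndex(kHash, i, kCapacity));
  }
  return 0;
}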
 
 
@@ -525,7 +476,8 @@
 static void GenerateCallFunction(MacroAssembler* masm,
                                  Object* object,
                                  const ParameterCount& arguments,
-                                 Label* miss) {
+                                 Label* miss,
+                                 Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r0: receiver
   //  -- r1: function to call
@@ -544,7 +496,10 @@
   }
 
   // Invoke the function.
-  __ InvokeFunction(r1, arguments, JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
 }
 
 
@@ -674,10 +629,12 @@
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name)
+                          Register name,
+                          Code::ExtraICState extra_ic_state)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
-        name_(name) {}
+        name_(name),
+        extra_ic_state_(extra_ic_state) {}
 
   MaybeObject* Compile(MacroAssembler* masm,
                        JSObject* object,
@@ -805,8 +762,11 @@
                                                       arguments_.immediate());
       if (result->IsFailure()) return result;
     } else {
+      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+          ? CALL_AS_FUNCTION
+          : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION);
+                        JUMP_FUNCTION, call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -888,6 +848,7 @@
   StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
+  Code::ExtraICState extra_ic_state_;
 };
 
 
@@ -1102,12 +1063,17 @@
       ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(),
-                                       miss,
-                                       reg,
-                                       name,
-                                       scratch1,
-                                       scratch2);
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
+
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
       reg = holder_reg;  // from now the object is in holder_reg
       __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
@@ -1501,8 +1467,10 @@
 
 
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj = masm()->isolate()->stub_cache()->ComputeCallMiss(
-      arguments().immediate(), kind_);
+  MaybeObject* maybe_obj =
+      isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+                                               kind_,
+                                               extra_ic_state_);
   Object* obj;
   if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1534,7 +1502,7 @@
   Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
   GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
 
-  GenerateCallFunction(masm(), object, arguments(), &miss);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -1594,8 +1562,11 @@
     __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
 
     // Check that the elements are in fast mode and writable.
-    __ CheckMap(elements, r0,
-                Heap::kFixedArrayMapRootIndex, &call_builtin, true);
+    __ CheckMap(elements,
+                r0,
+                Heap::kFixedArrayMapRootIndex,
+                &call_builtin,
+                DONT_DO_SMI_CHECK);
 
     if (argc == 1) {  // Otherwise fall through to call the builtin.
       Label exit, with_write_barrier, attempt_to_grow_elements;
@@ -1744,7 +1715,11 @@
   __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
 
   // Check that the elements are in fast mode and writable.
-  __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, &call_builtin, true);
+  __ CheckMap(elements,
+              r0,
+              Heap::kFixedArrayMapRootIndex,
+              &call_builtin,
+              DONT_DO_SMI_CHECK);
 
   // Get the array's length into r4 and calculate new length.
   __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1815,7 +1790,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1899,7 +1876,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -2023,7 +2002,7 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
@@ -2086,7 +2065,7 @@
   __ Drop(argc + 1, eq);
   __ Ret(eq);
 
-  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
+  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
 
   Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
 
@@ -2171,7 +2150,7 @@
   __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
@@ -2247,7 +2226,7 @@
   // Check if the argument is a heap number and load its exponent and
   // sign.
   __ bind(&not_smi);
-  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
+  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
   __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
 
   // Check the sign of the argument. If the argument is positive,
@@ -2273,7 +2252,7 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
@@ -2299,6 +2278,7 @@
   // repatch it to global receiver.
   if (object->IsGlobalObject()) return heap()->undefined_value();
   if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
             JSObject::cast(object), holder);
   if (depth == kInvalidProtoDepth) return heap()->undefined_value();
@@ -2460,7 +2440,10 @@
       UNREACHABLE();
   }
 
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2493,7 +2476,7 @@
   // Get the receiver from the stack.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), r2);
+  CallInterceptorCompiler compiler(this, arguments(), r2, extra_ic_state_);
   MaybeObject* result = compiler.Compile(masm(),
                                          object,
                                          holder,
@@ -2513,7 +2496,7 @@
   // Restore receiver.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
 
-  GenerateCallFunction(masm(), object, arguments(), &miss);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2571,15 +2554,19 @@
   ASSERT(function->is_compiled());
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   if (V8::UseCrankshaft()) {
     // TODO(kasperl): For now, we always call indirectly through the
     // code field in the function to allow recompilation to take effect
     // without changing any of the call sites.
     __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-    __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION);
+    __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   } else {
-    __ InvokeCode(code, expected, arguments(),
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+                  JUMP_FUNCTION, call_kind);
   }
 
   // Handle call cache miss.
@@ -3128,52 +3115,56 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(r1,
+                 r2,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
   //  -- r1    : receiver
   // -----------------------------------
   Label miss;
+  __ JumpIfSmi(r1, &miss);
 
-  // Check that the receiver isn't a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &miss);
-
-  // Check that the map matches.
+  int receiver_count = receiver_maps->length();
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r2, Operand(Handle<Map>(receiver->map())));
-  __ b(ne, &miss);
-
-  // Check that the key is a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(ne, &miss);
-
-  // Get the elements array.
-  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ AssertFastElements(r2);
-
-  // Check that the key is within bounds.
-  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
-  __ cmp(r0, Operand(r3));
-  __ b(hs, &miss);
-
-  // Load the result and make sure it's not the hole.
-  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ ldr(r4,
-         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ cmp(r4, ip);
-  __ b(eq, &miss);
-  __ mov(r0, r4);
-  __ Ret();
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ mov(ip, Operand(map));
+    __ cmp(r2, ip);
+    __ Jump(code, RelocInfo::CODE_TARGET, eq);
+  }
 
   __ bind(&miss);
-  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -3215,69 +3206,27 @@
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
-    JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
+    Map* receiver_map) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
   //  -- r2    : receiver
   //  -- lr    : return address
   //  -- r3    : scratch
-  //  -- r4    : scratch (elements)
   // -----------------------------------
-  Label miss;
+  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  MaybeObject* maybe_stub =
+      KeyedStoreFastElementStub(is_js_array).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(r2,
+                 r3,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
-  Register value_reg = r0;
-  Register key_reg = r1;
-  Register receiver_reg = r2;
-  Register scratch = r3;
-  Register elements_reg = r4;
-
-  // Check that the receiver isn't a smi.
-  __ tst(receiver_reg, Operand(kSmiTagMask));
-  __ b(eq, &miss);
-
-  // Check that the map matches.
-  __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-  __ cmp(scratch, Operand(Handle<Map>(receiver->map())));
-  __ b(ne, &miss);
-
-  // Check that the key is a smi.
-  __ tst(key_reg, Operand(kSmiTagMask));
-  __ b(ne, &miss);
-
-  // Get the elements array and make sure it is a fast element array, not 'cow'.
-  __ ldr(elements_reg,
-         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-  __ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset));
-  __ cmp(scratch, Operand(Handle<Map>(factory()->fixed_array_map())));
-  __ b(ne, &miss);
-
-  // Check that the key is within bounds.
-  if (receiver->IsJSArray()) {
-    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-  } else {
-    __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-  }
-  // Compare smis.
-  __ cmp(key_reg, scratch);
-  __ b(hs, &miss);
-
-  __ add(scratch,
-         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ str(value_reg,
-         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ RecordWrite(scratch,
-                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
-                 receiver_reg , elements_reg);
-
-  // value_reg (r0) is preserved.
-  // Done.
-  __ Ret();
-
-  __ bind(&miss);
-  Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
@@ -3285,6 +3234,38 @@
 }
 
 
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : scratch
+  // -----------------------------------
+  Label miss;
+  __ JumpIfSmi(r2, &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ mov(ip, Operand(map));
+    __ cmp(r3, ip);
+    __ Jump(code, RelocInfo::CODE_TARGET, eq);
+  }
+
+  __ bind(&miss);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
+}
+
+
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
@@ -3429,6 +3410,60 @@
 }
 
 
+MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
+    JSObject* receiver, ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  MaybeObject* maybe_stub =
+      KeyedLoadExternalArrayStub(array_type).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(r1,
+                 r2,
+                 Handle<Map>(receiver->map()),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
+}
+
+
+MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
+    JSObject* receiver, ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : name
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  // -----------------------------------
+  MaybeObject* maybe_stub =
+      KeyedStoreExternalArrayStub(array_type).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(r2,
+                 r3,
+                 Handle<Map>(receiver->map()),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  return GetCode();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
 static bool IsElementTypeSigned(ExternalArrayType array_type) {
   switch (array_type) {
     case kExternalByteArray:
@@ -3448,30 +3483,24 @@
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
-    JSObject* receiver_object,
-    ExternalArrayType array_type,
-    Code::Flags flags) {
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ExternalArrayType array_type) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
   //  -- r0     : key
   //  -- r1     : receiver
   // -----------------------------------
-  Label slow, failed_allocation;
+  Label miss_force_generic, slow, failed_allocation;
 
   Register key = r0;
   Register receiver = r1;
 
-  // Check that the object isn't a smi
-  __ JumpIfSmi(receiver, &slow);
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
 
   // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &slow);
-
-  // Make sure that we've got the right map.
-  __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(r2, Operand(Handle<Map>(receiver_object->map())));
-  __ b(ne, &slow);
+  __ JumpIfNotSmi(key, &miss_force_generic);
 
   __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
   // r3: elements array
@@ -3480,7 +3509,7 @@
   __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
   __ cmp(ip, Operand(key, ASR, kSmiTagSize));
   // Unsigned comparison catches both negative and too-large values.
-  __ b(lo, &slow);
+  __ b(lo, &miss_force_generic);
 
   __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
   // r3: base pointer of external storage
@@ -3517,6 +3546,18 @@
         __ ldr(value, MemOperand(r3, key, LSL, 1));
       }
       break;
+    case kExternalDoubleArray:
+      if (CpuFeatures::IsSupported(VFP3)) {
+        CpuFeatures::Scope scope(VFP3);
+        __ add(r2, r3, Operand(key, LSL, 2));
+        __ vldr(d0, r2, 0);
+      } else {
+        __ add(r4, r3, Operand(key, LSL, 2));
+        // r4: pointer to the beginning of the double we want to load.
+        __ ldr(r2, MemOperand(r4, 0));
+        __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
+      }
+      break;
     default:
       UNREACHABLE();
       break;
@@ -3524,9 +3565,12 @@
 
   // For integer array types:
   // r2: value
-  // For floating-point array type
+  // For float array type:
   // s0: value (if VFP3 is supported)
   // r2: value (if VFP3 is not supported)
+  // For double array type:
+  // d0: value (if VFP3 is supported)
+  // r2/r3: value (if VFP3 is not supported)
 
   if (array_type == kExternalIntArray) {
     // For the Int and UnsignedInt array types, we need to see whether
@@ -3556,8 +3600,21 @@
       __ vstr(d0, r3, HeapNumber::kValueOffset);
       __ Ret();
     } else {
-      WriteInt32ToHeapNumberStub stub(value, r0, r3);
-      __ TailCallStub(&stub);
+      Register dst1 = r1;
+      Register dst2 = r3;
+      FloatingPointHelper::Destination dest =
+          FloatingPointHelper::kCoreRegisters;
+      FloatingPointHelper::ConvertIntToDouble(masm,
+                                              value,
+                                              dest,
+                                              d0,
+                                              dst1,
+                                              dst2,
+                                              r9,
+                                              s0);
+      __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+      __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+      __ Ret();
     }
   } else if (array_type == kExternalUnsignedIntArray) {
     // The test is different for unsigned int values. Since we need
@@ -3602,12 +3659,12 @@
 
       __ bind(&box_int_0);
       // Integer does not have leading zeros.
-      GenerateUInt2Double(masm(), hiword, loword, r4, 0);
+      GenerateUInt2Double(masm, hiword, loword, r4, 0);
       __ b(&done);
 
       __ bind(&box_int_1);
       // Integer has one leading zero.
-      GenerateUInt2Double(masm(), hiword, loword, r4, 1);
+      GenerateUInt2Double(masm, hiword, loword, r4, 1);
 
 
       __ bind(&done);
@@ -3694,6 +3751,31 @@
       __ mov(r0, r3);
       __ Ret();
     }
+  } else if (array_type == kExternalDoubleArray) {
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+      __ sub(r1, r2, Operand(kHeapObjectTag));
+      __ vstr(d0, r1, HeapNumber::kValueOffset);
+
+      __ mov(r0, r2);
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(r4, r5, r6, r7, &slow);
+
+      __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
+      __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
+      __ mov(r0, r4);
+      __ Ret();
+    }
 
   } else {
     // Tag integer as smi and return it.
@@ -3704,7 +3786,7 @@
   // Slow case, key and receiver still in r0 and r1.
   __ bind(&slow);
   __ IncrementCounter(
-      masm()->isolate()->counters()->keyed_load_external_array_slow(),
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
       1, r2, r3);
 
   // ---------- S t a t e --------------
@@ -3717,21 +3799,23 @@
 
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 
-  return GetCode(flags);
+  __ bind(&miss_force_generic);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
-    JSObject* receiver_object,
-    ExternalArrayType array_type,
-    Code::Flags flags) {
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+    MacroAssembler* masm,
+    ExternalArrayType array_type) {
   // ---------- S t a t e --------------
   //  -- r0     : value
   //  -- r1     : key
   //  -- r2     : receiver
   //  -- lr     : return address
   // -----------------------------------
-  Label slow, check_heap_number;
+  Label slow, check_heap_number, miss_force_generic;
 
   // Register usage.
   Register value = r0;
@@ -3739,25 +3823,20 @@
   Register receiver = r2;
   // r3 mostly holds the elements array or the destination external array.
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-
-  // Make sure that we've got the right map.
-  __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(r3, Operand(Handle<Map>(receiver_object->map())));
-  __ b(ne, &slow);
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
 
   __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
   // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &slow);
+  __ JumpIfNotSmi(key, &miss_force_generic);
 
   // Check that the index is in range
   __ SmiUntag(r4, key);
   __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
   __ cmp(r4, ip);
   // Unsigned comparison catches both negative and too-large values.
-  __ b(hs, &slow);
+  __ b(hs, &miss_force_generic);
 
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
@@ -3795,7 +3874,28 @@
       break;
     case kExternalFloatArray:
       // Perform int-to-float conversion and store to memory.
-      StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
+      StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
+      break;
+    case kExternalDoubleArray:
+      __ add(r3, r3, Operand(r4, LSL, 3));
+      // r3: effective address of the double element
+      FloatingPointHelper::Destination destination;
+      if (CpuFeatures::IsSupported(VFP3)) {
+        destination = FloatingPointHelper::kVFPRegisters;
+      } else {
+        destination = FloatingPointHelper::kCoreRegisters;
+      }
+      FloatingPointHelper::ConvertIntToDouble(
+          masm, r5, destination,
+          d0, r6, r7,  // These are: double_dst, dst1, dst2.
+          r4, s2);  // These are: scratch2, single_scratch.
+      if (destination == FloatingPointHelper::kVFPRegisters) {
+        CpuFeatures::Scope scope(VFP3);
+        __ vstr(d0, r3, 0);
+      } else {
+        __ str(r6, MemOperand(r3, 0));
+        __ str(r7, MemOperand(r3, Register::kSizeInBytes));
+      }
       break;
     default:
       UNREACHABLE();
@@ -3831,6 +3931,11 @@
         __ add(r5, r3, Operand(r4, LSL, 2));
         __ vcvt_f32_f64(s0, d0);
         __ vstr(s0, r5, 0);
+      } else if (array_type == kExternalDoubleArray) {
+        __ sub(r5, r0, Operand(kHeapObjectTag));
+        __ vldr(d0, r5, HeapNumber::kValueOffset);
+        __ add(r5, r3, Operand(r4, LSL, 3));
+        __ vstr(d0, r5, 0);
       } else {
         // Need to perform float-to-int conversion.
         // Test for NaN or infinity (both give zero).
@@ -3933,6 +4038,12 @@
         __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
         __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
         __ b(&done);
+      } else if (array_type == kExternalDoubleArray) {
+        __ add(r7, r3, Operand(r4, LSL, 3));
+        // r7: effective address of destination element.
+        __ str(r6, MemOperand(r7, 0));
+        __ str(r5, MemOperand(r7, Register::kSizeInBytes));
+        __ Ret();
       } else {
         bool is_signed_type = IsElementTypeSigned(array_type);
         int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
@@ -4002,28 +4113,137 @@
     }
   }
 
-  // Slow case: call runtime.
+  // Slow case, key and receiver still in r0 and r1.
   __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, r2, r3);
 
-  // Entry registers are intact.
   // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r1     : key
-  //  -- r2     : receiver
   //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
+  // -----------------------------------
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  // Miss case, call the runtime.
+  __ bind(&miss_force_generic);
+
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- r1     : receiver
   // -----------------------------------
 
-  // Push receiver, key and value for runtime call.
-  __ Push(r2, r1, r0);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
 
-  __ mov(r1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
-  __ mov(r0, Operand(Smi::FromInt(
-      Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
-  __ Push(r1, r0);
 
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss_force_generic;
 
-  return GetCode(flags);
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(r0, &miss_force_generic);
+
+  // Get the elements array.
+  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ AssertFastElements(r2);
+
+  // Check that the key is within bounds.
+  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ cmp(r0, Operand(r3));
+  __ b(hs, &miss_force_generic);
+
+  // Load the result and make sure it's not the hole.
+  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ ldr(r4,
+         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(r4, ip);
+  __ b(eq, &miss_force_generic);
+  __ mov(r0, r4);
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : scratch
+  //  -- r4    : scratch (elements)
+  // -----------------------------------
+  Label miss_force_generic;
+
+  Register value_reg = r0;
+  Register key_reg = r1;
+  Register receiver_reg = r2;
+  Register scratch = r3;
+  Register elements_reg = r4;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(r0, &miss_force_generic);
+
+  // Get the elements array and make sure it is a fast element array, not 'cow'.
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+  __ CheckMap(elements_reg,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis.
+  __ cmp(key_reg, scratch);
+  __ b(hs, &miss_force_generic);
+
+  __ add(scratch,
+         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ str(value_reg,
+         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ RecordWrite(scratch,
+                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
+                 receiver_reg, elements_reg);
+
+  // value_reg (r0) is preserved.
+  // Done.
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
 
 
diff --git a/src/array.js b/src/array.js
index 6ed1476..df080a7 100644
--- a/src/array.js
+++ b/src/array.js
@@ -67,6 +67,25 @@
 }
 
 
+function SparseJoinWithSeparator(array, len, convert, separator) {
+  var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
+  var totalLength = 0;
+  var elements = new InternalArray(keys.length * 2);
+  var previousKey = -1;
+  for (var i = 0; i < keys.length; i++) {
+    var key = keys[i];
+    if (key != previousKey) {  // keys may contain duplicates.
+      var e = array[key];
+      if (!IS_STRING(e)) e = convert(e);
+      elements[i * 2] = key;
+      elements[i * 2 + 1] = e;
+      previousKey = key;
+    }
+  }
+  return %SparseJoinWithSeparator(elements, len, separator);
+}
+
+
 // Optimized for sparse arrays if separator is ''.
 function SparseJoin(array, len, convert) {
   var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
@@ -110,8 +129,12 @@
 
   // Attempt to convert the elements.
   try {
-    if (UseSparseVariant(array, length, is_array) && (separator.length == 0)) {
-      return SparseJoin(array, length, convert);
+    if (UseSparseVariant(array, length, is_array)) {
+      if (separator.length == 0) {
+        return SparseJoin(array, length, convert);
+      } else {
+        return SparseJoinWithSeparator(array, length, convert, separator);
+      }
     }
 
     // Fast case for one-element arrays.
@@ -129,10 +152,8 @@
       var elements_length = 0;
       for (var i = 0; i < length; i++) {
         var e = array[i];
-        if (!IS_UNDEFINED(e)) {
-          if (!IS_STRING(e)) e = convert(e);
-          elements[elements_length++] = e;
-        }
+        if (!IS_STRING(e)) e = convert(e);
+        elements[elements_length++] = e;
       }
       elements.length = elements_length;
       var result = %_FastAsciiArrayJoin(elements, '');
@@ -151,11 +172,12 @@
     } else {
       for (var i = 0; i < length; i++) {
         var e = array[i];
-        if (IS_NUMBER(e)) elements[i] = %_NumberToString(e);
-        else {
-          if (!IS_STRING(e)) e = convert(e);
+          if (IS_NUMBER(e)) {
+            e = %_NumberToString(e);
+          } else if (!IS_STRING(e)) {
+            e = convert(e);
+          }
           elements[i] = e;
-        }
       }
     }
     var result = %_FastAsciiArrayJoin(elements, separator);
@@ -375,6 +397,11 @@
 
 
 function ArrayJoin(separator) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.join"]);
+  }
+
   if (IS_UNDEFINED(separator)) {
     separator = ',';
   } else if (!IS_STRING(separator)) {
@@ -391,6 +418,11 @@
 // Removes the last element from the array and returns it. See
 // ECMA-262, section 15.4.4.6.
 function ArrayPop() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.pop"]);
+  }
+
   var n = TO_UINT32(this.length);
   if (n == 0) {
     this.length = n;
@@ -407,6 +439,11 @@
 // Appends the arguments to the end of the array and returns the new
 // length of the array. See ECMA-262, section 15.4.4.7.
 function ArrayPush() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.push"]);
+  }
+
   var n = TO_UINT32(this.length);
   var m = %_ArgumentsLength();
   for (var i = 0; i < m; i++) {
@@ -418,6 +455,11 @@
 
 
 function ArrayConcat(arg1) {  // length == 1
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.concat"]);
+  }
+
   var arg_count = %_ArgumentsLength();
   var arrays = new InternalArray(1 + arg_count);
   arrays[0] = this;
@@ -474,6 +516,11 @@
 
 
 function ArrayReverse() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.reverse"]);
+  }
+
   var j = TO_UINT32(this.length) - 1;
 
   if (UseSparseVariant(this, j, IS_ARRAY(this))) {
@@ -505,6 +552,11 @@
 
 
 function ArrayShift() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.shift"]);
+  }
+
   var len = TO_UINT32(this.length);
 
   if (len === 0) {
@@ -526,6 +578,11 @@
 
 
 function ArrayUnshift(arg1) {  // length == 1
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.unshift"]);
+  }
+
   var len = TO_UINT32(this.length);
   var num_arguments = %_ArgumentsLength();
 
@@ -545,6 +602,11 @@
 
 
 function ArraySlice(start, end) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.slice"]);
+  }
+
   var len = TO_UINT32(this.length);
   var start_i = TO_INTEGER(start);
   var end_i = len;
@@ -582,6 +644,11 @@
 
 
 function ArraySplice(start, delete_count) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.splice"]);
+  }
+
   var num_arguments = %_ArgumentsLength();
 
   var len = TO_UINT32(this.length);
@@ -653,6 +720,11 @@
 
 
 function ArraySort(comparefn) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.sort"]);
+  }
+
   // In-place QuickSort algorithm.
   // For short (length <= 22) arrays, insertion sort is used for efficiency.
 
@@ -914,6 +986,11 @@
 // preserving the semantics, since the calls to the receiver function can add
 // or delete elements from the array.
 function ArrayFilter(f, receiver) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.filter"]);
+  }
+
   if (!IS_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
@@ -935,6 +1012,11 @@
 
 
 function ArrayForEach(f, receiver) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.forEach"]);
+  }
+
   if (!IS_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
@@ -953,6 +1035,11 @@
 // Executes the function once for each element present in the
 // array until it finds one where callback returns true.
 function ArraySome(f, receiver) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.some"]);
+  }
+
   if (!IS_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
@@ -970,6 +1057,11 @@
 
 
 function ArrayEvery(f, receiver) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.every"]);
+  }
+
   if (!IS_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
@@ -986,6 +1078,11 @@
 }
 
 function ArrayMap(f, receiver) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.map"]);
+  }
+
   if (!IS_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
@@ -1006,6 +1103,11 @@
 
 
 function ArrayIndexOf(element, index) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.indexOf"]);
+  }
+
   var length = TO_UINT32(this.length);
   if (length == 0) return -1;
   if (IS_UNDEFINED(index)) {
@@ -1063,6 +1165,11 @@
 
 
 function ArrayLastIndexOf(element, index) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.lastIndexOf"]);
+  }
+
   var length = TO_UINT32(this.length);
   if (length == 0) return -1;
   if (%_ArgumentsLength() < 2) {
@@ -1116,6 +1223,11 @@
 
 
 function ArrayReduce(callback, current) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.reduce"]);
+  }
+
   if (!IS_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
@@ -1145,6 +1257,11 @@
 }
 
 function ArrayReduceRight(callback, current) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Array.prototype.reduceRight"]);
+  }
+
   if (!IS_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
diff --git a/src/assembler.cc b/src/assembler.cc
index bfecc77..3c7fc1c 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -69,6 +69,8 @@
 const double DoubleConstant::min_int = kMinInt;
 const double DoubleConstant::one_half = 0.5;
 const double DoubleConstant::minus_zero = -0.0;
+const double DoubleConstant::uint8_max_value = 255;
+const double DoubleConstant::zero = 0.0;
 const double DoubleConstant::nan = OS::nan_value();
 const double DoubleConstant::negative_infinity = -V8_INFINITY;
 const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
@@ -99,58 +101,85 @@
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfoWriter and RelocIterator
 //
+// Relocation information is written backwards in memory, from high addresses
+// towards low addresses, byte by byte.  Therefore, in the encodings listed
+// below, the first byte listed is at the highest address, and successive
+// bytes in the record are at progressively lower addresses.
+//
 // Encoding
 //
 // The most common modes are given single-byte encodings.  Also, it is
 // easy to identify the type of reloc info and skip unwanted modes in
 // an iteration.
 //
-// The encoding relies on the fact that there are less than 14
-// different relocation modes.
+// The encoding relies on the fact that there are fewer than 14
+// different non-compactly encoded relocation modes.
 //
-// embedded_object:    [6 bits pc delta] 00
+// The first byte of a relocation record has a tag in its low 2 bits:
+// Here are the record schemes, depending on the low tag and optional higher
+// tags.
 //
-// code_taget:         [6 bits pc delta] 01
+// Low tag:
+//   00: embedded_object:      [6-bit pc delta] 00
 //
-// position:           [6 bits pc delta] 10,
-//                     [7 bits signed data delta] 0
+//   01: code_target:          [6-bit pc delta] 01
 //
-// statement_position: [6 bits pc delta] 10,
-//                     [7 bits signed data delta] 1
+//   10: short_data_record:    [6-bit pc delta] 10 followed by
+//                             [6-bit data delta] [2-bit data type tag]
 //
-// any nondata mode:   00 [4 bits rmode] 11,  // rmode: 0..13 only
-//                     00 [6 bits pc delta]
+//   11: long_record           [2-bit high tag][4-bit middle_tag] 11
+//                             followed by variable data depending on type.
 //
-// pc-jump:            00 1111 11,
-//                     00 [6 bits pc delta]
+//  2-bit data type tags, used in short_data_record and data_jump long_record:
+//   code_target_with_id: 00
+//   position:            01
+//   statement_position:  10
+//   comment:             11 (not used in short_data_record)
 //
-// pc-jump:            01 1111 11,
-// (variable length)   7 - 26 bit pc delta, written in chunks of 7
-//                     bits, the lowest 7 bits written first.
+//  Long record format:
+//    4-bit middle_tag:
+//      0000 - 1100 : Short record for RelocInfo::Mode middle_tag + LAST_COMPACT_ENUM
+//         (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM,
+//          and is between 0000 and 1100)
+//        The format is:
+//                              00 [4 bit middle_tag] 11 followed by
+//                              00 [6 bit pc delta]
 //
-// data-jump + pos:    00 1110 11,
-//                     signed intptr_t, lowest byte written first
+//      1101: not used (would allow one more relocation mode to be added)
+//      1110: long_data_record
+//        The format is:       [2-bit data_type_tag] 1110 11
+//                             signed intptr_t, lowest byte written first
+//                             (except data_type code_target_with_id, which
+//                             is followed by a signed int, not intptr_t.)
 //
-// data-jump + st.pos: 01 1110 11,
-//                     signed intptr_t, lowest byte written first
-//
-// data-jump + comm.:  10 1110 11,
-//                     signed intptr_t, lowest byte written first
-//
+//      1111: long_pc_jump
+//        The format is:
+//          pc-jump:             00 1111 11,
+//                               00 [6 bits pc delta]
+//        or
+//          pc-jump (variable length):
+//                               01 1111 11,
+//                               [7 bits data] 0
+//                                  ...
+//                               [7 bits data] 1
+//               (Bits 6..31 of pc delta, with leading zeroes
+//                dropped, and last non-zero chunk tagged with 1.)
+
+
 const int kMaxRelocModes = 14;
 
 const int kTagBits = 2;
 const int kTagMask = (1 << kTagBits) - 1;
 const int kExtraTagBits = 4;
-const int kPositionTypeTagBits = 1;
-const int kSmallDataBits = kBitsPerByte - kPositionTypeTagBits;
+const int kLocatableTypeTagBits = 2;
+const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits;
 
 const int kEmbeddedObjectTag = 0;
 const int kCodeTargetTag = 1;
-const int kPositionTag = 2;
+const int kLocatableTag = 2;
 const int kDefaultTag = 3;
 
-const int kPCJumpTag = (1 << kExtraTagBits) - 1;
+const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1;
 
 const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
 const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
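A minimal standalone sketch of the one-byte code_target record described above (the constant values mirror the tags defined in this hunk; nothing below is V8 API):

    // Sketch: pack a small pc delta and the 2-bit low tag into one byte,
    // as in the "[6-bit pc delta] 01" record documented above.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kTagBits = 2;
      const int kCodeTargetTag = 1;     // low tag 01
      uint32_t pc_delta = 5;            // must fit in 6 bits for the short form
      assert(pc_delta < (1u << 6));
      uint8_t record =
          static_cast<uint8_t>((pc_delta << kTagBits) | kCodeTargetTag);
      std::printf("0x%02x\n", record);  // prints 0x15
      return 0;
    }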
@@ -164,11 +193,12 @@
 const int kLastChunkTag = 1;
 
 
-const int kDataJumpTag = kPCJumpTag - 1;
+const int kDataJumpExtraTag = kPCJumpExtraTag - 1;
 
-const int kNonstatementPositionTag = 0;
-const int kStatementPositionTag = 1;
-const int kCommentTag = 2;
+const int kCodeWithIdTag = 0;
+const int kNonstatementPositionTag = 1;
+const int kStatementPositionTag = 2;
+const int kCommentTag = 3;
 
 
 uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
@@ -176,7 +206,7 @@
   // Otherwise write a variable length PC jump for the bits that do
   // not fit in the kSmallPCDeltaBits bits.
   if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
-  WriteExtraTag(kPCJumpTag, kVariableLengthPCJumpTopTag);
+  WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
   uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
   ASSERT(pc_jump > 0);
   // Write kChunkBits size chunks of the pc_jump.
@@ -199,7 +229,7 @@
 
 
 void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
-  *--pos_ = static_cast<byte>(data_delta << kPositionTypeTagBits | tag);
+  *--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
 }
 
 
@@ -218,11 +248,20 @@
 }
 
 
+void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
+  WriteExtraTag(kDataJumpExtraTag, top_tag);
+  for (int i = 0; i < kIntSize; i++) {
+    *--pos_ = static_cast<byte>(data_delta);
+    // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+    data_delta = data_delta >> kBitsPerByte;
+  }
+}
+
 void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
-  WriteExtraTag(kDataJumpTag, top_tag);
+  WriteExtraTag(kDataJumpExtraTag, top_tag);
   for (int i = 0; i < kIntptrSize; i++) {
     *--pos_ = static_cast<byte>(data_delta);
-  // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+    // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
     data_delta = data_delta >> kBitsPerByte;
   }
 }
@@ -233,7 +272,8 @@
   byte* begin_pos = pos_;
 #endif
   ASSERT(rinfo->pc() - last_pc_ >= 0);
-  ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
+  ASSERT(RelocInfo::NUMBER_OF_MODES - RelocInfo::LAST_COMPACT_ENUM <=
+         kMaxRelocModes);
   // Use unsigned delta-encoding for pc.
   uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
   RelocInfo::Mode rmode = rinfo->rmode();
@@ -244,35 +284,48 @@
   } else if (rmode == RelocInfo::CODE_TARGET) {
     WriteTaggedPC(pc_delta, kCodeTargetTag);
     ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
-  } else if (RelocInfo::IsPosition(rmode)) {
-    // Use signed delta-encoding for data.
-    intptr_t data_delta = rinfo->data() - last_data_;
-    int pos_type_tag = rmode == RelocInfo::POSITION ? kNonstatementPositionTag
-                                                    : kStatementPositionTag;
-    // Check if data is small enough to fit in a tagged byte.
-    // We cannot use is_intn because data_delta is not an int32_t.
-    if (data_delta >= -(1 << (kSmallDataBits-1)) &&
-        data_delta < 1 << (kSmallDataBits-1)) {
-      WriteTaggedPC(pc_delta, kPositionTag);
-      WriteTaggedData(data_delta, pos_type_tag);
-      last_data_ = rinfo->data();
+  } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+    // Use signed delta-encoding for id.
+    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
+    int id_delta = static_cast<int>(rinfo->data()) - last_id_;
+    // Check if delta is small enough to fit in a tagged byte.
+    if (is_intn(id_delta, kSmallDataBits)) {
+      WriteTaggedPC(pc_delta, kLocatableTag);
+      WriteTaggedData(id_delta, kCodeWithIdTag);
     } else {
       // Otherwise, use costly encoding.
-      WriteExtraTaggedPC(pc_delta, kPCJumpTag);
-      WriteExtraTaggedData(data_delta, pos_type_tag);
-      last_data_ = rinfo->data();
+      WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+      WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
     }
+    last_id_ = static_cast<int>(rinfo->data());
+  } else if (RelocInfo::IsPosition(rmode)) {
+    // Use signed delta-encoding for position.
+    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
+    int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
+    int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
+                                                      : kStatementPositionTag;
+    // Check if delta is small enough to fit in a tagged byte.
+    if (is_intn(pos_delta, kSmallDataBits)) {
+      WriteTaggedPC(pc_delta, kLocatableTag);
+      WriteTaggedData(pos_delta, pos_type_tag);
+    } else {
+      // Otherwise, use costly encoding.
+      WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+      WriteExtraTaggedIntData(pos_delta, pos_type_tag);
+    }
+    last_position_ = static_cast<int>(rinfo->data());
   } else if (RelocInfo::IsComment(rmode)) {
     // Comments are normally not generated, so we use the costly encoding.
-    WriteExtraTaggedPC(pc_delta, kPCJumpTag);
-    WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
-    last_data_ = rinfo->data();
+    WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+    WriteExtraTaggedData(rinfo->data(), kCommentTag);
     ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
   } else {
+    ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
+    int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
     // For all other modes we simply use the mode as the extra tag.
     // None of these modes need a data component.
-    ASSERT(rmode < kPCJumpTag && rmode < kDataJumpTag);
-    WriteExtraTaggedPC(pc_delta, rmode);
+    ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
+    WriteExtraTaggedPC(pc_delta, saved_mode);
   }
   last_pc_ = rinfo->pc();
 #ifdef DEBUG
@@ -306,12 +359,32 @@
 }
 
 
+void RelocIterator::AdvanceReadId() {
+  int x = 0;
+  for (int i = 0; i < kIntSize; i++) {
+    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
+  }
+  last_id_ += x;
+  rinfo_.data_ = last_id_;
+}
+
+
+void RelocIterator::AdvanceReadPosition() {
+  int x = 0;
+  for (int i = 0; i < kIntSize; i++) {
+    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
+  }
+  last_position_ += x;
+  rinfo_.data_ = last_position_;
+}
+
+
 void RelocIterator::AdvanceReadData() {
   intptr_t x = 0;
   for (int i = 0; i < kIntptrSize; i++) {
     x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
   }
-  rinfo_.data_ += x;
+  rinfo_.data_ = x;
 }
 
 
@@ -331,27 +404,33 @@
 }
 
 
-inline int RelocIterator::GetPositionTypeTag() {
-  return *pos_ & ((1 << kPositionTypeTagBits) - 1);
+inline int RelocIterator::GetLocatableTypeTag() {
+  return *pos_ & ((1 << kLocatableTypeTagBits) - 1);
 }
 
 
-inline void RelocIterator::ReadTaggedData() {
+inline void RelocIterator::ReadTaggedId() {
   int8_t signed_b = *pos_;
   // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
-  rinfo_.data_ += signed_b >> kPositionTypeTagBits;
+  last_id_ += signed_b >> kLocatableTypeTagBits;
+  rinfo_.data_ = last_id_;
 }
 
 
-inline RelocInfo::Mode RelocIterator::DebugInfoModeFromTag(int tag) {
-  if (tag == kStatementPositionTag) {
-    return RelocInfo::STATEMENT_POSITION;
-  } else if (tag == kNonstatementPositionTag) {
-    return RelocInfo::POSITION;
-  } else {
-    ASSERT(tag == kCommentTag);
-    return RelocInfo::COMMENT;
-  }
+inline void RelocIterator::ReadTaggedPosition() {
+  int8_t signed_b = *pos_;
+  // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+  last_position_ += signed_b >> kLocatableTypeTagBits;
+  rinfo_.data_ = last_position_;
+}
+
+
+static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
+  ASSERT(tag == kNonstatementPositionTag ||
+         tag == kStatementPositionTag);
+  return (tag == kNonstatementPositionTag) ?
+         RelocInfo::POSITION :
+         RelocInfo::STATEMENT_POSITION;
 }
 
 
@@ -370,37 +449,64 @@
     } else if (tag == kCodeTargetTag) {
       ReadTaggedPC();
       if (SetMode(RelocInfo::CODE_TARGET)) return;
-    } else if (tag == kPositionTag) {
+    } else if (tag == kLocatableTag) {
       ReadTaggedPC();
       Advance();
-      // Check if we want source positions.
-      if (mode_mask_ & RelocInfo::kPositionMask) {
-        ReadTaggedData();
-        if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) return;
+      int locatable_tag = GetLocatableTypeTag();
+      if (locatable_tag == kCodeWithIdTag) {
+        if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
+          ReadTaggedId();
+          return;
+        }
+      } else {
+        // Compact encoding is never used for comments,
+        // so it must be a position.
+        ASSERT(locatable_tag == kNonstatementPositionTag ||
+               locatable_tag == kStatementPositionTag);
+        if (mode_mask_ & RelocInfo::kPositionMask) {
+          ReadTaggedPosition();
+          if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
+        }
       }
     } else {
       ASSERT(tag == kDefaultTag);
       int extra_tag = GetExtraTag();
-      if (extra_tag == kPCJumpTag) {
+      if (extra_tag == kPCJumpExtraTag) {
         int top_tag = GetTopTag();
         if (top_tag == kVariableLengthPCJumpTopTag) {
           AdvanceReadVariableLengthPCJump();
         } else {
           AdvanceReadPC();
         }
-      } else if (extra_tag == kDataJumpTag) {
-        // Check if we want debug modes (the only ones with data).
-        if (mode_mask_ & RelocInfo::kDebugMask) {
-          int top_tag = GetTopTag();
-          AdvanceReadData();
-          if (SetMode(DebugInfoModeFromTag(top_tag))) return;
+      } else if (extra_tag == kDataJumpExtraTag) {
+        int locatable_tag = GetTopTag();
+        if (locatable_tag == kCodeWithIdTag) {
+          if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
+            AdvanceReadId();
+            return;
+          }
+          Advance(kIntSize);
+        } else if (locatable_tag != kCommentTag) {
+          ASSERT(locatable_tag == kNonstatementPositionTag ||
+                 locatable_tag == kStatementPositionTag);
+          if (mode_mask_ & RelocInfo::kPositionMask) {
+            AdvanceReadPosition();
+            if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
+          } else {
+            Advance(kIntSize);
+          }
         } else {
-          // Otherwise, just skip over the data.
+          ASSERT(locatable_tag == kCommentTag);
+          if (SetMode(RelocInfo::COMMENT)) {
+            AdvanceReadData();
+            return;
+          }
           Advance(kIntptrSize);
         }
       } else {
         AdvanceReadPC();
-        if (SetMode(static_cast<RelocInfo::Mode>(extra_tag))) return;
+        int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
+        if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
       }
     }
   }
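A hedged sketch of how the rewritten iterator is typically driven; RelocIterator, RelocInfo and ModeMask are assumed from assembler.h, and `code` is a Code* obtained elsewhere, so this is illustrative rather than a complete program:

    // Visit only code-target relocation entries of a Code object.
    int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
               RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
    for (RelocIterator it(code, mask); !it.done(); it.next()) {
      RelocInfo* info = it.rinfo();
      if (info->rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
        // data() holds the id reconstructed by AdvanceReadId()/ReadTaggedId().
      }
    }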
@@ -416,6 +522,8 @@
   end_ = code->relocation_start();
   done_ = false;
   mode_mask_ = mode_mask;
+  last_id_ = 0;
+  last_position_ = 0;
   if (mode_mask_ == 0) pos_ = end_;
   next();
 }
@@ -429,6 +537,8 @@
   end_ = pos_ - desc.reloc_size;
   done_ = false;
   mode_mask_ = mode_mask;
+  last_id_ = 0;
+  last_position_ = 0;
   if (mode_mask_ == 0) pos_ = end_;
   next();
 }
@@ -456,6 +566,8 @@
       return "debug break";
     case RelocInfo::CODE_TARGET:
       return "code target";
+    case RelocInfo::CODE_TARGET_WITH_ID:
+      return "code target with id";
     case RelocInfo::GLOBAL_PROPERTY_CELL:
       return "global property cell";
     case RelocInfo::RUNTIME_ENTRY:
@@ -502,6 +614,9 @@
     Code* code = Code::GetCodeFromTargetAddress(target_address());
     PrintF(out, " (%s)  (%p)", Code::Kind2String(code->kind()),
            target_address());
+    if (rmode_ == CODE_TARGET_WITH_ID) {
+      PrintF(out, " (id=%d)", static_cast<int>(data_));
+    }
   } else if (IsPosition(rmode_)) {
     PrintF(out, "  (%" V8_PTR_PREFIX "d)", data());
   } else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
@@ -535,6 +650,7 @@
 #endif
     case CONSTRUCT_CALL:
     case CODE_TARGET_CONTEXT:
+    case CODE_TARGET_WITH_ID:
     case CODE_TARGET: {
       // convert inline target address to code object
       Address addr = target_address();
@@ -787,6 +903,18 @@
 }
 
 
+ExternalReference ExternalReference::address_of_zero() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::zero)));
+}
+
+
+ExternalReference ExternalReference::address_of_uint8_max_value() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::uint8_max_value)));
+}
+
+
 ExternalReference ExternalReference::address_of_negative_infinity() {
   return ExternalReference(reinterpret_cast<void*>(
       const_cast<double*>(&DoubleConstant::negative_infinity)));
@@ -899,7 +1027,7 @@
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(math_sin_double),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_CALL));
 }
 
 
@@ -907,7 +1035,7 @@
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(math_cos_double),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_CALL));
 }
 
 
@@ -915,7 +1043,7 @@
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(math_log_double),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_CALL));
 }
 
 
@@ -958,7 +1086,7 @@
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(power_double_double),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_FP_CALL));
 }
 
 
@@ -966,7 +1094,7 @@
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(power_double_int),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_INT_CALL));
 }
 
 
@@ -999,17 +1127,16 @@
     default:
       UNREACHABLE();
   }
-  // Passing true as 2nd parameter indicates that they return an fp value.
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(function),
-                                    FP_RETURN_CALL));
+                                    BUILTIN_FP_FP_CALL));
 }
 
 
 ExternalReference ExternalReference::compare_doubles(Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
                                     FUNCTION_ADDR(native_compare_doubles),
-                                    BUILTIN_CALL));
+                                    BUILTIN_COMPARE_CALL));
 }
 
 
diff --git a/src/assembler.h b/src/assembler.h
index 395bbd5..29f1ea9 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -35,6 +35,7 @@
 #ifndef V8_ASSEMBLER_H_
 #define V8_ASSEMBLER_H_
 
+#include "allocation.h"
 #include "gdb-jit.h"
 #include "runtime.h"
 #include "token.h"
@@ -42,7 +43,7 @@
 namespace v8 {
 namespace internal {
 
-
+const unsigned kNoASTId = -1;
 // -----------------------------------------------------------------------------
 // Platform independent assembler base class.
 
@@ -66,6 +67,8 @@
   static const double min_int;
   static const double one_half;
   static const double minus_zero;
+  static const double zero;
+  static const double uint8_max_value;
   static const double negative_infinity;
   static const double nan;
 };
@@ -79,18 +82,28 @@
 
 class Label BASE_EMBEDDED {
  public:
-  INLINE(Label())                 { Unuse(); }
+  enum Distance {
+    kNear, kFar
+  };
+
+  INLINE(Label()) {
+    Unuse();
+    UnuseNear();
+  }
   INLINE(~Label())                { ASSERT(!is_linked()); }
 
   INLINE(void Unuse())            { pos_ = 0; }
+  INLINE(void UnuseNear())        { near_link_pos_ = 0; }
 
   INLINE(bool is_bound() const)  { return pos_ <  0; }
-  INLINE(bool is_unused() const)  { return pos_ == 0; }
+  INLINE(bool is_unused() const)  { return pos_ == 0 && near_link_pos_ == 0; }
   INLINE(bool is_linked() const)  { return pos_ >  0; }
+  INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
 
   // Returns the position of bound or linked labels. Cannot be used
   // for unused labels.
   int pos() const;
+  int near_link_pos() const { return near_link_pos_ - 1; }
 
  private:
   // pos_ encodes both the binding state (via its sign)
@@ -101,13 +114,21 @@
   // pos_ >  0  linked label, pos() returns the last reference position
   int pos_;
 
+  // Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
+  int near_link_pos_;
+
   void bind_to(int pos)  {
     pos_ = -pos - 1;
     ASSERT(is_bound());
   }
-  void link_to(int pos)  {
-    pos_ =  pos + 1;
-    ASSERT(is_linked());
+  void link_to(int pos, Distance distance = kFar) {
+    if (distance == kNear) {
+      near_link_pos_ = pos + 1;
+      ASSERT(is_near_linked());
+    } else {
+      pos_ = pos + 1;
+      ASSERT(is_linked());
+    }
   }
 
   friend class Assembler;
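The pos_/near_link_pos_ bookkeeping above can be mirrored in a small self-contained toy; ToyLabel is illustrative only, not a V8 type:

    #include <cassert>

    // Toy mirror of the Label state encoding in the hunk above.
    struct ToyLabel {
      int pos_;            // 0: unused, >0: linked at pos_-1, <0: bound at -pos_-1
      int near_link_pos_;  // 0: unused, >0: near-linked at near_link_pos_-1
      ToyLabel() : pos_(0), near_link_pos_(0) { }
      void bind_to(int pos) { pos_ = -pos - 1; }
      void link_to(int pos, bool near) {
        if (near) near_link_pos_ = pos + 1; else pos_ = pos + 1;
      }
      bool is_bound() const { return pos_ < 0; }
      bool is_linked() const { return pos_ > 0; }
      bool is_near_linked() const { return near_link_pos_ > 0; }
    };

    int main() {
      ToyLabel label;
      label.link_to(40, true);   // forward near branch recorded at offset 40
      assert(label.is_near_linked() && !label.is_bound());
      label.bind_to(64);         // label bound at offset 64
      assert(label.is_bound());
      return 0;
    }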
@@ -118,57 +139,6 @@
 
 
 // -----------------------------------------------------------------------------
-// NearLabels are labels used for short jumps (in Intel jargon).
-// NearLabels should be used if it can be guaranteed that the jump range is
-// within -128 to +127. We already use short jumps when jumping backwards,
-// so using a NearLabel will only have performance impact if used for forward
-// jumps.
-class NearLabel BASE_EMBEDDED {
- public:
-  NearLabel() { Unuse(); }
-  ~NearLabel() { ASSERT(!is_linked()); }
-
-  void Unuse() {
-    pos_ = -1;
-    unresolved_branches_ = 0;
-#ifdef DEBUG
-    for (int i = 0; i < kMaxUnresolvedBranches; i++) {
-      unresolved_positions_[i] = -1;
-    }
-#endif
-  }
-
-  int pos() {
-    ASSERT(is_bound());
-    return pos_;
-  }
-
-  bool is_bound() { return pos_ >= 0; }
-  bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
-  bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
-
-  void bind_to(int position) {
-    ASSERT(!is_bound());
-    pos_ = position;
-  }
-
-  void link_to(int position) {
-    ASSERT(!is_bound());
-    ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
-    unresolved_positions_[unresolved_branches_++] = position;
-  }
-
- private:
-  static const int kMaxUnresolvedBranches = 8;
-  int pos_;
-  int unresolved_branches_;
-  int unresolved_positions_[kMaxUnresolvedBranches];
-
-  friend class Assembler;
-};
-
-
-// -----------------------------------------------------------------------------
 // Relocation information
 
 
@@ -211,10 +181,11 @@
 
   enum Mode {
     // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
+    CODE_TARGET,  // Code target that is not any of the kinds below.
+    CODE_TARGET_WITH_ID,
     CONSTRUCT_CALL,  // code target that is a call to a JavaScript constructor.
     CODE_TARGET_CONTEXT,  // Code target used for contextual loads and stores.
     DEBUG_BREAK,  // Code target for the debugger statement.
-    CODE_TARGET,  // Code target which is not any of the above.
     EMBEDDED_OBJECT,
     GLOBAL_PROPERTY_CELL,
 
@@ -230,10 +201,12 @@
 
     // add more as needed
     // Pseudo-types
-    NUMBER_OF_MODES,  // must be no greater than 14 - see RelocInfoWriter
+    NUMBER_OF_MODES,  // There are at most 14 modes with noncompact encoding.
     NONE,  // never recorded
-    LAST_CODE_ENUM = CODE_TARGET,
-    LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL
+    LAST_CODE_ENUM = DEBUG_BREAK,
+    LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
+    // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
+    LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID
   };
 
 
@@ -363,7 +336,8 @@
 
   static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
   static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
-  static const int kDebugMask = kPositionMask | 1 << COMMENT;
+  static const int kDataMask =
+      (1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
   static const int kApplyMask;  // Modes affected by apply. Depends on arch.
 
  private:
@@ -374,6 +348,19 @@
   byte* pc_;
   Mode rmode_;
   intptr_t data_;
+#ifdef V8_TARGET_ARCH_MIPS
+  // Code and Embedded Object pointers in mips are stored split
+  // across two consecutive 32-bit instructions. Heap management
+  // routines expect to access these pointers indirectly. The following
+  // location provides a place for these pointers to exist naturally
+  // when accessed via the Iterator.
+  Object* reconstructed_obj_ptr_;
+  // External-reference pointers are also split across instruction-pairs
+  // in mips, but are accessed via indirect pointers. This location
+  // provides a place for that pointer to exist naturally. Its address
+  // is returned by RelocInfo::target_reference_address().
+  Address reconstructed_adr_ptr_;
+#endif  // V8_TARGET_ARCH_MIPS
   friend class RelocIterator;
 };
 
@@ -382,9 +369,14 @@
 // lower addresses.
 class RelocInfoWriter BASE_EMBEDDED {
  public:
-  RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_data_(0) {}
-  RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc),
-                                         last_data_(0) {}
+  RelocInfoWriter() : pos_(NULL),
+                      last_pc_(NULL),
+                      last_id_(0),
+                      last_position_(0) {}
+  RelocInfoWriter(byte* pos, byte* pc) : pos_(pos),
+                                         last_pc_(pc),
+                                         last_id_(0),
+                                         last_position_(0) {}
 
   byte* pos() const { return pos_; }
   byte* last_pc() const { return last_pc_; }
@@ -409,13 +401,15 @@
   inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
   inline void WriteTaggedPC(uint32_t pc_delta, int tag);
   inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
+  inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
   inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
   inline void WriteTaggedData(intptr_t data_delta, int tag);
   inline void WriteExtraTag(int extra_tag, int top_tag);
 
   byte* pos_;
   byte* last_pc_;
-  intptr_t last_data_;
+  int last_id_;
+  int last_position_;
   DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
 };
 
@@ -457,12 +451,13 @@
   int GetTopTag();
   void ReadTaggedPC();
   void AdvanceReadPC();
+  void AdvanceReadId();
+  void AdvanceReadPosition();
   void AdvanceReadData();
   void AdvanceReadVariableLengthPCJump();
-  int GetPositionTypeTag();
-  void ReadTaggedData();
-
-  static RelocInfo::Mode DebugInfoModeFromTag(int tag);
+  int GetLocatableTypeTag();
+  void ReadTaggedId();
+  void ReadTaggedPosition();
 
   // If the given mode is wanted, set it in rinfo_ and return true.
   // Else return false. Used for efficiently skipping unwanted modes.
@@ -475,6 +470,8 @@
   RelocInfo rinfo_;
   bool done_;
   int mode_mask_;
+  int last_id_;
+  int last_position_;
   DISALLOW_COPY_AND_ASSIGN(RelocIterator);
 };
 
@@ -503,9 +500,21 @@
     // MaybeObject* f(v8::internal::Arguments).
     BUILTIN_CALL,  // default
 
+    // Builtin that takes float arguments and returns an int.
+    // int f(double, double).
+    BUILTIN_COMPARE_CALL,
+
     // Builtin call that returns floating point.
     // double f(double, double).
-    FP_RETURN_CALL,
+    BUILTIN_FP_FP_CALL,
+
+    // Builtin call that returns floating point.
+    // double f(double).
+    BUILTIN_FP_CALL,
+
+    // Builtin call that returns floating point.
+    // double f(double, int).
+    BUILTIN_FP_INT_CALL,
 
     // Direct call to API function callback.
     // Handle<Value> f(v8::Arguments&)
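Spelled out as plain declarations, the per-type signatures the new redirect kinds stand for (taken from the comments above; the names are placeholders, not V8 symbols):

    double builtin_fp_fp(double x, double y);   // BUILTIN_FP_FP_CALL
    double builtin_fp(double x);                // BUILTIN_FP_CALL
    double builtin_fp_int(double x, int y);     // BUILTIN_FP_INT_CALL
    int builtin_compare(double x, double y);    // BUILTIN_COMPARE_CALL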
@@ -613,6 +622,8 @@
   static ExternalReference address_of_min_int();
   static ExternalReference address_of_one_half();
   static ExternalReference address_of_minus_zero();
+  static ExternalReference address_of_zero();
+  static ExternalReference address_of_uint8_max_value();
   static ExternalReference address_of_negative_infinity();
   static ExternalReference address_of_nan();
 
@@ -649,10 +660,11 @@
 
   // This lets you register a function that rewrites all external references.
   // Used by the ARM simulator to catch calls to external references.
-  static void set_redirector(ExternalReferenceRedirector* redirector) {
+  static void set_redirector(Isolate* isolate,
+                             ExternalReferenceRedirector* redirector) {
     // We can't stack them.
-    ASSERT(Isolate::Current()->external_reference_redirector() == NULL);
-    Isolate::Current()->set_external_reference_redirector(
+    ASSERT(isolate->external_reference_redirector() == NULL);
+    isolate->set_external_reference_redirector(
         reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
   }
 
@@ -819,6 +831,28 @@
 double power_double_int(double x, int y);
 double power_double_double(double x, double y);
 
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example this can be used to
+// generate safepoint data after calls for crankshaft.
+class CallWrapper {
+ public:
+  CallWrapper() { }
+  virtual ~CallWrapper() { }
+  // Called just before emitting a call. Argument is the size of the generated
+  // call code.
+  virtual void BeforeCall(int call_size) const = 0;
+  // Called just after emitting a call, i.e., at the return site for the call.
+  virtual void AfterCall() const = 0;
+};
+
+class NullCallWrapper : public CallWrapper {
+ public:
+  NullCallWrapper() { }
+  virtual ~NullCallWrapper() { }
+  virtual void BeforeCall(int call_size) const { }
+  virtual void AfterCall() const { }
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_ASSEMBLER_H_
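A sketch of a CallWrapper subclass shows how the new hooks are meant to be used; CountingCallWrapper is invented for illustration, and the real Crankshaft wrapper records safepoint data instead of counting:

    // Illustrative subclass of the CallWrapper interface added above.
    class CountingCallWrapper : public CallWrapper {
     public:
      CountingCallWrapper() : calls_(0) { }
      virtual void BeforeCall(int) const { }      // emitted just before the call
      virtual void AfterCall() const { ++calls_; }  // emitted at the return site
      int calls() const { return calls_; }
     private:
      mutable int calls_;  // mutable because the hooks are const.
    };

A macro-assembler call site would typically pass such an instance where it currently passes NullCallWrapper(), so BeforeCall/AfterCall bracket the emitted call instruction.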
diff --git a/src/ast-inl.h b/src/ast-inl.h
index d80684a..c2bd613 100644
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ -31,6 +31,7 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "scopes.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ast.cc b/src/ast.cc
index 8ab09b3..b4abf54 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -337,15 +337,6 @@
 }
 
 
-BinaryOperation::BinaryOperation(Assignment* assignment) {
-  ASSERT(assignment->is_compound());
-  op_ = assignment->binary_op();
-  left_ = assignment->target();
-  right_ = assignment->value();
-  pos_ = assignment->position();
-}
-
-
 // ----------------------------------------------------------------------------
 // Inlining support
 
@@ -413,8 +404,7 @@
 
 
 bool Throw::IsInlineable() const {
-  // TODO(1143): Make functions containing throw inlineable.
-  return false;
+  return exception()->IsInlineable();
 }
 
 
@@ -733,14 +723,15 @@
 }
 
 
-void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
+                              CallKind call_kind) {
   Property* property = expression()->AsProperty();
   ASSERT(property != NULL);
   // Specialize for the receiver types seen at runtime.
   Literal* key = property->key()->AsLiteral();
   ASSERT(key != NULL && key->handle()->IsString());
   Handle<String> name = Handle<String>::cast(key->handle());
-  receiver_types_ = oracle->CallReceiverTypes(this, name);
+  receiver_types_ = oracle->CallReceiverTypes(this, name, call_kind);
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) {
     if (receiver_types_ != NULL) {
@@ -1159,6 +1150,7 @@
       statements_(statements),
       position_(pos),
       compare_type_(NONE),
+      compare_id_(AstNode::GetNextId()),
       entry_id_(AstNode::GetNextId()) {
 }
 
diff --git a/src/ast.h b/src/ast.h
index 65a25a9..0ac1644 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -28,6 +28,7 @@
 #ifndef V8_AST_H_
 #define V8_AST_H_
 
+#include "allocation.h"
 #include "execution.h"
 #include "factory.h"
 #include "jsregexp.h"
@@ -661,6 +662,7 @@
   void set_position(int pos) { position_ = pos; }
 
   int EntryId() { return entry_id_; }
+  int CompareId() { return compare_id_; }
 
   // Type feedback information.
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
@@ -674,6 +676,7 @@
   int position_;
   enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
   CompareTypeFeedback compare_type_;
+  int compare_id_;
   int entry_id_;
 };
 
@@ -1276,7 +1279,8 @@
   ZoneList<Expression*>* arguments() const { return arguments_; }
   virtual int position() const { return pos_; }
 
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle,
+                          CallKind call_kind);
   virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   CheckType check_type() const { return check_type_; }
@@ -1390,8 +1394,8 @@
 
 class UnaryOperation: public Expression {
  public:
-  UnaryOperation(Token::Value op, Expression* expression)
-      : op_(op), expression_(expression) {
+  UnaryOperation(Token::Value op, Expression* expression, int pos)
+      : op_(op), expression_(expression), pos_(pos) {
     ASSERT(Token::IsUnaryOp(op));
   }
 
@@ -1403,10 +1407,12 @@
 
   Token::Value op() const { return op_; }
   Expression* expression() const { return expression_; }
+  virtual int position() const { return pos_; }
 
  private:
   Token::Value op_;
   Expression* expression_;
+  int pos_;
 };
 
 
@@ -1423,9 +1429,6 @@
         : AstNode::kNoNumber;
   }
 
-  // Create the binary operation corresponding to a compound assignment.
-  explicit BinaryOperation(Assignment* assignment);
-
   DECLARE_NODE_TYPE(BinaryOperation)
 
   virtual bool IsInlineable() const;
diff --git a/src/atomicops_internals_x86_gcc.cc b/src/atomicops_internals_x86_gcc.cc
index a572564..181c202 100644
--- a/src/atomicops_internals_x86_gcc.cc
+++ b/src/atomicops_internals_x86_gcc.cc
@@ -57,6 +57,9 @@
 
 #if defined(cpuid)        // initialize the struct only on x86
 
+namespace v8 {
+namespace internal {
+
 // Set the flags so that code will run correctly and conservatively, so even
 // if we haven't been initialized yet, we're probably single threaded, and our
 // default values should hopefully be pretty safe.
@@ -65,8 +68,14 @@
   false,          // no SSE2
 };
 
+} }  // namespace v8::internal
+
+namespace {
+
 // Initialize the AtomicOps_Internalx86CPUFeatures struct.
-static void AtomicOps_Internalx86CPUFeaturesInit() {
+void AtomicOps_Internalx86CPUFeaturesInit() {
+  using v8::internal::AtomicOps_Internalx86CPUFeatures;
+
   uint32_t eax;
   uint32_t ebx;
   uint32_t ecx;
@@ -107,8 +116,6 @@
   AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
 }
 
-namespace {
-
 class AtomicOpsx86Initializer {
  public:
   AtomicOpsx86Initializer() {
diff --git a/src/atomicops_internals_x86_gcc.h b/src/atomicops_internals_x86_gcc.h
index 3f17fa0..6e55b50 100644
--- a/src/atomicops_internals_x86_gcc.h
+++ b/src/atomicops_internals_x86_gcc.h
@@ -30,6 +30,9 @@
 #ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
 
+namespace v8 {
+namespace internal {
+
 // This struct is not part of the public API of this module; clients may not
 // use it.
 // Features of this x86.  Values may not be correct before main() is run,
@@ -43,9 +46,6 @@
 
 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
-namespace v8 {
-namespace internal {
-
 // 32-bit low-level operations on any platform.
 
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 5b87640..d32ac80 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -141,7 +141,8 @@
 
 class Genesis BASE_EMBEDDED {
  public:
-  Genesis(Handle<Object> global_object,
+  Genesis(Isolate* isolate,
+          Handle<Object> global_object,
           v8::Handle<v8::ObjectTemplate> global_template,
           v8::ExtensionConfiguration* extensions);
   ~Genesis() { }
@@ -150,8 +151,13 @@
 
   Genesis* previous() { return previous_; }
 
+  Isolate* isolate() const { return isolate_; }
+  Factory* factory() const { return isolate_->factory(); }
+  Heap* heap() const { return isolate_->heap(); }
+
  private:
   Handle<Context> global_context_;
+  Isolate* isolate_;
 
   // There may be more than one active genesis object: When GC is
   // triggered during environment creation there may be weak handle
@@ -163,9 +169,9 @@
   // Creates some basic objects. Used for creating a context from scratch.
   void CreateRoots();
   // Creates the empty function.  Used for creating a context from scratch.
-  Handle<JSFunction> CreateEmptyFunction();
+  Handle<JSFunction> CreateEmptyFunction(Isolate* isolate);
   // Creates the ThrowTypeError function. ECMA 5th Ed. 13.2.3
-  Handle<JSFunction> CreateThrowTypeErrorFunction(Builtins::Name builtin);
+  Handle<JSFunction> GetThrowTypeErrorFunction();
 
   void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
   // Creates the global objects using the global and the template passed in
@@ -193,7 +199,9 @@
   // Installs the contents of the native .js files on the global objects.
   // Used for creating a context from scratch.
   void InstallNativeFunctions();
+  void InstallExperimentalNativeFunctions();
   bool InstallNatives();
+  bool InstallExperimentalNatives();
   void InstallBuiltinFunctionIds();
   void InstallJSFunctionResultCaches();
   void InitializeNormalizedMapCaches();
@@ -239,7 +247,8 @@
       Handle<FixedArray> arguments,
       Handle<FixedArray> caller);
 
-  static bool CompileBuiltin(int index);
+  static bool CompileBuiltin(Isolate* isolate, int index);
+  static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
   static bool CompileNative(Vector<const char> name, Handle<String> source);
   static bool CompileScriptCached(Vector<const char> name,
                                   Handle<String> source,
@@ -256,6 +265,7 @@
   // These are the final, writable prototype, maps.
   Handle<Map> function_instance_map_writable_prototype_;
   Handle<Map> strict_mode_function_instance_map_writable_prototype_;
+  Handle<JSFunction> throw_type_error_function;
 
   BootstrapperActive active_;
   friend class Bootstrapper;
@@ -269,12 +279,13 @@
 
 
 Handle<Context> Bootstrapper::CreateEnvironment(
+    Isolate* isolate,
     Handle<Object> global_object,
     v8::Handle<v8::ObjectTemplate> global_template,
     v8::ExtensionConfiguration* extensions) {
   HandleScope scope;
   Handle<Context> env;
-  Genesis genesis(global_object, global_template, extensions);
+  Genesis genesis(isolate, global_object, global_template, extensions);
   env = genesis.result();
   if (!env.is_null()) {
     if (InstallExtensions(env, extensions)) {
@@ -287,15 +298,16 @@
 
 static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
   // object.__proto__ = proto;
+  Factory* factory = object->GetIsolate()->factory();
   Handle<Map> old_to_map = Handle<Map>(object->map());
-  Handle<Map> new_to_map = FACTORY->CopyMapDropTransitions(old_to_map);
+  Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
   new_to_map->set_prototype(*proto);
   object->set_map(*new_to_map);
 }
 
 
 void Bootstrapper::DetachGlobal(Handle<Context> env) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = env->GetIsolate()->factory();
   JSGlobalProxy::cast(env->global_proxy())->set_context(*factory->null_value());
   SetObjectPrototype(Handle<JSObject>(env->global_proxy()),
                      factory->null_value());
@@ -322,7 +334,7 @@
                                           Handle<JSObject> prototype,
                                           Builtins::Name call,
                                           bool is_ecma_native) {
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = target->GetIsolate();
   Factory* factory = isolate->factory();
   Handle<String> symbol = factory->LookupAsciiSymbol(name);
   Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
@@ -344,30 +356,32 @@
 
 Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
     PrototypePropertyMode prototypeMode) {
-  Factory* factory = Isolate::Current()->factory();
   Handle<DescriptorArray> descriptors =
-      factory->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
+      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
+                                    ? 4
+                                    : 5);
   PropertyAttributes attributes =
       static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   {  // Add length.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionLength);
-    CallbacksDescriptor d(*factory->length_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
+    CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
     descriptors->Set(0, &d);
   }
   {  // Add name.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionName);
-    CallbacksDescriptor d(*factory->name_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
+    CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
     descriptors->Set(1, &d);
   }
   {  // Add arguments.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionArguments);
-    CallbacksDescriptor d(*factory->arguments_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign =
+        factory()->NewForeign(&Accessors::FunctionArguments);
+    CallbacksDescriptor d(*factory()->arguments_symbol(), *foreign, attributes);
     descriptors->Set(2, &d);
   }
   {  // Add caller.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionCaller);
-    CallbacksDescriptor d(*factory->caller_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionCaller);
+    CallbacksDescriptor d(*factory()->caller_symbol(), *foreign, attributes);
     descriptors->Set(3, &d);
   }
   if (prototypeMode != DONT_ADD_PROTOTYPE) {
@@ -375,8 +389,9 @@
     if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
       attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
     }
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionPrototype);
-    CallbacksDescriptor d(*factory->prototype_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign =
+        factory()->NewForeign(&Accessors::FunctionPrototype);
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
     descriptors->Set(4, &d);
   }
   descriptors->Sort();
@@ -385,7 +400,7 @@
 
 
 Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
-  Handle<Map> map = FACTORY->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   Handle<DescriptorArray> descriptors =
       ComputeFunctionInstanceDescriptor(prototype_mode);
   map->set_instance_descriptors(*descriptors);
@@ -394,7 +409,7 @@
 }
 
 
-Handle<JSFunction> Genesis::CreateEmptyFunction() {
+Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
   // Allocate the map for function instances. Maps are allocated first and their
   // prototypes patched later, once the empty function is created.
 
@@ -422,7 +437,6 @@
   function_instance_map_writable_prototype_ =
       CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
 
-  Isolate* isolate = Isolate::Current();
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
 
@@ -491,28 +505,31 @@
     PrototypePropertyMode prototypeMode,
     Handle<FixedArray> arguments,
     Handle<FixedArray> caller) {
-  Factory* factory = Isolate::Current()->factory();
   Handle<DescriptorArray> descriptors =
-      factory->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
+      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
+                                    ? 4
+                                    : 5);
   PropertyAttributes attributes = static_cast<PropertyAttributes>(
       DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   {  // length
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionLength);
-    CallbacksDescriptor d(*factory->length_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
+    CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
     descriptors->Set(0, &d);
   }
   {  // name
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionName);
-    CallbacksDescriptor d(*factory->name_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
+    CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
     descriptors->Set(1, &d);
   }
   {  // arguments
-    CallbacksDescriptor d(*factory->arguments_symbol(), *arguments, attributes);
+    CallbacksDescriptor d(*factory()->arguments_symbol(),
+                          *arguments,
+                          attributes);
     descriptors->Set(2, &d);
   }
   {  // caller
-    CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
+    CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes);
     descriptors->Set(3, &d);
   }
 
@@ -521,8 +538,9 @@
     if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
       attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
     }
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionPrototype);
-    CallbacksDescriptor d(*factory->prototype_symbol(), *proxy, attributes);
+    Handle<Foreign> foreign =
+        factory()->NewForeign(&Accessors::FunctionPrototype);
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
     descriptors->Set(4, &d);
   }
 
@@ -532,25 +550,22 @@
 
 
 // ECMAScript 5th Edition, 13.2.3
-Handle<JSFunction> Genesis::CreateThrowTypeErrorFunction(
-    Builtins::Name builtin) {
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
+Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
+  if (throw_type_error_function.is_null()) {
+    Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
+    throw_type_error_function =
+      factory()->NewFunctionWithoutPrototype(name, kNonStrictMode);
+    Handle<Code> code(isolate()->builtins()->builtin(
+        Builtins::kStrictModePoisonPill));
+    throw_type_error_function->set_map(
+        global_context()->function_map());
+    throw_type_error_function->set_code(*code);
+    throw_type_error_function->shared()->set_code(*code);
+    throw_type_error_function->shared()->DontAdaptArguments();
 
-  Handle<String> name = factory->LookupAsciiSymbol("ThrowTypeError");
-  Handle<JSFunction> throw_type_error =
-      factory->NewFunctionWithoutPrototype(name, kStrictMode);
-  Handle<Code> code = Handle<Code>(
-      isolate->builtins()->builtin(builtin));
-
-  throw_type_error->set_map(global_context()->strict_mode_function_map());
-  throw_type_error->set_code(*code);
-  throw_type_error->shared()->set_code(*code);
-  throw_type_error->shared()->DontAdaptArguments();
-
-  PreventExtensions(throw_type_error);
-
-  return throw_type_error;
+    PreventExtensions(throw_type_error_function);
+  }
+  return throw_type_error_function;
 }
 
 
@@ -559,7 +574,7 @@
     Handle<JSFunction> empty_function,
     Handle<FixedArray> arguments_callbacks,
     Handle<FixedArray> caller_callbacks) {
-  Handle<Map> map = FACTORY->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   Handle<DescriptorArray> descriptors =
       ComputeStrictFunctionInstanceDescriptor(prototype_mode,
                                               arguments_callbacks,
@@ -574,7 +589,7 @@
 void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
   // Create the callbacks arrays for ThrowTypeError functions.
   // The get/set callbacks are filled in after the maps are created below.
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = empty->GetIsolate()->factory();
   Handle<FixedArray> arguments = factory->NewFixedArray(2, TENURED);
   Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
 
@@ -607,23 +622,21 @@
       CreateStrictModeFunctionMap(
           ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
 
-  // Create the ThrowTypeError function instances.
-  Handle<JSFunction> arguments_throw =
-      CreateThrowTypeErrorFunction(Builtins::kStrictFunctionArguments);
-  Handle<JSFunction> caller_throw =
-      CreateThrowTypeErrorFunction(Builtins::kStrictFunctionCaller);
+  // Create the ThrowTypeError function instance.
+  Handle<JSFunction> throw_function =
+      GetThrowTypeErrorFunction();
 
   // Complete the callback fixed arrays.
-  arguments->set(0, *arguments_throw);
-  arguments->set(1, *arguments_throw);
-  caller->set(0, *caller_throw);
-  caller->set(1, *caller_throw);
+  arguments->set(0, *throw_function);
+  arguments->set(1, *throw_function);
+  caller->set(0, *throw_function);
+  caller->set(1, *throw_function);
 }
 
 
 static void AddToWeakGlobalContextList(Context* context) {
   ASSERT(context->IsGlobalContext());
-  Heap* heap = Isolate::Current()->heap();
+  Heap* heap = context->GetIsolate()->heap();
 #ifdef DEBUG
   { // NOLINT
     ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
@@ -641,15 +654,14 @@
 
 
 void Genesis::CreateRoots() {
-  Isolate* isolate = Isolate::Current();
   // Allocate the global context FixedArray first and then patch the
   // closure and extension object later (we need the empty function
   // and the global object, but in order to create those, we need the
   // global context).
-  global_context_ = Handle<Context>::cast(isolate->global_handles()->Create(
-              *isolate->factory()->NewGlobalContext()));
+  global_context_ = Handle<Context>::cast(isolate()->global_handles()->Create(
+              *factory()->NewGlobalContext()));
   AddToWeakGlobalContextList(*global_context_);
-  isolate->set_context(*global_context());
+  isolate()->set_context(*global_context());
 
   // Allocate the message listeners object.
   {
@@ -692,17 +704,13 @@
     }
   }
 
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-  Heap* heap = isolate->heap();
-
   if (js_global_template.is_null()) {
-    Handle<String> name = Handle<String>(heap->empty_symbol());
-    Handle<Code> code = Handle<Code>(isolate->builtins()->builtin(
+    Handle<String> name = Handle<String>(heap()->empty_symbol());
+    Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
         Builtins::kIllegal));
     js_global_function =
-        factory->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
-                             JSGlobalObject::kSize, code, true);
+        factory()->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+                               JSGlobalObject::kSize, code, true);
     // Change the constructor property of the prototype of the
     // hidden global function to refer to the Object function.
     Handle<JSObject> prototype =
@@ -710,20 +718,20 @@
             JSObject::cast(js_global_function->instance_prototype()));
     SetLocalPropertyNoThrow(
         prototype,
-        factory->constructor_symbol(),
-        isolate->object_function(),
+        factory()->constructor_symbol(),
+        isolate()->object_function(),
         NONE);
   } else {
     Handle<FunctionTemplateInfo> js_global_constructor(
         FunctionTemplateInfo::cast(js_global_template->constructor()));
     js_global_function =
-        factory->CreateApiFunction(js_global_constructor,
-                                   factory->InnerGlobalObject);
+        factory()->CreateApiFunction(js_global_constructor,
+                                     factory()->InnerGlobalObject);
   }
 
   js_global_function->initial_map()->set_is_hidden_prototype();
   Handle<GlobalObject> inner_global =
-      factory->NewGlobalObject(js_global_function);
+      factory()->NewGlobalObject(js_global_function);
   if (inner_global_out != NULL) {
     *inner_global_out = inner_global;
   }
@@ -731,23 +739,23 @@
   // Step 2: create or re-initialize the global proxy object.
   Handle<JSFunction> global_proxy_function;
   if (global_template.IsEmpty()) {
-    Handle<String> name = Handle<String>(heap->empty_symbol());
-    Handle<Code> code = Handle<Code>(isolate->builtins()->builtin(
+    Handle<String> name = Handle<String>(heap()->empty_symbol());
+    Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
         Builtins::kIllegal));
     global_proxy_function =
-        factory->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
-                             JSGlobalProxy::kSize, code, true);
+        factory()->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
+                               JSGlobalProxy::kSize, code, true);
   } else {
     Handle<ObjectTemplateInfo> data =
         v8::Utils::OpenHandle(*global_template);
     Handle<FunctionTemplateInfo> global_constructor(
             FunctionTemplateInfo::cast(data->constructor()));
     global_proxy_function =
-        factory->CreateApiFunction(global_constructor,
-                                   factory->OuterGlobalObject);
+        factory()->CreateApiFunction(global_constructor,
+                                     factory()->OuterGlobalObject);
   }
 
-  Handle<String> global_name = factory->LookupAsciiSymbol("global");
+  Handle<String> global_name = factory()->LookupAsciiSymbol("global");
   global_proxy_function->shared()->set_instance_class_name(*global_name);
   global_proxy_function->initial_map()->set_is_access_check_needed(true);
 
@@ -761,7 +769,7 @@
         Handle<JSGlobalProxy>::cast(global_object));
   } else {
     return Handle<JSGlobalProxy>::cast(
-        factory->NewJSObject(global_proxy_function, TENURED));
+        factory()->NewJSObject(global_proxy_function, TENURED));
   }
 }
 
@@ -786,7 +794,7 @@
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
   ForceSetProperty(builtins_global,
-                   FACTORY->LookupAsciiSymbol("global"),
+                   factory()->LookupAsciiSymbol("global"),
                    inner_global,
                    attributes);
   // Setup the reference from the global object to the builtins object.
@@ -814,7 +822,7 @@
   // object reinitialization.
   global_context()->set_security_token(*inner_global);
 
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = inner_global->GetIsolate();
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
 
@@ -841,10 +849,10 @@
     // is 1.
     array_function->shared()->set_length(1);
     Handle<DescriptorArray> array_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory->CopyAppendForeignDescriptor(
             factory->empty_descriptor_array(),
             factory->length_symbol(),
-            factory->NewProxy(&Accessors::ArrayLength),
+            factory->NewForeign(&Accessors::ArrayLength),
             static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
 
     // Cache the fast JavaScript array map
@@ -884,10 +892,10 @@
     global_context()->set_string_function(*string_fun);
     // Add 'length' property to strings.
     Handle<DescriptorArray> string_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory->CopyAppendForeignDescriptor(
             factory->empty_descriptor_array(),
             factory->length_symbol(),
-            factory->NewProxy(&Accessors::StringLength),
+            factory->NewForeign(&Accessors::StringLength),
             static_cast<PropertyAttributes>(DONT_ENUM |
                                             DONT_DELETE |
                                             READ_ONLY));
@@ -1052,16 +1060,14 @@
     Handle<FixedArray> callee = factory->NewFixedArray(2, TENURED);
     Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
 
-    Handle<JSFunction> callee_throw =
-        CreateThrowTypeErrorFunction(Builtins::kStrictArgumentsCallee);
-    Handle<JSFunction> caller_throw =
-        CreateThrowTypeErrorFunction(Builtins::kStrictArgumentsCaller);
+    Handle<JSFunction> throw_function =
+        GetThrowTypeErrorFunction();
 
     // Install the ThrowTypeError functions.
-    callee->set(0, *callee_throw);
-    callee->set(1, *callee_throw);
-    caller->set(0, *caller_throw);
-    caller->set(1, *caller_throw);
+    callee->set(0, *throw_function);
+    callee->set(1, *throw_function);
+    caller->set(0, *throw_function);
+    caller->set(1, *throw_function);
 
     // Create the descriptor array for the arguments object.
     Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
@@ -1164,17 +1170,26 @@
 }
 
 
-bool Genesis::CompileBuiltin(int index) {
+bool Genesis::CompileBuiltin(Isolate* isolate, int index) {
   Vector<const char> name = Natives::GetScriptName(index);
   Handle<String> source_code =
-      Isolate::Current()->bootstrapper()->NativesSourceLookup(index);
+      isolate->bootstrapper()->NativesSourceLookup(index);
+  return CompileNative(name, source_code);
+}
+
+
+bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
+  Vector<const char> name = ExperimentalNatives::GetScriptName(index);
+  Factory* factory = isolate->factory();
+  Handle<String> source_code =
+      factory->NewStringFromAscii(ExperimentalNatives::GetScriptSource(index));
   return CompileNative(name, source_code);
 }
 
 
 bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
   HandleScope scope;
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = source->GetIsolate();
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate->debugger()->set_compiling_natives(true);
 #endif
@@ -1199,7 +1214,7 @@
                                   v8::Extension* extension,
                                   Handle<Context> top_context,
                                   bool use_runtime_context) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = source->GetIsolate()->factory();
   HandleScope scope;
   Handle<SharedFunctionInfo> function_info;
 
@@ -1239,22 +1254,21 @@
                      ? top_context->builtins()
                      : top_context->global());
   bool has_pending_exception;
-  Handle<Object> result =
-      Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
+  Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
   if (has_pending_exception) return false;
   return true;
 }
 
 
-#define INSTALL_NATIVE(Type, name, var)                                        \
-  Handle<String> var##_name = factory->LookupAsciiSymbol(name);                \
-  Object* var##_native =                                                       \
-      global_context()->builtins()->GetPropertyNoExceptionThrown(*var##_name); \
+#define INSTALL_NATIVE(Type, name, var)                                       \
+  Handle<String> var##_name = factory()->LookupAsciiSymbol(name);             \
+  Object* var##_native =                                                      \
+      global_context()->builtins()->GetPropertyNoExceptionThrown(             \
+           *var##_name);                                                      \
   global_context()->set_##var(Type::cast(var##_native));
 
 
 void Genesis::InstallNativeFunctions() {
-  Factory* factory = Isolate::Current()->factory();
   HandleScope scope;
   INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
   INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
@@ -1272,30 +1286,34 @@
   INSTALL_NATIVE(JSObject, "functionCache", function_cache);
 }
 
+void Genesis::InstallExperimentalNativeFunctions() {
+  if (FLAG_harmony_proxies) {
+    INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
+  }
+}
+
 #undef INSTALL_NATIVE
 
 
 bool Genesis::InstallNatives() {
   HandleScope scope;
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-  Heap* heap = isolate->heap();
 
   // Create a function for the builtins object. Allocate space for the
   // JavaScript builtins, a reference to the builtins object
   // (itself) and a reference to the global_context directly in the object.
   Handle<Code> code = Handle<Code>(
-      isolate->builtins()->builtin(Builtins::kIllegal));
+      isolate()->builtins()->builtin(Builtins::kIllegal));
   Handle<JSFunction> builtins_fun =
-      factory->NewFunction(factory->empty_symbol(), JS_BUILTINS_OBJECT_TYPE,
-                           JSBuiltinsObject::kSize, code, true);
+      factory()->NewFunction(factory()->empty_symbol(),
+                             JS_BUILTINS_OBJECT_TYPE,
+                             JSBuiltinsObject::kSize, code, true);
 
-  Handle<String> name = factory->LookupAsciiSymbol("builtins");
+  Handle<String> name = factory()->LookupAsciiSymbol("builtins");
   builtins_fun->shared()->set_instance_class_name(*name);
 
   // Allocate the builtins object.
   Handle<JSBuiltinsObject> builtins =
-      Handle<JSBuiltinsObject>::cast(factory->NewGlobalObject(builtins_fun));
+      Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
   builtins->set_builtins(*builtins);
   builtins->set_global_context(*global_context());
   builtins->set_global_receiver(*builtins);
@@ -1306,7 +1324,7 @@
   // global object.
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-  Handle<String> global_symbol = factory->LookupAsciiSymbol("global");
+  Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
   Handle<Object> global_obj(global_context()->global());
   SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
 
@@ -1315,12 +1333,13 @@
 
   // Create a bridge function that has context in the global context.
   Handle<JSFunction> bridge =
-      factory->NewFunction(factory->empty_symbol(), factory->undefined_value());
-  ASSERT(bridge->context() == *isolate->global_context());
+      factory()->NewFunction(factory()->empty_symbol(),
+                             factory()->undefined_value());
+  ASSERT(bridge->context() == *isolate()->global_context());
 
   // Allocate the builtins context.
   Handle<Context> context =
-    factory->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
+    factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
   context->set_global(*builtins);  // override builtins global object
 
   global_context()->set_runtime_context(*context);
@@ -1329,123 +1348,127 @@
     // Builtin functions for Script.
     Handle<JSFunction> script_fun =
         InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
-                        isolate->initial_object_prototype(),
+                        isolate()->initial_object_prototype(),
                         Builtins::kIllegal, false);
     Handle<JSObject> prototype =
-        factory->NewJSObject(isolate->object_function(), TENURED);
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
     SetPrototype(script_fun, prototype);
     global_context()->set_script_function(*script_fun);
 
     // Add 'source' and 'data' property to scripts.
     PropertyAttributes common_attributes =
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-    Handle<Proxy> proxy_source = factory->NewProxy(&Accessors::ScriptSource);
+    Handle<Foreign> foreign_source =
+        factory()->NewForeign(&Accessors::ScriptSource);
     Handle<DescriptorArray> script_descriptors =
-        factory->CopyAppendProxyDescriptor(
-            factory->empty_descriptor_array(),
-            factory->LookupAsciiSymbol("source"),
-            proxy_source,
+        factory()->CopyAppendForeignDescriptor(
+            factory()->empty_descriptor_array(),
+            factory()->LookupAsciiSymbol("source"),
+            foreign_source,
             common_attributes);
-    Handle<Proxy> proxy_name = factory->NewProxy(&Accessors::ScriptName);
+    Handle<Foreign> foreign_name =
+        factory()->NewForeign(&Accessors::ScriptName);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("name"),
-            proxy_name,
+            factory()->LookupAsciiSymbol("name"),
+            foreign_name,
             common_attributes);
-    Handle<Proxy> proxy_id = factory->NewProxy(&Accessors::ScriptId);
+    Handle<Foreign> foreign_id = factory()->NewForeign(&Accessors::ScriptId);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("id"),
-            proxy_id,
+            factory()->LookupAsciiSymbol("id"),
+            foreign_id,
             common_attributes);
-    Handle<Proxy> proxy_line_offset =
-        factory->NewProxy(&Accessors::ScriptLineOffset);
+    Handle<Foreign> foreign_line_offset =
+        factory()->NewForeign(&Accessors::ScriptLineOffset);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("line_offset"),
-            proxy_line_offset,
+            factory()->LookupAsciiSymbol("line_offset"),
+            foreign_line_offset,
             common_attributes);
-    Handle<Proxy> proxy_column_offset =
-        factory->NewProxy(&Accessors::ScriptColumnOffset);
+    Handle<Foreign> foreign_column_offset =
+        factory()->NewForeign(&Accessors::ScriptColumnOffset);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("column_offset"),
-            proxy_column_offset,
+            factory()->LookupAsciiSymbol("column_offset"),
+            foreign_column_offset,
             common_attributes);
-    Handle<Proxy> proxy_data = factory->NewProxy(&Accessors::ScriptData);
+    Handle<Foreign> foreign_data =
+        factory()->NewForeign(&Accessors::ScriptData);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("data"),
-            proxy_data,
+            factory()->LookupAsciiSymbol("data"),
+            foreign_data,
             common_attributes);
-    Handle<Proxy> proxy_type = factory->NewProxy(&Accessors::ScriptType);
+    Handle<Foreign> foreign_type =
+        factory()->NewForeign(&Accessors::ScriptType);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("type"),
-            proxy_type,
+            factory()->LookupAsciiSymbol("type"),
+            foreign_type,
             common_attributes);
-    Handle<Proxy> proxy_compilation_type =
-        factory->NewProxy(&Accessors::ScriptCompilationType);
+    Handle<Foreign> foreign_compilation_type =
+        factory()->NewForeign(&Accessors::ScriptCompilationType);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("compilation_type"),
-            proxy_compilation_type,
+            factory()->LookupAsciiSymbol("compilation_type"),
+            foreign_compilation_type,
             common_attributes);
-    Handle<Proxy> proxy_line_ends =
-        factory->NewProxy(&Accessors::ScriptLineEnds);
+    Handle<Foreign> foreign_line_ends =
+        factory()->NewForeign(&Accessors::ScriptLineEnds);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("line_ends"),
-            proxy_line_ends,
+            factory()->LookupAsciiSymbol("line_ends"),
+            foreign_line_ends,
             common_attributes);
-    Handle<Proxy> proxy_context_data =
-        factory->NewProxy(&Accessors::ScriptContextData);
+    Handle<Foreign> foreign_context_data =
+        factory()->NewForeign(&Accessors::ScriptContextData);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("context_data"),
-            proxy_context_data,
+            factory()->LookupAsciiSymbol("context_data"),
+            foreign_context_data,
             common_attributes);
-    Handle<Proxy> proxy_eval_from_script =
-        factory->NewProxy(&Accessors::ScriptEvalFromScript);
+    Handle<Foreign> foreign_eval_from_script =
+        factory()->NewForeign(&Accessors::ScriptEvalFromScript);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("eval_from_script"),
-            proxy_eval_from_script,
+            factory()->LookupAsciiSymbol("eval_from_script"),
+            foreign_eval_from_script,
             common_attributes);
-    Handle<Proxy> proxy_eval_from_script_position =
-        factory->NewProxy(&Accessors::ScriptEvalFromScriptPosition);
+    Handle<Foreign> foreign_eval_from_script_position =
+        factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("eval_from_script_position"),
-            proxy_eval_from_script_position,
+            factory()->LookupAsciiSymbol("eval_from_script_position"),
+            foreign_eval_from_script_position,
             common_attributes);
-    Handle<Proxy> proxy_eval_from_function_name =
-        factory->NewProxy(&Accessors::ScriptEvalFromFunctionName);
+    Handle<Foreign> foreign_eval_from_function_name =
+        factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendForeignDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("eval_from_function_name"),
-            proxy_eval_from_function_name,
+            factory()->LookupAsciiSymbol("eval_from_function_name"),
+            foreign_eval_from_function_name,
             common_attributes);
 
     Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
     script_map->set_instance_descriptors(*script_descriptors);
 
     // Allocate the empty script.
-    Handle<Script> script = factory->NewScript(factory->empty_string());
+    Handle<Script> script = factory()->NewScript(factory()->empty_string());
     script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
-    heap->public_set_empty_script(*script);
+    heap()->public_set_empty_script(*script);
   }
   {
     // Builtin function for OpaqueReference -- a JSValue-based object,
@@ -1454,10 +1477,10 @@
     Handle<JSFunction> opaque_reference_fun =
         InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
                         JSValue::kSize,
-                        isolate->initial_object_prototype(),
+                        isolate()->initial_object_prototype(),
                         Builtins::kIllegal, false);
     Handle<JSObject> prototype =
-        factory->NewJSObject(isolate->object_function(), TENURED);
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
     SetPrototype(opaque_reference_fun, prototype);
     global_context()->set_opaque_reference_function(*opaque_reference_fun);
   }
@@ -1476,23 +1499,23 @@
                         "InternalArray",
                         JS_ARRAY_TYPE,
                         JSArray::kSize,
-                        isolate->initial_object_prototype(),
+                        isolate()->initial_object_prototype(),
                         Builtins::kArrayCode,
                         true);
     Handle<JSObject> prototype =
-        factory->NewJSObject(isolate->object_function(), TENURED);
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
     SetPrototype(array_function, prototype);
 
     array_function->shared()->set_construct_stub(
-        isolate->builtins()->builtin(Builtins::kArrayConstructCode));
+        isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
     array_function->shared()->DontAdaptArguments();
 
     // Make "length" magic on instances.
     Handle<DescriptorArray> array_descriptors =
-        factory->CopyAppendProxyDescriptor(
-            factory->empty_descriptor_array(),
-            factory->length_symbol(),
-            factory->NewProxy(&Accessors::ArrayLength),
+        factory()->CopyAppendForeignDescriptor(
+            factory()->empty_descriptor_array(),
+            factory()->length_symbol(),
+            factory()->NewForeign(&Accessors::ArrayLength),
             static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
 
     array_function->initial_map()->set_instance_descriptors(
@@ -1508,8 +1531,7 @@
   for (int i = Natives::GetDebuggerCount();
        i < Natives::GetBuiltinsCount();
        i++) {
-    Vector<const char> name = Natives::GetScriptName(i);
-    if (!CompileBuiltin(i)) return false;
+    if (!CompileBuiltin(isolate(), i)) return false;
     // TODO(ager): We really only need to install the JS builtin
     // functions on the builtins object after compiling and running
     // runtime.js.
@@ -1527,9 +1549,9 @@
       HeapObject::cast(string_function->initial_map()->prototype())->map());
 
   // Install Function.prototype.call and apply.
-  { Handle<String> key = factory->function_class_symbol();
+  { Handle<String> key = factory()->function_class_symbol();
     Handle<JSFunction> function =
-        Handle<JSFunction>::cast(GetProperty(isolate->global(), key));
+        Handle<JSFunction>::cast(GetProperty(isolate()->global(), key));
     Handle<JSObject> proto =
         Handle<JSObject>(JSObject::cast(function->instance_prototype()));
 
@@ -1573,7 +1595,7 @@
 
     // Add initial map.
     Handle<Map> initial_map =
-        factory->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
+        factory()->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
     initial_map->set_constructor(*array_constructor);
 
     // Set prototype on map.
@@ -1587,13 +1609,13 @@
     ASSERT_EQ(1, array_descriptors->number_of_descriptors());
 
     Handle<DescriptorArray> reresult_descriptors =
-        factory->NewDescriptorArray(3);
+        factory()->NewDescriptorArray(3);
 
     reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
 
     int enum_index = 0;
     {
-      FieldDescriptor index_field(heap->index_symbol(),
+      FieldDescriptor index_field(heap()->index_symbol(),
                                   JSRegExpResult::kIndexIndex,
                                   NONE,
                                   enum_index++);
@@ -1601,7 +1623,7 @@
     }
 
     {
-      FieldDescriptor input_field(heap->input_symbol(),
+      FieldDescriptor input_field(heap()->input_symbol(),
                                   JSRegExpResult::kInputIndex,
                                   NONE,
                                   enum_index++);
@@ -1626,10 +1648,27 @@
 }
 
 
+bool Genesis::InstallExperimentalNatives() {
+  for (int i = ExperimentalNatives::GetDebuggerCount();
+       i < ExperimentalNatives::GetBuiltinsCount();
+       i++) {
+    if (FLAG_harmony_proxies &&
+        strcmp(ExperimentalNatives::GetScriptName(i).start(),
+               "native proxy.js") == 0) {
+      if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+    }
+  }
+
+  InstallExperimentalNativeFunctions();
+
+  return true;
+}
+
+
 static Handle<JSObject> ResolveBuiltinIdHolder(
     Handle<Context> global_context,
     const char* holder_expr) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = global_context->GetIsolate()->factory();
   Handle<GlobalObject> global(global_context->global());
   const char* period_pos = strchr(holder_expr, '.');
   if (period_pos == NULL) {
@@ -1648,7 +1687,8 @@
 static void InstallBuiltinFunctionId(Handle<JSObject> holder,
                                      const char* function_name,
                                      BuiltinFunctionId id) {
-  Handle<String> name = FACTORY->LookupAsciiSymbol(function_name);
+  Factory* factory = holder->GetIsolate()->factory();
+  Handle<String> name = factory->LookupAsciiSymbol(function_name);
   Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
   Handle<JSFunction> function(JSFunction::cast(function_object));
   function->shared()->set_function_data(Smi::FromInt(id));
@@ -1675,13 +1715,14 @@
   F(16, global_context()->regexp_function())
 
 
-static FixedArray* CreateCache(int size, JSFunction* factory) {
+static FixedArray* CreateCache(int size, Handle<JSFunction> factory_function) {
+  Factory* factory = factory_function->GetIsolate()->factory();
   // Caches are supposed to live for a long time, allocate in old space.
   int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
   // Cannot use cast as object is not fully initialized yet.
   JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
-      *FACTORY->NewFixedArrayWithHoles(array_size, TENURED));
-  cache->set(JSFunctionResultCache::kFactoryIndex, factory);
+      *factory->NewFixedArrayWithHoles(array_size, TENURED));
+  cache->set(JSFunctionResultCache::kFactoryIndex, *factory_function);
   cache->MakeZeroSize();
   return cache;
 }
@@ -1698,9 +1739,9 @@
 
   int index = 0;
 
-#define F(size, func) do {                           \
-    FixedArray* cache = CreateCache((size), (func)); \
-    caches->set(index++, cache);                     \
+#define F(size, func) do {                                              \
+    FixedArray* cache = CreateCache((size), Handle<JSFunction>(func));  \
+    caches->set(index++, cache);                                        \
   } while (false)
 
   JSFUNCTION_RESULT_CACHE_LIST(F);
@@ -1720,7 +1761,7 @@
 
 bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
                                      v8::ExtensionConfiguration* extensions) {
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = global_context->GetIsolate();
   BootstrapperActive active;
   SaveContext saved_context(isolate);
   isolate->set_context(*global_context);
@@ -1731,7 +1772,7 @@
 
 
 void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = global_context->GetIsolate()->factory();
   HandleScope scope;
   Handle<JSGlobalObject> js_global(
       JSGlobalObject::cast(global_context->global()));
@@ -1867,9 +1908,10 @@
 
 bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
   HandleScope scope;
+  Factory* factory = builtins->GetIsolate()->factory();
   for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
     Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
-    Handle<String> name = FACTORY->LookupAsciiSymbol(Builtins::GetName(id));
+    Handle<String> name = factory->LookupAsciiSymbol(Builtins::GetName(id));
     Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
     Handle<JSFunction> function
         = Handle<JSFunction>(JSFunction::cast(function_object));
@@ -1918,13 +1960,12 @@
   ASSERT(object->IsInstanceOf(
       FunctionTemplateInfo::cast(object_template->constructor())));
 
-  Isolate* isolate = Isolate::Current();
   bool pending_exception = false;
   Handle<JSObject> obj =
       Execution::InstantiateObject(object_template, &pending_exception);
   if (pending_exception) {
-    ASSERT(isolate->has_pending_exception());
-    isolate->clear_pending_exception();
+    ASSERT(isolate()->has_pending_exception());
+    isolate()->clear_pending_exception();
     return false;
   }
   TransferObject(obj, object);
@@ -1979,8 +2020,9 @@
           break;
         case NORMAL:
           // Do not occur since the from object has fast properties.
+        case HANDLER:
         case INTERCEPTOR:
-          // No element in instance descriptors have interceptor type.
+          // No element in instance descriptors has proxy or interceptor type.
           UNREACHABLE();
           break;
       }
@@ -2023,6 +2065,7 @@
 
 void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
   HandleScope outer;
+  Factory* factory = from->GetIsolate()->factory();
 
   ASSERT(!from->IsJSArray());
   ASSERT(!to->IsJSArray());
@@ -2032,7 +2075,7 @@
 
   // Transfer the prototype (new map is needed).
   Handle<Map> old_to_map = Handle<Map>(to->map());
-  Handle<Map> new_to_map = FACTORY->CopyMapDropTransitions(old_to_map);
+  Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
   new_to_map->set_prototype(from->map()->prototype());
   to->set_map(*new_to_map);
 }
@@ -2053,10 +2096,10 @@
 }
 
 
-Genesis::Genesis(Handle<Object> global_object,
+Genesis::Genesis(Isolate* isolate,
+                 Handle<Object> global_object,
                  v8::Handle<v8::ObjectTemplate> global_template,
-                 v8::ExtensionConfiguration* extensions) {
-  Isolate* isolate = Isolate::Current();
+                 v8::ExtensionConfiguration* extensions) : isolate_(isolate) {
   result_ = Handle<Context>::null();
   // If V8 isn't running and cannot be initialized, just return.
   if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
@@ -2086,7 +2129,7 @@
   } else {
     // We get here if there was no context snapshot.
     CreateRoots();
-    Handle<JSFunction> empty_function = CreateEmptyFunction();
+    Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
     CreateStrictModeFunctionMaps(empty_function);
     Handle<GlobalObject> inner_global;
     Handle<JSGlobalProxy> global_proxy =
@@ -2103,6 +2146,9 @@
     isolate->counters()->contexts_created_from_scratch()->Increment();
   }
 
+  // Install experimental natives.
+  if (!InstallExperimentalNatives()) return;
+
   result_ = global_context_;
 }
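
The bulk of the bootstrapper.cc hunks above apply one transformation: code that used the FACTORY macro or Isolate::Current() now recovers the isolate from a handle that is already in scope (object->GetIsolate(), env->GetIsolate(), and so on) or through the new Genesis accessors isolate(), factory(), and heap(). A minimal sketch of the pattern, modelled on SetObjectPrototype above; `object` stands in for whichever handle the surrounding function already holds, and this is illustrative only, not an additional change:

    // Before (removed in this change): implicit thread-local lookup.
    //   Factory* factory = Isolate::Current()->factory();
    // After: recover the isolate from a handle the function already holds.
    Factory* factory = object->GetIsolate()->factory();
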
 
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 3e158d6..2e05452 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -29,6 +29,8 @@
 #ifndef V8_BOOTSTRAPPER_H_
 #define V8_BOOTSTRAPPER_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -93,6 +95,7 @@
   // Creates a JavaScript Global Context with initial object graph.
   // The returned value is a global handle cast to V8Environment*.
   Handle<Context> CreateEnvironment(
+      Isolate* isolate,
       Handle<Object> global_object,
       v8::Handle<v8::ObjectTemplate> global_template,
       v8::ExtensionConfiguration* extensions);
@@ -113,7 +116,7 @@
   bool IsActive() const { return nesting_ != 0; }
 
   // Support for thread preemption.
-  RLYSTC int ArchiveSpacePerThread();
+  static int ArchiveSpacePerThread();
   char* ArchiveState(char* to);
   char* RestoreState(char* from);
   void FreeThreadResources();
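
With the signature change above, callers of Bootstrapper::CreateEnvironment pass the isolate explicitly instead of letting Genesis fall back to Isolate::Current(). A caller would look roughly like this (sketch only; `global_object`, `global_template`, and `extensions` are assumed to be in scope, and how the embedder obtains `isolate` is up to it):

    Isolate* isolate = Isolate::Current();  // or however the caller already has it
    Handle<Context> env = isolate->bootstrapper()->CreateEnvironment(
        isolate, global_object, global_template, extensions);
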
diff --git a/src/builtins.cc b/src/builtins.cc
index 1846590..c34b074 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -982,34 +982,12 @@
 // Strict mode poison pills
 
 
-BUILTIN(StrictArgumentsCallee) {
+BUILTIN(StrictModePoisonPill) {
   HandleScope scope;
   return isolate->Throw(*isolate->factory()->NewTypeError(
-      "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
+      "strict_poison_pill", HandleVector<Object>(NULL, 0)));
 }
 
-
-BUILTIN(StrictArgumentsCaller) {
-  HandleScope scope;
-  return isolate->Throw(*isolate->factory()->NewTypeError(
-      "strict_arguments_caller", HandleVector<Object>(NULL, 0)));
-}
-
-
-BUILTIN(StrictFunctionCaller) {
-  HandleScope scope;
-  return isolate->Throw(*isolate->factory()->NewTypeError(
-      "strict_function_caller", HandleVector<Object>(NULL, 0)));
-}
-
-
-BUILTIN(StrictFunctionArguments) {
-  HandleScope scope;
-  return isolate->Throw(*isolate->factory()->NewTypeError(
-      "strict_function_arguments", HandleVector<Object>(NULL, 0)));
-}
-
-
 // -----------------------------------------------------------------------------
 //
 
@@ -1025,6 +1003,8 @@
                                 Object** argv,
                                 FunctionTemplateInfo* info) {
   Object* recv = argv[0];
+  // API calls are only supported with JSObject receivers.
+  if (!recv->IsJSObject()) return heap->null_value();
   Object* sig_obj = info->signature();
   if (sig_obj->IsUndefined()) return recv;
   SignatureInfo* sig = SignatureInfo::cast(sig_obj);
@@ -1338,8 +1318,18 @@
 }
 
 
+static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateRuntimeGetProperty(masm);
+}
+
+
 static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateMiss(masm);
+  KeyedLoadIC::GenerateMiss(masm, false);
+}
+
+
+static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateMiss(masm, true);
 }
 
 
@@ -1428,7 +1418,17 @@
 
 
 static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateMiss(masm);
+  KeyedStoreIC::GenerateMiss(masm, false);
+}
+
+
+static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateMiss(masm, true);
+}
+
+
+static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateSlow(masm);
 }
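
With the four per-site strict-mode builtins collapsed into the single StrictModePoisonPill above, Genesis installs one shared ThrowTypeError function as both getter and setter on the poisoned callee/caller properties. A condensed recap of that wiring, matching the bootstrapper.cc hunk earlier in this change (illustrative, not additional code):

    Handle<JSFunction> throw_function = GetThrowTypeErrorFunction();
    // Getter and setter slots of each callbacks FixedArray point at the
    // same poison-pill function.
    callee->set(0, *throw_function);
    callee->set(1, *throw_function);
    caller->set(0, *throw_function);
    caller->set(1, *throw_function);
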
 
 
diff --git a/src/builtins.h b/src/builtins.h
index bc0facb..eca998c 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -60,122 +60,125 @@
   V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS)                    \
   V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)                 \
                                                                     \
-  V(StrictArgumentsCallee, NO_EXTRA_ARGUMENTS)                      \
-  V(StrictArgumentsCaller, NO_EXTRA_ARGUMENTS)                      \
-  V(StrictFunctionCaller, NO_EXTRA_ARGUMENTS)                       \
-  V(StrictFunctionArguments, NO_EXTRA_ARGUMENTS)
-
+  V(StrictModePoisonPill, NO_EXTRA_ARGUMENTS)
 
 // Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V)                                           \
-  V(ArgumentsAdaptorTrampoline,     BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSConstructCall,                BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSConstructStubCountdown,       BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSConstructStubGeneric,         BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSConstructStubApi,             BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSEntryTrampoline,              BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(JSConstructEntryTrampoline,     BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(LazyCompile,                    BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(LazyRecompile,                  BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(NotifyDeoptimized,              BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(NotifyLazyDeoptimized,          BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(NotifyOSR,                      BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(LoadIC_Miss,                    BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(KeyedLoadIC_Miss,               BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_Miss,                   BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(KeyedStoreIC_Miss,              BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(LoadIC_Initialize,              LOAD_IC, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_PreMonomorphic,          LOAD_IC, PREMONOMORPHIC,        \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_Normal,                  LOAD_IC, MONOMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_ArrayLength,             LOAD_IC, MONOMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_StringLength,            LOAD_IC, MONOMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_StringWrapperLength,     LOAD_IC, MONOMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_FunctionPrototype,       LOAD_IC, MONOMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-  V(LoadIC_Megamorphic,             LOAD_IC, MEGAMORPHIC,           \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(KeyedLoadIC_Initialize,         KEYED_LOAD_IC, UNINITIALIZED,   \
-                                    Code::kNoExtraICState)          \
-  V(KeyedLoadIC_PreMonomorphic,     KEYED_LOAD_IC, PREMONOMORPHIC,  \
-                                    Code::kNoExtraICState)          \
-  V(KeyedLoadIC_Generic,            KEYED_LOAD_IC, MEGAMORPHIC,     \
-                                    Code::kNoExtraICState)          \
-  V(KeyedLoadIC_String,             KEYED_LOAD_IC, MEGAMORPHIC,     \
-                                    Code::kNoExtraICState)          \
-  V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC,     \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(StoreIC_Initialize,             STORE_IC, UNINITIALIZED,        \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_ArrayLength,            STORE_IC, MONOMORPHIC,          \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_Normal,                 STORE_IC, MONOMORPHIC,          \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_Megamorphic,            STORE_IC, MEGAMORPHIC,          \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_GlobalProxy,            STORE_IC, MEGAMORPHIC,          \
-                                    Code::kNoExtraICState)          \
-  V(StoreIC_Initialize_Strict,      STORE_IC, UNINITIALIZED,        \
-                                    kStrictMode)                    \
-  V(StoreIC_ArrayLength_Strict,     STORE_IC, MONOMORPHIC,          \
-                                    kStrictMode)                    \
-  V(StoreIC_Normal_Strict,          STORE_IC, MONOMORPHIC,          \
-                                    kStrictMode)                    \
-  V(StoreIC_Megamorphic_Strict,     STORE_IC, MEGAMORPHIC,          \
-                                    kStrictMode)                    \
-  V(StoreIC_GlobalProxy_Strict,     STORE_IC, MEGAMORPHIC,          \
-                                    kStrictMode)                    \
-                                                                    \
-  V(KeyedStoreIC_Initialize,        KEYED_STORE_IC, UNINITIALIZED,  \
-                                    Code::kNoExtraICState)          \
-  V(KeyedStoreIC_Generic,           KEYED_STORE_IC, MEGAMORPHIC,    \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED,  \
-                                    kStrictMode)                    \
-  V(KeyedStoreIC_Generic_Strict,    KEYED_STORE_IC, MEGAMORPHIC,    \
-                                    kStrictMode)                    \
-                                                                    \
-  /* Uses KeyedLoadIC_Initialize; must be after in list. */         \
-  V(FunctionCall,                   BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(FunctionApply,                  BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(ArrayCode,                      BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-  V(ArrayConstructCode,             BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(StringConstructCode,            BUILTIN, UNINITIALIZED,         \
-                                    Code::kNoExtraICState)          \
-                                                                    \
-  V(OnStackReplacement,             BUILTIN, UNINITIALIZED,         \
+#define BUILTIN_LIST_A(V)                                               \
+  V(ArgumentsAdaptorTrampoline,     BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSConstructCall,                BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSConstructStubCountdown,       BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSConstructStubGeneric,         BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSConstructStubApi,             BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSEntryTrampoline,              BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(JSConstructEntryTrampoline,     BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(LazyCompile,                    BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(LazyRecompile,                  BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(NotifyDeoptimized,              BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(NotifyLazyDeoptimized,          BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(NotifyOSR,                      BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(LoadIC_Miss,                    BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_Miss,               BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_MissForceGeneric,   BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_Slow,               BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_Miss,                   BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedStoreIC_Miss,              BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedStoreIC_MissForceGeneric,  BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(KeyedStoreIC_Slow,              BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_Initialize,              LOAD_IC, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_PreMonomorphic,          LOAD_IC, PREMONOMORPHIC,            \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_Normal,                  LOAD_IC, MONOMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_ArrayLength,             LOAD_IC, MONOMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_StringLength,            LOAD_IC, MONOMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_StringWrapperLength,     LOAD_IC, MONOMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_FunctionPrototype,       LOAD_IC, MONOMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+  V(LoadIC_Megamorphic,             LOAD_IC, MEGAMORPHIC,               \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(KeyedLoadIC_Initialize,         KEYED_LOAD_IC, UNINITIALIZED,       \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_PreMonomorphic,     KEYED_LOAD_IC, PREMONOMORPHIC,      \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_Generic,            KEYED_LOAD_IC, MEGAMORPHIC,         \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_String,             KEYED_LOAD_IC, MEGAMORPHIC,         \
+                                    Code::kNoExtraICState)              \
+  V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC,         \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(StoreIC_Initialize,             STORE_IC, UNINITIALIZED,            \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_ArrayLength,            STORE_IC, MONOMORPHIC,              \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_Normal,                 STORE_IC, MONOMORPHIC,              \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_Megamorphic,            STORE_IC, MEGAMORPHIC,              \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_GlobalProxy,            STORE_IC, MEGAMORPHIC,              \
+                                    Code::kNoExtraICState)              \
+  V(StoreIC_Initialize_Strict,      STORE_IC, UNINITIALIZED,            \
+                                    kStrictMode)                        \
+  V(StoreIC_ArrayLength_Strict,     STORE_IC, MONOMORPHIC,              \
+                                    kStrictMode)                        \
+  V(StoreIC_Normal_Strict,          STORE_IC, MONOMORPHIC,              \
+                                    kStrictMode)                        \
+  V(StoreIC_Megamorphic_Strict,     STORE_IC, MEGAMORPHIC,              \
+                                    kStrictMode)                        \
+  V(StoreIC_GlobalProxy_Strict,     STORE_IC, MEGAMORPHIC,              \
+                                    kStrictMode)                        \
+                                                                        \
+  V(KeyedStoreIC_Initialize,        KEYED_STORE_IC, UNINITIALIZED,      \
+                                    Code::kNoExtraICState)              \
+  V(KeyedStoreIC_Generic,           KEYED_STORE_IC, MEGAMORPHIC,        \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED,      \
+                                    kStrictMode)                        \
+  V(KeyedStoreIC_Generic_Strict,    KEYED_STORE_IC, MEGAMORPHIC,        \
+                                    kStrictMode)                        \
+                                                                        \
+  /* Uses KeyedLoadIC_Initialize; must be after in list. */             \
+  V(FunctionCall,                   BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(FunctionApply,                  BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(ArrayCode,                      BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+  V(ArrayConstructCode,             BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(StringConstructCode,            BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
+                                                                        \
+  V(OnStackReplacement,             BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)
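Note (illustration only, not part of this change): BUILTIN_LIST_A above is an X-macro list; each consumer defines V to pick out the columns it needs and then expands the list once. A minimal standalone sketch of that pattern follows — every name in it (BUILTIN_LIST_EXAMPLE, DECLARE_ID, kBuiltinNames, ...) is invented for illustration and not taken from the tree.

#include <cstdio>

#define BUILTIN_LIST_EXAMPLE(V)            \
  V(FunctionCall,  BUILTIN, UNINITIALIZED) \
  V(FunctionApply, BUILTIN, UNINITIALIZED)

// Expand once into an enum of ids (the kind/state columns are ignored here).
#define DECLARE_ID(name, kind, state) k##name,
enum BuiltinId { BUILTIN_LIST_EXAMPLE(DECLARE_ID) kBuiltinCount };
#undef DECLARE_ID

// Expand again into a parallel table of printable names.
#define DECLARE_NAME(name, kind, state) #name,
static const char* kBuiltinNames[] = { BUILTIN_LIST_EXAMPLE(DECLARE_NAME) };
#undef DECLARE_NAME

int main() {
  for (int i = 0; i < kBuiltinCount; ++i) {
    std::printf("%d: %s\n", i, kBuiltinNames[i]);
  }
  return 0;
}

Keeping the code kind, inline-cache state, and extra IC state as separate macro arguments is what lets the different expansions above stay in sync from a single list.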
 
 
diff --git a/src/char-predicates.h b/src/char-predicates.h
index dac1eb8..5a901a2 100644
--- a/src/char-predicates.h
+++ b/src/char-predicates.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,8 @@
 #ifndef V8_CHAR_PREDICATES_H_
 #define V8_CHAR_PREDICATES_H_
 
+#include "unicode.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index f680c60..d12def8 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 
 #include "bootstrapper.h"
 #include "code-stubs.h"
+#include "stub-cache.h"
 #include "factory.h"
 #include "gdb-jit.h"
 #include "macro-assembler.h"
@@ -197,6 +198,12 @@
     case CompareIC::HEAP_NUMBERS:
       GenerateHeapNumbers(masm);
       break;
+    case CompareIC::STRINGS:
+      GenerateStrings(masm);
+      break;
+    case CompareIC::SYMBOLS:
+      GenerateSymbols(masm);
+      break;
     case CompareIC::OBJECTS:
       GenerateObjects(masm);
       break;
@@ -237,4 +244,24 @@
 }
 
 
+void KeyedLoadFastElementStub::Generate(MacroAssembler* masm) {
+  KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
+}
+
+
+void KeyedStoreFastElementStub::Generate(MacroAssembler* masm) {
+  KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
+}
+
+
+void KeyedLoadExternalArrayStub::Generate(MacroAssembler* masm) {
+  KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, array_type_);
+}
+
+
+void KeyedStoreExternalArrayStub::Generate(MacroAssembler* masm) {
+  KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, array_type_);
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 56ef072..7ab0b7c 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -28,6 +28,7 @@
 #ifndef V8_CODE_STUBS_H_
 #define V8_CODE_STUBS_H_
 
+#include "allocation.h"
 #include "globals.h"
 
 namespace v8 {
@@ -37,7 +38,8 @@
 // as only the stubs up to and including Instanceof allow nested stub calls.
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)  \
   V(CallFunction)                        \
-  V(TypeRecordingBinaryOp)               \
+  V(UnaryOp)                             \
+  V(BinaryOp)                            \
   V(StringAdd)                           \
   V(SubString)                           \
   V(StringCompare)                       \
@@ -53,7 +55,6 @@
   V(FastNewClosure)                      \
   V(FastNewContext)                      \
   V(FastCloneShallowArray)               \
-  V(GenericUnaryOp)                      \
   V(RevertToNumber)                      \
   V(ToBoolean)                           \
   V(ToNumber)                            \
@@ -64,7 +65,12 @@
   V(NumberToString)                      \
   V(CEntry)                              \
   V(JSEntry)                             \
-  V(DebuggerStatement)
+  V(KeyedLoadFastElement)                \
+  V(KeyedStoreFastElement)               \
+  V(KeyedLoadExternalArray)              \
+  V(KeyedStoreExternalArray)             \
+  V(DebuggerStatement)                   \
+  V(StringDictionaryNegativeLookup)
 
 // List of code stubs only used on ARM platforms.
 #ifdef V8_TARGET_ARCH_ARM
@@ -81,7 +87,8 @@
 // List of code stubs only used on MIPS platforms.
 #ifdef V8_TARGET_ARCH_MIPS
 #define CODE_STUB_LIST_MIPS(V)  \
-  V(RegExpCEntry)
+  V(RegExpCEntry)               \
+  V(DirectCEntry)
 #else
 #define CODE_STUB_LIST_MIPS(V)
 #endif
@@ -162,10 +169,10 @@
   // lazily generated function should be fully optimized or not.
   virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
 
-  // TypeRecordingBinaryOpStub needs to override this.
+  // BinaryOpStub needs to override this.
   virtual int GetCodeKind();
 
-  // TypeRecordingBinaryOpStub needs to override this.
+  // BinaryOpStub needs to override this.
   virtual InlineCacheState GetICState() {
     return UNINITIALIZED;
   }
@@ -388,54 +395,6 @@
 };
 
 
-enum NegativeZeroHandling {
-  kStrictNegativeZero,
-  kIgnoreNegativeZero
-};
-
-
-enum UnaryOpFlags {
-  NO_UNARY_FLAGS = 0,
-  NO_UNARY_SMI_CODE_IN_STUB = 1 << 0
-};
-
-
-class GenericUnaryOpStub : public CodeStub {
- public:
-  GenericUnaryOpStub(Token::Value op,
-                     UnaryOverwriteMode overwrite,
-                     UnaryOpFlags flags,
-                     NegativeZeroHandling negative_zero = kStrictNegativeZero)
-      : op_(op),
-        overwrite_(overwrite),
-        include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) == 0),
-        negative_zero_(negative_zero) { }
-
- private:
-  Token::Value op_;
-  UnaryOverwriteMode overwrite_;
-  bool include_smi_code_;
-  NegativeZeroHandling negative_zero_;
-
-  class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
-  class IncludeSmiCodeField: public BitField<bool, 1, 1> {};
-  class NegativeZeroField: public BitField<NegativeZeroHandling, 2, 1> {};
-  class OpField: public BitField<Token::Value, 3, kMinorBits - 3> {};
-
-  Major MajorKey() { return GenericUnaryOp; }
-  int MinorKey() {
-    return OpField::encode(op_) |
-        OverwriteField::encode(overwrite_) |
-        IncludeSmiCodeField::encode(include_smi_code_) |
-        NegativeZeroField::encode(negative_zero_);
-  }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName();
-};
-
-
 class MathPowStub: public CodeStub {
  public:
   MathPowStub() {}
@@ -471,6 +430,8 @@
 
   void GenerateSmis(MacroAssembler* masm);
   void GenerateHeapNumbers(MacroAssembler* masm);
+  void GenerateSymbols(MacroAssembler* masm);
+  void GenerateStrings(MacroAssembler* masm);
   void GenerateObjects(MacroAssembler* masm);
   void GenerateMiss(MacroAssembler* masm);
 
@@ -784,8 +745,9 @@
   }
 
   InLoopFlag InLoop() { return in_loop_; }
-  bool ReceiverMightBeValue() {
-    return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
+
+  bool ReceiverMightBeImplicit() {
+    return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
   }
 };
 
@@ -964,6 +926,86 @@
   DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
 };
 
+#ifdef DEBUG
+#define DECLARE_ARRAY_STUB_PRINT(name) void Print() { PrintF(#name); }
+#else
+#define DECLARE_ARRAY_STUB_PRINT(name)
+#endif
+
+
+class KeyedLoadFastElementStub : public CodeStub {
+ public:
+  explicit KeyedLoadFastElementStub() {
+  }
+
+  Major MajorKey() { return KeyedLoadFastElement; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "KeyedLoadFastElementStub"; }
+
+  DECLARE_ARRAY_STUB_PRINT(KeyedLoadFastElementStub)
+};
+
+
+class KeyedStoreFastElementStub : public CodeStub {
+ public:
+  explicit KeyedStoreFastElementStub(bool is_js_array)
+      : is_js_array_(is_js_array) { }
+
+  Major MajorKey() { return KeyedStoreFastElement; }
+  int MinorKey() { return is_js_array_ ? 1 : 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "KeyedStoreFastElementStub"; }
+
+  DECLARE_ARRAY_STUB_PRINT(KeyedStoreFastElementStub)
+
+ private:
+  bool is_js_array_;
+};
+
+
+class KeyedLoadExternalArrayStub : public CodeStub {
+ public:
+  explicit KeyedLoadExternalArrayStub(ExternalArrayType array_type)
+      : array_type_(array_type) { }
+
+  Major MajorKey() { return KeyedLoadExternalArray; }
+  int MinorKey() { return array_type_; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "KeyedLoadExternalArrayStub"; }
+
+  DECLARE_ARRAY_STUB_PRINT(KeyedLoadExternalArrayStub)
+
+ protected:
+  ExternalArrayType array_type_;
+};
+
+
+class KeyedStoreExternalArrayStub : public CodeStub {
+ public:
+  explicit KeyedStoreExternalArrayStub(ExternalArrayType array_type)
+      : array_type_(array_type) { }
+
+  Major MajorKey() { return KeyedStoreExternalArray; }
+  int MinorKey() { return array_type_; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "KeyedStoreExternalArrayStub"; }
+
+  DECLARE_ARRAY_STUB_PRINT(KeyedStoreExternalArrayStub)
+
+ protected:
+  ExternalArrayType array_type_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODE_STUBS_H_
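Note (illustration only, not part of this change): stubs identify their compiled code through MajorKey()/MinorKey(). Minor keys with several fields, like the one built by the removed GenericUnaryOpStub above, pack those fields with a BitField helper; the sketch below uses simplified stand-ins rather than the tree's actual utils.h definitions.

#include <cstdio>

// Minimal stand-in for the BitField pattern used by stub MinorKey() methods.
template <class T, int shift, int size>
struct BitField {
  static unsigned encode(T value) { return static_cast<unsigned>(value) << shift; }
  static T decode(unsigned field) {
    return static_cast<T>((field >> shift) & ((1u << size) - 1));
  }
};

enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };

typedef BitField<UnaryOverwriteMode, 0, 1> OverwriteField;
typedef BitField<bool, 1, 1> IncludeSmiCodeField;

int main() {
  unsigned minor_key = OverwriteField::encode(UNARY_OVERWRITE) |
                       IncludeSmiCodeField::encode(true);
  std::printf("minor key = %u, smi code included = %d\n",
              minor_key, IncludeSmiCodeField::decode(minor_key));
  return 0;
}

The new keyed element stubs added above need no such packing because their minor key is a single value (is_js_array_ or array_type_).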
diff --git a/src/code.h b/src/code.h
index 072344b..766c932 100644
--- a/src/code.h
+++ b/src/code.h
@@ -28,6 +28,8 @@
 #ifndef V8_CODE_H_
 #define V8_CODE_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/codegen.cc b/src/codegen.cc
index 4bbe6ae..ad3cf1b 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -34,7 +34,6 @@
 #include "prettyprinter.h"
 #include "rewriter.h"
 #include "runtime.h"
-#include "scopeinfo.h"
 #include "stub-cache.h"
 
 namespace v8 {
@@ -176,7 +175,10 @@
 
 bool CodeGenerator::ShouldGenerateLog(Expression* type) {
   ASSERT(type != NULL);
-  if (!LOGGER->is_logging() && !CpuProfiler::is_profiling()) return false;
+  Isolate* isolate = Isolate::Current();
+  if (!isolate->logger()->is_logging() && !CpuProfiler::is_profiling(isolate)) {
+    return false;
+  }
   Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
   if (FLAG_log_regexp) {
     if (name->IsEqualTo(kRegexp))
@@ -202,29 +204,6 @@
 }
 
 
-const char* GenericUnaryOpStub::GetName() {
-  switch (op_) {
-    case Token::SUB:
-      if (negative_zero_ == kStrictNegativeZero) {
-        return overwrite_ == UNARY_OVERWRITE
-            ? "GenericUnaryOpStub_SUB_Overwrite_Strict0"
-            : "GenericUnaryOpStub_SUB_Alloc_Strict0";
-      } else {
-        return overwrite_ == UNARY_OVERWRITE
-            ? "GenericUnaryOpStub_SUB_Overwrite_Ignore0"
-            : "GenericUnaryOpStub_SUB_Alloc_Ignore0";
-      }
-    case Token::BIT_NOT:
-      return overwrite_ == UNARY_OVERWRITE
-          ? "GenericUnaryOpStub_BIT_NOT_Overwrite"
-          : "GenericUnaryOpStub_BIT_NOT_Alloc";
-    default:
-      UNREACHABLE();
-      return "<unknown>";
-  }
-}
-
-
 void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
   switch (type_) {
     case READ_ELEMENT:
diff --git a/src/compiler.cc b/src/compiler.cc
index 6a9bc27..d82bcd0 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -32,7 +32,6 @@
 #include "bootstrapper.h"
 #include "codegen.h"
 #include "compilation-cache.h"
-#include "data-flow.h"
 #include "debug.h"
 #include "full-codegen.h"
 #include "gdb-jit.h"
@@ -95,22 +94,23 @@
 }
 
 
+// Disable optimization for the rest of the compilation pipeline.
 void CompilationInfo::DisableOptimization() {
-  if (FLAG_optimize_closures) {
-    // If we allow closures optimizations and it's an optimizable closure
-    // mark it correspondingly.
-    bool is_closure = closure_.is_null() && !scope_->HasTrivialOuterContext();
-    if (is_closure) {
-      bool is_optimizable_closure =
-          !scope_->outer_scope_calls_eval() && !scope_->inside_with();
-      if (is_optimizable_closure) {
-        SetMode(BASE);
-        return;
-      }
-    }
-  }
+  bool is_optimizable_closure =
+    FLAG_optimize_closures &&
+    closure_.is_null() &&
+    !scope_->HasTrivialOuterContext() &&
+    !scope_->outer_scope_calls_non_strict_eval() &&
+    !scope_->inside_with();
+  SetMode(is_optimizable_closure ? BASE : NONOPT);
+}
 
-  SetMode(NONOPT);
+
+void CompilationInfo::AbortOptimization() {
+  Handle<Code> code(shared_info()->code());
+  SetCode(code);
+  Isolate* isolate = code->GetIsolate();
+  isolate->compilation_cache()->MarkForLazyOptimizing(closure());
 }
 
 
@@ -122,20 +122,23 @@
 // all. However, crankshaft supports recompilation of functions, so in this case
 // the full compiler need not be used if a debugger is attached, but only if
 // break points have actually been set.
-static bool AlwaysFullCompiler() {
+static bool is_debugging_active() {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Isolate* isolate = Isolate::Current();
-  if (V8::UseCrankshaft()) {
-    return FLAG_always_full_compiler || isolate->debug()->has_break_points();
-  } else {
-    return FLAG_always_full_compiler || isolate->debugger()->IsDebuggerActive();
-  }
+  return V8::UseCrankshaft() ?
+    isolate->debug()->has_break_points() :
+    isolate->debugger()->IsDebuggerActive();
 #else
-  return FLAG_always_full_compiler;
+  return false;
 #endif
 }
 
 
+static bool AlwaysFullCompiler() {
+  return FLAG_always_full_compiler || is_debugging_active();
+}
+
+
 static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
   int opt_count = function->shared()->opt_count();
   function->shared()->set_opt_count(opt_count + 1);
@@ -162,31 +165,6 @@
 }
 
 
-static void AbortAndDisable(CompilationInfo* info) {
-  // Disable optimization for the shared function info and mark the
-  // code as non-optimizable. The marker on the shared function info
-  // is there because we flush non-optimized code thereby loosing the
-  // non-optimizable information for the code. When the code is
-  // regenerated and set on the shared function info it is marked as
-  // non-optimizable if optimization is disabled for the shared
-  // function info.
-  Handle<SharedFunctionInfo> shared = info->shared_info();
-  shared->set_optimization_disabled(true);
-  Handle<Code> code = Handle<Code>(shared->code());
-  ASSERT(code->kind() == Code::FUNCTION);
-  code->set_optimizable(false);
-  info->SetCode(code);
-  Isolate* isolate = code->GetIsolate();
-  isolate->compilation_cache()->MarkForLazyOptimizing(info->closure());
-  if (FLAG_trace_opt) {
-    PrintF("[disabled optimization for: ");
-    info->closure()->PrintName();
-    PrintF(" / %" V8PRIxPTR "]\n",
-           reinterpret_cast<intptr_t>(*info->closure()));
-  }
-}
-
-
 static bool MakeCrankshaftCode(CompilationInfo* info) {
   // Test if we can optimize this function when asked to. We can only
   // do this after the scopes are computed.
@@ -220,7 +198,9 @@
   const int kMaxOptCount =
       FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
   if (info->shared_info()->opt_count() > kMaxOptCount) {
-    AbortAndDisable(info);
+    info->AbortOptimization();
+    Handle<JSFunction> closure = info->closure();
+    info->shared_info()->DisableOptimization(*closure);
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
@@ -239,7 +219,9 @@
   if ((scope->num_parameters() + 1) > parameter_limit ||
       (info->osr_ast_id() != AstNode::kNoNumber &&
        scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
-    AbortAndDisable(info);
+    info->AbortOptimization();
+    Handle<JSFunction> closure = info->closure();
+    info->shared_info()->DisableOptimization(*closure);
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
@@ -312,28 +294,32 @@
     }
   }
 
-  // Compilation with the Hydrogen compiler failed. Keep using the
-  // shared code but mark it as unoptimizable.
-  AbortAndDisable(info);
+  // Keep using the shared code.
+  info->AbortOptimization();
+  if (!builder.inline_bailout()) {
+    // Mark the shared code as unoptimizable unless it was an inlined
+    // function that bailed out.
+    Handle<JSFunction> closure = info->closure();
+    info->shared_info()->DisableOptimization(*closure);
+  }
   // True indicates the compilation pipeline is still going, not necessarily
   // that we optimized the code.
   return true;
 }
 
 
+static bool GenerateCode(CompilationInfo* info) {
+  return V8::UseCrankshaft() ?
+    MakeCrankshaftCode(info) :
+    FullCodeGenerator::MakeCode(info);
+}
+
+
 static bool MakeCode(CompilationInfo* info) {
   // Precondition: code has been parsed.  Postcondition: the code field in
   // the compilation info is set if compilation succeeded.
   ASSERT(info->function() != NULL);
-
-  if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
-    if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
-    // If crankshaft is not supported fall back to full code generator
-    // for all compilation.
-    return FullCodeGenerator::MakeCode(info);
-  }
-
-  return false;
+  return Rewriter::Rewrite(info) && Scope::Analyze(info) && GenerateCode(info);
 }
 
 
@@ -353,9 +339,8 @@
 
 
 static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
-  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
-
   Isolate* isolate = info->isolate();
+  CompilationZoneScope zone_scope(isolate, DELETE_ON_EXIT);
   PostponeInterruptsScope postpone(isolate);
 
   ASSERT(!isolate->global_context().is_null());
@@ -586,12 +571,13 @@
 
 
 bool Compiler::CompileLazy(CompilationInfo* info) {
-  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  Isolate* isolate = info->isolate();
+
+  CompilationZoneScope zone_scope(isolate, DELETE_ON_EXIT);
 
   // The VM is in the COMPILER state until exiting this function.
-  VMState state(info->isolate(), COMPILER);
+  VMState state(isolate, COMPILER);
 
-  Isolate* isolate = info->isolate();
   PostponeInterruptsScope postpone(isolate);
 
   Handle<SharedFunctionInfo> shared = info->shared_info();
@@ -665,7 +651,7 @@
           // version of the function right away - unless the debugger is
           // active as it makes no sense to compile optimized code then.
           if (FLAG_always_opt &&
-              !Isolate::Current()->debug()->has_break_points()) {
+              !Isolate::Current()->DebuggerHasBreakPoints()) {
             CompilationInfo optimized(function);
             optimized.SetOptimizing(AstNode::kNoNumber);
             return CompileLazy(&optimized);
@@ -691,6 +677,7 @@
   CompilationInfo info(script);
   info.SetFunction(literal);
   info.SetScope(literal->scope());
+  if (literal->scope()->is_strict_mode()) info.MarkAsStrictMode();
 
   LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
   // Determine if the function can be lazily compiled. This is necessary to
@@ -768,7 +755,8 @@
   // Log the code generation. If source information is available include
   // script name and line number. Check explicitly whether logging is
   // enabled as finding the line number is not free.
-  if (info->isolate()->logger()->is_logging() || CpuProfiler::is_profiling()) {
+  if (info->isolate()->logger()->is_logging() ||
+      CpuProfiler::is_profiling(info->isolate())) {
     Handle<Script> script = info->script();
     Handle<Code> code = info->code();
     if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
diff --git a/src/compiler.h b/src/compiler.h
index a6a0d90..ea74d60 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -28,8 +28,8 @@
 #ifndef V8_COMPILER_H_
 #define V8_COMPILER_H_
 
+#include "allocation.h"
 #include "ast.h"
-#include "frame-element.h"
 #include "zone.h"
 
 namespace v8 {
@@ -144,6 +144,10 @@
     return V8::UseCrankshaft() && !closure_.is_null();
   }
 
+  // Disable all optimization attempts of this info for the rest of the
+  // current compilation pipeline.
+  void AbortOptimization();
+
  private:
   Isolate* isolate_;
 
@@ -293,7 +297,9 @@
 // clear this list of handles as well.
 class CompilationZoneScope : public ZoneScope {
  public:
-  explicit CompilationZoneScope(ZoneScopeMode mode) : ZoneScope(mode) { }
+  CompilationZoneScope(Isolate* isolate, ZoneScopeMode mode)
+      : ZoneScope(isolate, mode) {}
+
   virtual ~CompilationZoneScope() {
     if (ShouldDeleteOnExit()) {
       Isolate* isolate = Isolate::Current();
diff --git a/src/contexts.cc b/src/contexts.cc
index 520f3dd..f6031f1 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -162,7 +162,6 @@
         ASSERT(index >= 0);  // arguments must exist and be in the heap context
         Handle<JSObject> arguments(JSObject::cast(context->get(index)),
                                    isolate);
-        ASSERT(arguments->HasLocalProperty(isolate->heap()->length_symbol()));
         if (FLAG_trace_contexts) {
           PrintF("=> found parameter %d in arguments object\n", param_index);
         }
@@ -243,6 +242,27 @@
 }
 
 
+void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
+                                   bool* outer_scope_calls_non_strict_eval) {
+  Context* context = this;
+  while (true) {
+    Handle<SerializedScopeInfo> scope_info(
+        context->closure()->shared()->scope_info());
+    if (scope_info->CallsEval()) {
+      *outer_scope_calls_eval = true;
+      if (!scope_info->IsStrictMode()) {
+        // No need to go further since the answers will not change
+        // from here.
+        *outer_scope_calls_non_strict_eval = true;
+        return;
+      }
+    }
+    if (context->IsGlobalContext()) break;
+    context = Context::cast(context->closure()->context());
+  }
+}
+
+
 void Context::AddOptimizedFunction(JSFunction* function) {
   ASSERT(IsGlobalContext());
 #ifdef DEBUG
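Note (illustration only, not part of this change): the new Context::ComputeEvalScopeInfo() above walks outward through the enclosing contexts and stops early once a non-strict eval caller is found. Below is a standalone analogue of that walk using a plain linked scope structure in place of Context; all types and names are invented for illustration.

#include <cstdio>

struct Scope {
  bool calls_eval;
  bool is_strict;
  Scope* outer;  // NULL for the outermost (global) scope.
};

// Walk outward, recording whether any enclosing scope calls eval and whether
// any such call happens in non-strict (classic) mode.
static void ComputeEvalScopeInfo(const Scope* scope,
                                 bool* outer_scope_calls_eval,
                                 bool* outer_scope_calls_non_strict_eval) {
  for (const Scope* s = scope; s != NULL; s = s->outer) {
    if (s->calls_eval) {
      *outer_scope_calls_eval = true;
      if (!s->is_strict) {
        // Neither answer can change further out, so stop early.
        *outer_scope_calls_non_strict_eval = true;
        return;
      }
    }
  }
}

int main() {
  Scope global = { false, false, NULL };
  Scope strict_fn = { true, true, &global };   // strict function that calls eval
  Scope inner = { false, true, &strict_fn };
  bool calls_eval = false, non_strict = false;
  ComputeEvalScopeInfo(&inner, &calls_eval, &non_strict);
  std::printf("calls_eval=%d non_strict_eval=%d\n", calls_eval, non_strict);
  return 0;
}

The early return mirrors the real code: once a non-strict eval caller has been seen, neither output flag can change in an outer context.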
diff --git a/src/contexts.h b/src/contexts.h
index e46619e..b0d3ae4 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -106,7 +106,9 @@
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
   V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
   V(MAP_CACHE_INDEX, Object, map_cache) \
-  V(CONTEXT_DATA_INDEX, Object, data)
+  V(CONTEXT_DATA_INDEX, Object, data) \
+  V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap)
 
 // JSFunctions are pairs (context, function code), sometimes also called
 // closures. A Context object is used to represent function contexts and
@@ -236,6 +238,8 @@
     OUT_OF_MEMORY_INDEX,
     MAP_CACHE_INDEX,
     CONTEXT_DATA_INDEX,
+    ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
+    DERIVED_GET_TRAP_INDEX,
 
     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
@@ -349,6 +353,11 @@
   // eval.
   bool GlobalIfNotShadowedByEval(Handle<String> name);
 
+  // Determine if any function scope in the context calls eval and if
+  // any of those calls are in non-strict mode.
+  void ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
+                            bool* outer_scope_calls_non_strict_eval);
+
   // Code generation support.
   static int SlotOffset(int index) {
     return kHeaderSize + index * kPointerSize - kHeapObjectTag;
diff --git a/src/conversions.cc b/src/conversions.cc
index 1458584..353b681 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -254,12 +254,12 @@
   if (*current == '+') {
     // Ignore leading sign; skip following spaces.
     ++current;
-    if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+    if (current == end) {
       return JUNK_STRING_VALUE;
     }
   } else if (*current == '-') {
     ++current;
-    if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+    if (current == end) {
       return JUNK_STRING_VALUE;
     }
     negative = true;
@@ -734,6 +734,15 @@
                                 empty_string_val);
 }
 
+double StringToDouble(UnicodeCache* unicode_cache,
+                      Vector<const uc16> str,
+                      int flags,
+                      double empty_string_val) {
+  const uc16* end = str.start() + str.length();
+  return InternalStringToDouble(unicode_cache, str.start(), end, flags,
+                                empty_string_val);
+}
+
 
 const char* DoubleToCString(double v, Vector<char> buffer) {
   switch (fpclassify(v)) {
diff --git a/src/conversions.h b/src/conversions.h
index a14dc9a..4cbeeca 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -101,6 +101,10 @@
                       Vector<const char> str,
                       int flags,
                       double empty_string_val = 0);
+double StringToDouble(UnicodeCache* unicode_cache,
+                      Vector<const uc16> str,
+                      int flags,
+                      double empty_string_val = 0);
 // This version expects a zero-terminated character array.
 double StringToDouble(UnicodeCache* unicode_cache,
                       const char* str,
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 10a3360..f54e3e8 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -288,14 +288,16 @@
 
 
 CpuProfile* CpuProfiler::StopProfiling(const char* title) {
-  return is_profiling() ?
-      Isolate::Current()->cpu_profiler()->StopCollectingProfile(title) : NULL;
+  Isolate* isolate = Isolate::Current();
+  return is_profiling(isolate) ?
+      isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
 }
 
 
 CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
-  return is_profiling() ?
-      Isolate::Current()->cpu_profiler()->StopCollectingProfile(
+  Isolate* isolate = Isolate::Current();
+  return is_profiling(isolate) ?
+      isolate->cpu_profiler()->StopCollectingProfile(
           security_token, title) : NULL;
 }
 
@@ -336,8 +338,9 @@
 void CpuProfiler::DeleteAllProfiles() {
   Isolate* isolate = Isolate::Current();
   ASSERT(isolate->cpu_profiler() != NULL);
-  if (is_profiling())
+  if (is_profiling(isolate)) {
     isolate->cpu_profiler()->StopProcessor();
+  }
   isolate->cpu_profiler()->ResetProfiles();
 }
 
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index e04cf85..f9f6167 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -30,6 +30,7 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
+#include "allocation.h"
 #include "atomicops.h"
 #include "circular-queue.h"
 #include "unbound-queue.h"
@@ -133,8 +134,8 @@
 // methods called by event producers: VM and stack sampler threads.
 class ProfilerEventsProcessor : public Thread {
  public:
-  explicit ProfilerEventsProcessor(Isolate* isolate,
-                                   ProfileGenerator* generator);
+  ProfilerEventsProcessor(Isolate* isolate,
+                          ProfileGenerator* generator);
   virtual ~ProfilerEventsProcessor() {}
 
   // Thread control.
@@ -197,12 +198,12 @@
 } }  // namespace v8::internal
 
 
-#define PROFILE(isolate, Call)                         \
-  LOG(isolate, Call);                                  \
-  do {                                                 \
-    if (v8::internal::CpuProfiler::is_profiling()) {   \
-      v8::internal::CpuProfiler::Call;                 \
-    }                                                  \
+#define PROFILE(isolate, Call)                                \
+  LOG(isolate, Call);                                         \
+  do {                                                        \
+    if (v8::internal::CpuProfiler::is_profiling(isolate)) {   \
+      v8::internal::CpuProfiler::Call;                        \
+    }                                                         \
   } while (false)
 #else
 #define PROFILE(isolate, Call) LOG(isolate, Call)
@@ -261,10 +262,6 @@
 
   // TODO(isolates): this doesn't have to use atomics anymore.
 
-  static INLINE(bool is_profiling()) {
-    return is_profiling(Isolate::Current());
-  }
-
   static INLINE(bool is_profiling(Isolate* isolate)) {
     CpuProfiler* profiler = isolate->cpu_profiler();
     return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
@@ -292,7 +289,7 @@
   Atomic32 is_profiling_;
 
 #else
-  static INLINE(bool is_profiling()) { return false; }
+  static INLINE(bool is_profiling(Isolate* isolate)) { return false; }
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
  private:
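Note (illustration only, not part of this change): the PROFILE macro above now threads the isolate through to is_profiling(isolate) instead of reaching for Isolate::Current(), but it keeps the usual statement-macro shape. A standalone sketch of that shape (all names below are invented for illustration):

#include <cstdio>

static bool profiling_enabled = true;  // Stand-in for CpuProfiler::is_profiling(isolate).

static void RecordEvent(const char* name) { std::printf("event: %s\n", name); }

// do { ... } while (false) makes the macro a single statement (safe before an
// else), and the cheap flag test keeps it nearly free when profiling is off.
#define PROFILE_EXAMPLE(Call)     \
  do {                            \
    if (profiling_enabled) {      \
      RecordEvent(Call);          \
    }                             \
  } while (false)

int main(int argc, char**) {
  if (argc > 0)
    PROFILE_EXAMPLE("code-create");
  else
    std::printf("never reached\n");
  return 0;
}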
diff --git a/src/cpu.h b/src/cpu.h
index e307302..2525484 100644
--- a/src/cpu.h
+++ b/src/cpu.h
@@ -36,6 +36,8 @@
 #ifndef V8_CPU_H_
 #define V8_CPU_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/d8-readline.cc b/src/d8-readline.cc
index 67fc9ef..08395e5 100644
--- a/src/d8-readline.cc
+++ b/src/d8-readline.cc
@@ -30,6 +30,8 @@
 #include <readline/readline.h> // NOLINT
 #include <readline/history.h> // NOLINT
 
+// The readline includes leave RETURN defined, which breaks V8 compilation.
+#undef RETURN
 
 #include "d8.h"
 
diff --git a/src/d8.cc b/src/d8.cc
index 7de82b7..f1068cb 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -266,6 +266,11 @@
       printf("^");
     }
     printf("\n");
+    v8::String::Utf8Value stack_trace(try_catch->StackTrace());
+    if (stack_trace.length() > 0) {
+      const char* stack_trace_string = ToCString(stack_trace);
+      printf("%s\n", stack_trace_string);
+    }
   }
 }
 
@@ -791,6 +796,8 @@
 }  // namespace v8
 
 
+#ifndef GOOGLE3
 int main(int argc, char* argv[]) {
   return v8::Shell::Main(argc, argv);
 }
+#endif
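Note (illustration only, not part of this change): the ReportException() hunk above prints try_catch->StackTrace() when one is available. A minimal embedder-side sketch of the same pattern against the 3.x C++ API (assumes linking against libv8; error handling trimmed):

#include <cstdio>
#include <v8.h>

int main() {
  v8::HandleScope handle_scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);

  v8::TryCatch try_catch;
  v8::Handle<v8::Script> script = v8::Script::Compile(
      v8::String::New("function f() { throw new Error('boom'); } f();"));
  script->Run();  // Throws; the exception is caught by try_catch.

  if (try_catch.HasCaught()) {
    v8::String::Utf8Value stack_trace(try_catch.StackTrace());
    if (stack_trace.length() > 0) std::printf("%s\n", *stack_trace);
  }

  context.Dispose();
  return 0;
}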
diff --git a/src/d8.gyp b/src/d8.gyp
index 901fd65..8b52ed9 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -52,6 +52,9 @@
         [ 'OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
           'sources': [ 'd8-posix.cc', ]
         }],
+        [ 'OS=="win"', {
+          'sources': [ 'd8-windows.cc', ]
+        }],
       ],
     },
     {
@@ -61,6 +64,7 @@
       'variables': {
         'js_files': [
           'd8.js',
+          'macros.py',
         ],
       },
       'actions': [
@@ -72,7 +76,6 @@
           ],
           'outputs': [
             '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
-            '<(SHARED_INTERMEDIATE_DIR)/d8-js-empty.cc',
           ],
           'action': [
             'python',
diff --git a/src/d8.h b/src/d8.h
index de1fe0d..dc02322 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -28,9 +28,13 @@
 #ifndef V8_D8_H_
 #define V8_D8_H_
 
+#include "allocation.h"
 #include "v8.h"
 #include "hashmap.h"
 
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+#error Using compressed startup data is not supported for D8
+#endif
 
 namespace v8 {
 
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 79339ed..6a3b05c 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -63,477 +63,4 @@
   current_value_ = val >> 1;
 }
 
-
-bool AssignedVariablesAnalyzer::Analyze(CompilationInfo* info) {
-  Scope* scope = info->scope();
-  int size = scope->num_parameters() + scope->num_stack_slots();
-  if (size == 0) return true;
-  AssignedVariablesAnalyzer analyzer(info, size);
-  return analyzer.Analyze();
-}
-
-
-AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(CompilationInfo* info,
-                                                     int size)
-    : info_(info), av_(size) {
-}
-
-
-bool AssignedVariablesAnalyzer::Analyze() {
-  ASSERT(av_.length() > 0);
-  VisitStatements(info_->function()->body());
-  return !HasStackOverflow();
-}
-
-
-Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
-  // The loop must have all necessary parts.
-  if (stmt->init() == NULL || stmt->cond() == NULL || stmt->next() == NULL) {
-    return NULL;
-  }
-  // The initialization statement has to be a simple assignment.
-  Assignment* init = stmt->init()->StatementAsSimpleAssignment();
-  if (init == NULL) return NULL;
-
-  // We only deal with local variables.
-  Variable* loop_var = init->target()->AsVariableProxy()->AsVariable();
-  if (loop_var == NULL || !loop_var->IsStackAllocated()) return NULL;
-
-  // Don't try to get clever with const or dynamic variables.
-  if (loop_var->mode() != Variable::VAR) return NULL;
-
-  // The initial value has to be a smi.
-  Literal* init_lit = init->value()->AsLiteral();
-  if (init_lit == NULL || !init_lit->handle()->IsSmi()) return NULL;
-  int init_value = Smi::cast(*init_lit->handle())->value();
-
-  // The condition must be a compare of variable with <, <=, >, or >=.
-  CompareOperation* cond = stmt->cond()->AsCompareOperation();
-  if (cond == NULL) return NULL;
-  if (cond->op() != Token::LT
-      && cond->op() != Token::LTE
-      && cond->op() != Token::GT
-      && cond->op() != Token::GTE) return NULL;
-
-  // The lhs must be the same variable as in the init expression.
-  if (cond->left()->AsVariableProxy()->AsVariable() != loop_var) return NULL;
-
-  // The rhs must be a smi.
-  Literal* term_lit = cond->right()->AsLiteral();
-  if (term_lit == NULL || !term_lit->handle()->IsSmi()) return NULL;
-  int term_value = Smi::cast(*term_lit->handle())->value();
-
-  // The count operation updates the same variable as in the init expression.
-  CountOperation* update = stmt->next()->StatementAsCountOperation();
-  if (update == NULL) return NULL;
-  if (update->expression()->AsVariableProxy()->AsVariable() != loop_var) {
-    return NULL;
-  }
-
-  // The direction of the count operation must agree with the start and the end
-  // value. We currently do not allow the initial value to be the same as the
-  // terminal value. This _would_ be ok as long as the loop body never executes
-  // or executes exactly one time.
-  if (init_value == term_value) return NULL;
-  if (init_value < term_value && update->op() != Token::INC) return NULL;
-  if (init_value > term_value && update->op() != Token::DEC) return NULL;
-
-  // Check that the update operation cannot overflow the smi range. This can
-  // occur in the two cases where the loop bound is equal to the largest or
-  // smallest smi.
-  if (update->op() == Token::INC && term_value == Smi::kMaxValue) return NULL;
-  if (update->op() == Token::DEC && term_value == Smi::kMinValue) return NULL;
-
-  // Found a smi loop variable.
-  return loop_var;
-}
-
-int AssignedVariablesAnalyzer::BitIndex(Variable* var) {
-  ASSERT(var != NULL);
-  ASSERT(var->IsStackAllocated());
-  Slot* slot = var->AsSlot();
-  if (slot->type() == Slot::PARAMETER) {
-    return slot->index();
-  } else {
-    return info_->scope()->num_parameters() + slot->index();
-  }
-}
-
-
-void AssignedVariablesAnalyzer::RecordAssignedVar(Variable* var) {
-  ASSERT(var != NULL);
-  if (var->IsStackAllocated()) {
-    av_.Add(BitIndex(var));
-  }
-}
-
-
-void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) {
-  Variable* var = expr->AsVariableProxy()->AsVariable();
-  if (var != NULL &&
-      var->IsStackAllocated() &&
-      !var->is_arguments() &&
-      var->mode() != Variable::CONST &&
-      (var->is_this() || !av_.Contains(BitIndex(var)))) {
-    expr->AsVariableProxy()->MarkAsTrivial();
-  }
-}
-
-
-void AssignedVariablesAnalyzer::ProcessExpression(Expression* expr) {
-  BitVector saved_av(av_);
-  av_.Clear();
-  Visit(expr);
-  av_.Union(saved_av);
-}
-
-void AssignedVariablesAnalyzer::VisitBlock(Block* stmt) {
-  VisitStatements(stmt->statements());
-}
-
-
-void AssignedVariablesAnalyzer::VisitExpressionStatement(
-    ExpressionStatement* stmt) {
-  ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
-  // Do nothing.
-}
-
-
-void AssignedVariablesAnalyzer::VisitIfStatement(IfStatement* stmt) {
-  ProcessExpression(stmt->condition());
-  Visit(stmt->then_statement());
-  Visit(stmt->else_statement());
-}
-
-
-void AssignedVariablesAnalyzer::VisitContinueStatement(
-    ContinueStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
-  ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWithEnterStatement(
-    WithEnterStatement* stmt) {
-  ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWithExitStatement(
-    WithExitStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
-  BitVector result(av_);
-  av_.Clear();
-  Visit(stmt->tag());
-  result.Union(av_);
-  for (int i = 0; i < stmt->cases()->length(); i++) {
-    CaseClause* clause = stmt->cases()->at(i);
-    if (!clause->is_default()) {
-      av_.Clear();
-      Visit(clause->label());
-      result.Union(av_);
-    }
-    VisitStatements(clause->statements());
-  }
-  av_.Union(result);
-}
-
-
-void AssignedVariablesAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  ProcessExpression(stmt->cond());
-  Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
-  ProcessExpression(stmt->cond());
-  Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
-  if (stmt->init() != NULL) Visit(stmt->init());
-  if (stmt->cond() != NULL) ProcessExpression(stmt->cond());
-  if (stmt->next() != NULL) Visit(stmt->next());
-
-  // Process loop body. After visiting the loop body av_ contains
-  // the assigned variables of the loop body.
-  BitVector saved_av(av_);
-  av_.Clear();
-  Visit(stmt->body());
-
-  Variable* var = FindSmiLoopVariable(stmt);
-  if (var != NULL && !av_.Contains(BitIndex(var))) {
-    stmt->set_loop_variable(var);
-  }
-  av_.Union(saved_av);
-}
-
-
-void AssignedVariablesAnalyzer::VisitForInStatement(ForInStatement* stmt) {
-  ProcessExpression(stmt->each());
-  ProcessExpression(stmt->enumerable());
-  Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitTryCatchStatement(
-    TryCatchStatement* stmt) {
-  Visit(stmt->try_block());
-  Visit(stmt->catch_block());
-}
-
-
-void AssignedVariablesAnalyzer::VisitTryFinallyStatement(
-    TryFinallyStatement* stmt) {
-  Visit(stmt->try_block());
-  Visit(stmt->finally_block());
-}
-
-
-void AssignedVariablesAnalyzer::VisitDebuggerStatement(
-    DebuggerStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitConditional(Conditional* expr) {
-  ASSERT(av_.IsEmpty());
-
-  Visit(expr->condition());
-
-  BitVector result(av_);
-  av_.Clear();
-  Visit(expr->then_expression());
-  result.Union(av_);
-
-  av_.Clear();
-  Visit(expr->else_expression());
-  av_.Union(result);
-}
-
-
-void AssignedVariablesAnalyzer::VisitVariableProxy(VariableProxy* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitLiteral(Literal* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
-  ASSERT(av_.IsEmpty());
-  BitVector result(av_.length());
-  for (int i = 0; i < expr->properties()->length(); i++) {
-    Visit(expr->properties()->at(i)->value());
-    result.Union(av_);
-    av_.Clear();
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
-  ASSERT(av_.IsEmpty());
-  BitVector result(av_.length());
-  for (int i = 0; i < expr->values()->length(); i++) {
-    Visit(expr->values()->at(i));
-    result.Union(av_);
-    av_.Clear();
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCatchExtensionObject(
-    CatchExtensionObject* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->key());
-  ProcessExpression(expr->value());
-}
-
-
-void AssignedVariablesAnalyzer::VisitAssignment(Assignment* expr) {
-  ASSERT(av_.IsEmpty());
-
-  // There are three kinds of assignments: variable assignments, property
-  // assignments, and reference errors (invalid left-hand sides).
-  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  Property* prop = expr->target()->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
-
-  if (var != NULL) {
-    MarkIfTrivial(expr->value());
-    Visit(expr->value());
-    if (expr->is_compound()) {
-      // Left-hand side occurs also as an rvalue.
-      MarkIfTrivial(expr->target());
-      ProcessExpression(expr->target());
-    }
-    RecordAssignedVar(var);
-
-  } else if (prop != NULL) {
-    MarkIfTrivial(expr->value());
-    Visit(expr->value());
-    if (!prop->key()->IsPropertyName()) {
-      MarkIfTrivial(prop->key());
-      ProcessExpression(prop->key());
-    }
-    MarkIfTrivial(prop->obj());
-    ProcessExpression(prop->obj());
-
-  } else {
-    Visit(expr->target());
-  }
-}
-
-
-void AssignedVariablesAnalyzer::VisitThrow(Throw* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->exception());
-}
-
-
-void AssignedVariablesAnalyzer::VisitProperty(Property* expr) {
-  ASSERT(av_.IsEmpty());
-  if (!expr->key()->IsPropertyName()) {
-    MarkIfTrivial(expr->key());
-    Visit(expr->key());
-  }
-  MarkIfTrivial(expr->obj());
-  ProcessExpression(expr->obj());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCall(Call* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->expression());
-  BitVector result(av_);
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    av_.Clear();
-    Visit(expr->arguments()->at(i));
-    result.Union(av_);
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCallNew(CallNew* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->expression());
-  BitVector result(av_);
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    av_.Clear();
-    Visit(expr->arguments()->at(i));
-    result.Union(av_);
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
-  ASSERT(av_.IsEmpty());
-  BitVector result(av_);
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    av_.Clear();
-    Visit(expr->arguments()->at(i));
-    result.Union(av_);
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->expression());
-  Visit(expr->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  if (expr->is_prefix()) MarkIfTrivial(expr->expression());
-  Visit(expr->expression());
-
-  Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-  if (var != NULL) RecordAssignedVar(var);
-}
-
-
-void AssignedVariablesAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->right());
-  Visit(expr->right());
-  MarkIfTrivial(expr->left());
-  ProcessExpression(expr->left());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->right());
-  Visit(expr->right());
-  MarkIfTrivial(expr->left());
-  ProcessExpression(expr->left());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCompareToNull(CompareToNull* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->expression());
-  Visit(expr->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitDeclaration(Declaration* decl) {
-  UNREACHABLE();
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/data-flow.h b/src/data-flow.h
index 573d7d8..d69d6c7 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include "v8.h"
 
+#include "allocation.h"
 #include "ast.h"
 #include "compiler.h"
 #include "zone-inl.h"
@@ -37,9 +38,6 @@
 namespace v8 {
 namespace internal {
 
-// Forward declarations.
-class Node;
-
 class BitVector: public ZoneObject {
  public:
   // Iterator for the elements of this BitVector.
@@ -201,178 +199,6 @@
   uint32_t* data_;
 };
 
-
-// An implementation of a sparse set whose elements are drawn from integers
-// in the range [0..universe_size[.  It supports constant-time Contains,
-// destructive Add, and destructuve Remove operations and linear-time (in
-// the number of elements) destructive Union.
-class SparseSet: public ZoneObject {
- public:
-  // Iterator for sparse set elements.  Elements should not be added or
-  // removed during iteration.
-  class Iterator BASE_EMBEDDED {
-   public:
-    explicit Iterator(SparseSet* target) : target_(target), current_(0) {
-      ASSERT(++target->iterator_count_ > 0);
-    }
-    ~Iterator() {
-      ASSERT(target_->iterator_count_-- > 0);
-    }
-    bool Done() const { return current_ >= target_->dense_.length(); }
-    void Advance() {
-      ASSERT(!Done());
-      ++current_;
-    }
-    int Current() {
-      ASSERT(!Done());
-      return target_->dense_[current_];
-    }
-
-   private:
-    SparseSet* target_;
-    int current_;
-
-    friend class SparseSet;
-  };
-
-  explicit SparseSet(int universe_size)
-      : dense_(4),
-        sparse_(ZONE->NewArray<int>(universe_size)) {
-#ifdef DEBUG
-    size_ = universe_size;
-    iterator_count_ = 0;
-#endif
-  }
-
-  bool Contains(int n) const {
-    ASSERT(0 <= n && n < size_);
-    int dense_index = sparse_[n];
-    return (0 <= dense_index) &&
-        (dense_index < dense_.length()) &&
-        (dense_[dense_index] == n);
-  }
-
-  void Add(int n) {
-    ASSERT(0 <= n && n < size_);
-    ASSERT(iterator_count_ == 0);
-    if (!Contains(n)) {
-      sparse_[n] = dense_.length();
-      dense_.Add(n);
-    }
-  }
-
-  void Remove(int n) {
-    ASSERT(0 <= n && n < size_);
-    ASSERT(iterator_count_ == 0);
-    if (Contains(n)) {
-      int dense_index = sparse_[n];
-      int last = dense_.RemoveLast();
-      if (dense_index < dense_.length()) {
-        dense_[dense_index] = last;
-        sparse_[last] = dense_index;
-      }
-    }
-  }
-
-  void Union(const SparseSet& other) {
-    for (int i = 0; i < other.dense_.length(); ++i) {
-      Add(other.dense_[i]);
-    }
-  }
-
- private:
-  // The set is implemented as a pair of a growable dense list and an
-  // uninitialized sparse array.
-  ZoneList<int> dense_;
-  int* sparse_;
-#ifdef DEBUG
-  int size_;
-  int iterator_count_;
-#endif
-};
-
-
-// Simple fixed-capacity list-based worklist (managed as a queue) of
-// pointers to T.
-template<typename T>
-class WorkList BASE_EMBEDDED {
- public:
-  // The worklist cannot grow bigger than size.  We keep one item empty to
-  // distinguish between empty and full.
-  explicit WorkList(int size)
-      : capacity_(size + 1), head_(0), tail_(0), queue_(capacity_) {
-    for (int i = 0; i < capacity_; i++) queue_.Add(NULL);
-  }
-
-  bool is_empty() { return head_ == tail_; }
-
-  bool is_full() {
-    // The worklist is full if head is at 0 and tail is at capacity - 1:
-    //   head == 0 && tail == capacity-1 ==> tail - head == capacity - 1
-    // or if tail is immediately to the left of head:
-    //   tail+1 == head  ==> tail - head == -1
-    int diff = tail_ - head_;
-    return (diff == -1 || diff == capacity_ - 1);
-  }
-
-  void Insert(T* item) {
-    ASSERT(!is_full());
-    queue_[tail_++] = item;
-    if (tail_ == capacity_) tail_ = 0;
-  }
-
-  T* Remove() {
-    ASSERT(!is_empty());
-    T* item = queue_[head_++];
-    if (head_ == capacity_) head_ = 0;
-    return item;
-  }
-
- private:
-  int capacity_;  // Including one empty slot.
-  int head_;      // Where the first item is.
-  int tail_;      // Where the next inserted item will go.
-  List<T*> queue_;
-};
-
-
-// Computes the set of assigned variables and annotates variables proxies
-// that are trivial sub-expressions and for-loops where the loop variable
-// is guaranteed to be a smi.
-class AssignedVariablesAnalyzer : public AstVisitor {
- public:
-  static bool Analyze(CompilationInfo* info);
-
- private:
-  AssignedVariablesAnalyzer(CompilationInfo* info, int bits);
-  bool Analyze();
-
-  Variable* FindSmiLoopVariable(ForStatement* stmt);
-
-  int BitIndex(Variable* var);
-
-  void RecordAssignedVar(Variable* var);
-
-  void MarkIfTrivial(Expression* expr);
-
-  // Visits an expression saving the accumulator before, clearing
-  // it before visting and restoring it after visiting.
-  void ProcessExpression(Expression* expr);
-
-  // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
-  AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
-  CompilationInfo* info_;
-
-  // Accumulator for assigned variables set.
-  BitVector av_;
-
-  DISALLOW_COPY_AND_ASSIGN(AssignedVariablesAnalyzer);
-};
-
-
 } }  // namespace v8::internal
 
 
diff --git a/src/date.js b/src/date.js
index 242ab7b..5a2e9a2 100644
--- a/src/date.js
+++ b/src/date.js
@@ -684,7 +684,7 @@
 
 // ECMA 262 - 15.9.5.16
 function DateGetDay() {
-  var t = %_ValueOf(this);
+  var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return t;
   return WeekDay(LocalTimeNoCheck(t));
 }
@@ -692,7 +692,7 @@
 
 // ECMA 262 - 15.9.5.17
 function DateGetUTCDay() {
-  var t = %_ValueOf(this);
+  var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return t;
   return WeekDay(t);
 }
diff --git a/src/dateparser.h b/src/dateparser.h
index 9d29715..6e87c34 100644
--- a/src/dateparser.h
+++ b/src/dateparser.h
@@ -28,6 +28,7 @@
 #ifndef V8_DATEPARSER_H_
 #define V8_DATEPARSER_H_
 
+#include "allocation.h"
 #include "char-predicates-inl.h"
 #include "scanner-base.h"
 
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index bc0f966..908fcd2 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -1335,7 +1335,7 @@
   try {
     try {
       // Convert the JSON string to an object.
-      request = %CompileString('(' + json_request + ')')();
+      request = JSON.parse(json_request);
 
       // Create an initial response.
       response = this.createResponse(request);
diff --git a/src/debug.cc b/src/debug.cc
index 6f0431c..85c4b5e 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -92,7 +92,7 @@
 }
 
 
-static Handle<Code> ComputeCallDebugPrepareStepIn(int argc,  Code::Kind kind) {
+static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
   Isolate* isolate = Isolate::Current();
   CALL_HEAP_FUNCTION(
       isolate,
@@ -167,7 +167,8 @@
       Address target = original_rinfo()->target_address();
       Code* code = Code::GetCodeFromTargetAddress(target);
       if ((code->is_inline_cache_stub() &&
-           !code->is_type_recording_binary_op_stub() &&
+           !code->is_binary_op_stub() &&
+           !code->is_unary_op_stub() &&
            !code->is_compare_ic_stub()) ||
           RelocInfo::IsConstructCall(rmode())) {
         break_point_++;
@@ -477,21 +478,6 @@
     // calling convention used by the call site.
     Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
     rinfo()->set_target_address(dbgbrk_code->entry());
-
-    // For stubs that refer back to an inlined version clear the cached map for
-    // the inlined case to always go through the IC. As long as the break point
-    // is set the patching performed by the runtime system will take place in
-    // the code copy and will therefore have no effect on the running code
-    // keeping it from using the inlined code.
-    if (code->is_keyed_load_stub()) {
-      KeyedLoadIC::ClearInlinedVersion(pc());
-    } else if (code->is_keyed_store_stub()) {
-      KeyedStoreIC::ClearInlinedVersion(pc());
-    } else if (code->is_load_stub()) {
-      LoadIC::ClearInlinedVersion(pc());
-    } else if (code->is_store_stub()) {
-      StoreIC::ClearInlinedVersion(pc());
-    }
   }
 }
 
@@ -499,20 +485,6 @@
 void BreakLocationIterator::ClearDebugBreakAtIC() {
   // Patch the code to the original invoke.
   rinfo()->set_target_address(original_rinfo()->target_address());
-
-  RelocInfo::Mode mode = rmode();
-  if (RelocInfo::IsCodeTarget(mode)) {
-    AssertNoAllocation nogc;
-    Address target = original_rinfo()->target_address();
-    Code* code = Code::GetCodeFromTargetAddress(target);
-
-    // Restore the inlined version of keyed stores to get back to the
-    // fast case.  We need to patch back the keyed store because no
-    // patching happens when running normally.  For keyed loads, the
-    // map check will get patched back when running normally after ICs
-    // have been cleared at GC.
-    if (code->is_keyed_store_stub()) KeyedStoreIC::RestoreInlinedVersion(pc());
-  }
 }
 
 
@@ -843,6 +815,7 @@
   HandleScope scope(isolate_);
   Handle<Context> context =
       isolate_->bootstrapper()->CreateEnvironment(
+          isolate_,
           Handle<Object>::null(),
           v8::Handle<ObjectTemplate>(),
           NULL);
@@ -1017,6 +990,11 @@
   } else if (thread_local_.frame_drop_mode_ ==
       FRAME_DROPPED_IN_DIRECT_CALL) {
     // Nothing to do, after_break_target is not used here.
+  } else if (thread_local_.frame_drop_mode_ ==
+      FRAME_DROPPED_IN_RETURN_CALL) {
+    Code* plain_return = isolate_->builtins()->builtin(
+        Builtins::kFrameDropper_LiveEdit);
+    thread_local_.after_break_target_ = plain_return->entry();
   } else {
     UNREACHABLE();
   }
@@ -1986,8 +1964,8 @@
 }
 
 
-Debugger::Debugger(Isolate* isolate)
-    : debugger_access_(isolate->debugger_access()),
+Debugger::Debugger()
+    : debugger_access_(OS::CreateMutex()),
       event_listener_(Handle<Object>()),
       event_listener_data_(Handle<Object>()),
       compiling_natives_(false),
@@ -2003,12 +1981,13 @@
       agent_(NULL),
       command_queue_(kQueueInitialSize),
       command_received_(OS::CreateSemaphore(0)),
-      event_command_queue_(kQueueInitialSize),
-      isolate_(isolate) {
+      event_command_queue_(kQueueInitialSize) {
 }
 
 
 Debugger::~Debugger() {
+  delete debugger_access_;
+  debugger_access_ = 0;
   delete dispatch_handler_access_;
   dispatch_handler_access_ = 0;
   delete command_received_;
@@ -2387,7 +2366,7 @@
                                  Handle<Object> exec_state,
                                  Handle<Object> event_data,
                                  v8::Debug::ClientData* client_data) {
-  if (event_listener_->IsProxy()) {
+  if (event_listener_->IsForeign()) {
     CallCEventCallback(event, exec_state, event_data, client_data);
   } else {
     CallJSEventCallback(event, exec_state, event_data);
@@ -2399,9 +2378,9 @@
                                   Handle<Object> exec_state,
                                   Handle<Object> event_data,
                                   v8::Debug::ClientData* client_data) {
-  Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
+  Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
   v8::Debug::EventCallback2 callback =
-      FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
+      FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->address());
   EventDetailsImpl event_details(
       event,
       Handle<JSObject>::cast(exec_state),
diff --git a/src/debug.h b/src/debug.h
index 6be33a6..95dca72 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -28,6 +28,7 @@
 #ifndef V8_DEBUG_H_
 #define V8_DEBUG_H_
 
+#include "allocation.h"
 #include "arguments.h"
 #include "assembler.h"
 #include "debug-agent.h"
@@ -422,7 +423,8 @@
     FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
     // The top JS frame had been calling some C++ function. The return address
     // gets patched automatically.
-    FRAME_DROPPED_IN_DIRECT_CALL
+    FRAME_DROPPED_IN_DIRECT_CALL,
+    FRAME_DROPPED_IN_RETURN_CALL
   };
 
   void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
@@ -809,7 +811,7 @@
   bool IsDebuggerActive();
 
  private:
-  explicit Debugger(Isolate* isolate);
+  Debugger();
 
   void CallEventCallback(v8::DebugEvent event,
                          Handle<Object> exec_state,
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index cb82f44..7c5dfb8 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -30,6 +30,7 @@
 
 #include "v8.h"
 
+#include "allocation.h"
 #include "macro-assembler.h"
 #include "zone-inl.h"
 
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 65e1668..368c3a8 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -282,6 +282,9 @@
         } else {
           out.AddFormatted(" %s", Code::Kind2String(kind));
         }
+        if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+          out.AddFormatted(" (id = %d)", static_cast<int>(relocinfo.data()));
+        }
       } else if (rmode == RelocInfo::RUNTIME_ENTRY &&
                  Isolate::Current()->deoptimizer_data() != NULL) {
         // A runtime entry reloinfo might be a deoptimization bailout.
diff --git a/src/disassembler.h b/src/disassembler.h
index 68a338d..4a87dca 100644
--- a/src/disassembler.h
+++ b/src/disassembler.h
@@ -28,6 +28,8 @@
 #ifndef V8_DISASSEMBLER_H_
 #define V8_DISASSEMBLER_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/execution.cc b/src/execution.cc
index 7a2bbc6..e84ab9e 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -132,7 +132,7 @@
   if (*has_pending_exception) {
     isolate->ReportPendingMessages();
     if (isolate->pending_exception() == Failure::OutOfMemoryException()) {
-      if (!isolate->ignore_out_of_memory()) {
+      if (!isolate->handle_scope_implementer()->ignore_out_of_memory()) {
         V8::FatalProcessOutOfMemory("JS", true);
       }
     }
@@ -145,11 +145,16 @@
 }
 
 
-Handle<Object> Execution::Call(Handle<JSFunction> func,
+Handle<Object> Execution::Call(Handle<Object> callable,
                                Handle<Object> receiver,
                                int argc,
                                Object*** args,
                                bool* pending_exception) {
+  if (!callable->IsJSFunction()) {
+    callable = TryGetFunctionDelegate(callable, pending_exception);
+    if (*pending_exception) return callable;
+  }
+  Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
   return Invoke(false, func, receiver, argc, args, pending_exception);
 }
 
@@ -234,6 +239,30 @@
 }
 
 
+Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
+                                                 bool* has_pending_exception) {
+  ASSERT(!object->IsJSFunction());
+  Isolate* isolate = Isolate::Current();
+
+  // Objects created through the API can have an instance-call handler
+  // that should be used when calling the object as a function.
+  if (object->IsHeapObject() &&
+      HeapObject::cast(*object)->map()->has_instance_call_handler()) {
+    return Handle<JSFunction>(
+        isolate->global_context()->call_as_function_delegate());
+  }
+
+  // If the Object doesn't have an instance-call handler we should
+  // throw a non-callable exception.
+  i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
+      "called_non_callable", i::HandleVector<i::Object>(&object, 1));
+  isolate->Throw(*error_obj);
+  *has_pending_exception = true;
+
+  return isolate->factory()->undefined_value();
+}
+
+
 Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
   ASSERT(!object->IsJSFunction());
   Isolate* isolate = Isolate::Current();
@@ -253,6 +282,34 @@
 }
 
 
+Handle<Object> Execution::TryGetConstructorDelegate(
+    Handle<Object> object,
+    bool* has_pending_exception) {
+  ASSERT(!object->IsJSFunction());
+  Isolate* isolate = Isolate::Current();
+
+  // If you return a function from here, it will be called when an
+  // attempt is made to call the given object as a constructor.
+
+  // Objects created through the API can have an instance-call handler
+  // that should be used when calling the object as a function.
+  if (object->IsHeapObject() &&
+      HeapObject::cast(*object)->map()->has_instance_call_handler()) {
+    return Handle<JSFunction>(
+        isolate->global_context()->call_as_constructor_delegate());
+  }
+
+  // If the Object doesn't have an instance-call handler we should
+  // throw a non-callable exception.
+  i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
+      "called_non_callable", i::HandleVector<i::Object>(&object, 1));
+  isolate->Throw(*error_obj);
+  *has_pending_exception = true;
+
+  return isolate->factory()->undefined_value();
+}
+
+
 bool StackGuard::IsStackOverflow() {
   ExecutionAccess access(isolate_);
   return (thread_local_.jslimit_ != kInterruptLimit &&
@@ -272,7 +329,7 @@
   ExecutionAccess access(isolate_);
   // If the current limits are special (eg due to a pending interrupt) then
   // leave them alone.
-  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
+  uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
   if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
     thread_local_.jslimit_ = jslimit;
   }
@@ -428,7 +485,7 @@
 }
 
 
-bool StackGuard::ThreadLocal::Initialize() {
+bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
   bool should_set_stack_limits = false;
   if (real_climit_ == kIllegalLimit) {
     // Takes the address of the limit variable in order to find out where
@@ -436,8 +493,8 @@
     const uintptr_t kLimitSize = FLAG_stack_size * KB;
     uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
     ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
-    real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
-    jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+    real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
+    jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
     real_climit_ = limit;
     climit_ = limit;
     should_set_stack_limits = true;
@@ -456,9 +513,10 @@
 
 
 void StackGuard::InitThread(const ExecutionAccess& lock) {
-  if (thread_local_.Initialize()) isolate_->heap()->SetStackLimits();
-  uintptr_t stored_limit =
-      Isolate::CurrentPerIsolateThreadData()->stack_limit();
+  if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
+  Isolate::PerIsolateThreadData* per_thread =
+      isolate_->FindOrAllocatePerThreadDataForThisThread();
+  uintptr_t stored_limit = per_thread->stack_limit();
   // You should hold the ExecutionAccess lock when you call this.
   if (stored_limit != 0) {
     StackGuard::SetStackLimit(stored_limit);
@@ -681,13 +739,13 @@
     isolate->debug()->PreemptionWhileInDebugger();
   } else {
     // Perform preemption.
-    v8::Unlocker unlocker;
+    v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate));
     Thread::YieldCPU();
   }
 #else
   { // NOLINT
     // Perform preemption.
-    v8::Unlocker unlocker;
+    v8::Unlocker unlocker(reinterpret_cast<v8::Isolate*>(isolate));
     Thread::YieldCPU();
   }
 #endif
diff --git a/src/execution.h b/src/execution.h
index d4b80d2..bb5f804 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -28,6 +28,8 @@
 #ifndef V8_EXECUTION_H_
 #define V8_EXECUTION_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -51,7 +53,7 @@
   // *pending_exception tells whether the invoke resulted in
   // a pending exception.
   //
-  static Handle<Object> Call(Handle<JSFunction> func,
+  static Handle<Object> Call(Handle<Object> callable,
                              Handle<Object> receiver,
                              int argc,
                              Object*** args,
@@ -138,10 +140,14 @@
   // Get a function delegate (or undefined) for the given non-function
   // object. Used for support calling objects as functions.
   static Handle<Object> GetFunctionDelegate(Handle<Object> object);
+  static Handle<Object> TryGetFunctionDelegate(Handle<Object> object,
+                                               bool* has_pending_exception);
 
   // Get a function delegate (or undefined) for the given non-function
   // object. Used for support calling objects as constructors.
   static Handle<Object> GetConstructorDelegate(Handle<Object> object);
+  static Handle<Object> TryGetConstructorDelegate(Handle<Object> object,
+                                                  bool* has_pending_exception);
 };
 
 
@@ -252,7 +258,7 @@
     void Clear();
 
     // Returns true if the heap's stack limits should be set, false if not.
-    bool Initialize();
+    bool Initialize(Isolate* isolate);
 
     // The stack limit is split into a JavaScript and a C++ stack limit. These
     // two are the same except when running on a simulator where the C++ and
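
Execution::Call now accepts an arbitrary object and falls back to TryGetFunctionDelegate, so non-callable values reach a proper "called_non_callable" TypeError instead of hitting an assert in Invoke. A rough JS-level sketch of the resulting behaviour (illustrative only; whether a given call site goes through this exact C++ path depends on the embedder):

    var notCallable = {};                      // no [[Call]] and no call delegate
    try {
      notCallable();
    } catch (e) {
      print(e instanceof TypeError);           // true
    }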
diff --git a/src/extensions/experimental/collator.cc b/src/extensions/experimental/collator.cc
new file mode 100644
index 0000000..7d1a21d
--- /dev/null
+++ b/src/extensions/experimental/collator.cc
@@ -0,0 +1,218 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "collator.h"
+
+#include "unicode/coll.h"
+#include "unicode/locid.h"
+#include "unicode/ucol.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> Collator::collator_template_;
+
+icu::Collator* Collator::UnpackCollator(v8::Handle<v8::Object> obj) {
+  if (collator_template_->HasInstance(obj)) {
+    return static_cast<icu::Collator*>(obj->GetPointerFromInternalField(0));
+  }
+
+  return NULL;
+}
+
+void Collator::DeleteCollator(v8::Persistent<v8::Value> object, void* param) {
+  v8::Persistent<v8::Object> persistent_object =
+      v8::Persistent<v8::Object>::Cast(object);
+
+  // First delete the hidden C++ object.
+  // Unpacking should never return NULL here. That would only happen if
+  // this method is used as the weak callback for persistent handles not
+  // pointing to a collator.
+  delete UnpackCollator(persistent_object);
+
+  // Then dispose of the persistent handle to JS object.
+  persistent_object.Dispose();
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+  // Returns undefined, and schedules an exception to be thrown.
+  return v8::ThrowException(v8::Exception::Error(
+      v8::String::New("Collator method called on an object "
+                      "that is not a Collator.")));
+}
+
+// Extract a boolean option named in |option| and set it to |result|.
+// Return true if it's specified. Otherwise, return false.
+static bool ExtractBooleanOption(const v8::Local<v8::Object>& options,
+                                 const char* option,
+                                 bool* result) {
+  v8::HandleScope handle_scope;
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Value> value = options->Get(v8::String::New(option));
+  if (try_catch.HasCaught()) {
+    return false;
+  }
+  // No need to check if |value| is empty because it's taken care of
+  // by TryCatch above.
+  if (!value->IsUndefined() && !value->IsNull()) {
+    if (value->IsBoolean()) {
+      *result = value->BooleanValue();
+      return true;
+    }
+  }
+  return false;
+}
+
+// When there's an ICU error, throw a JavaScript error with |message|.
+static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
+  return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
+}
+
+v8::Handle<v8::Value> Collator::CollatorCompare(const v8::Arguments& args) {
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Two string arguments are required.")));
+  }
+
+  icu::Collator* collator = UnpackCollator(args.Holder());
+  if (!collator) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  v8::String::Value string_value1(args[0]);
+  v8::String::Value string_value2(args[1]);
+  const UChar* string1 = reinterpret_cast<const UChar*>(*string_value1);
+  const UChar* string2 = reinterpret_cast<const UChar*>(*string_value2);
+  UErrorCode status = U_ZERO_ERROR;
+  UCollationResult result = collator->compare(
+      string1, string_value1.length(), string2, string_value2.length(), status);
+
+  if (U_FAILURE(status)) {
+    return ThrowExceptionForICUError(
+        "Unexpected failure in Collator.compare.");
+  }
+
+  return v8::Int32::New(result);
+}
+
+v8::Handle<v8::Value> Collator::JSCollator(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Locale and collation options are required.")));
+  }
+
+  v8::String::AsciiValue locale(args[0]);
+  icu::Locale icu_locale(*locale);
+
+  icu::Collator* collator = NULL;
+  UErrorCode status = U_ZERO_ERROR;
+  collator = icu::Collator::createInstance(icu_locale, status);
+
+  if (U_FAILURE(status)) {
+    delete collator;
+    return ThrowExceptionForICUError("Failed to create collator.");
+  }
+
+  v8::Local<v8::Object> options(args[1]->ToObject());
+
+  // Below, we change only the collation options that the caller specified
+  // explicitly in JavaScript; otherwise we leave them alone so as not to
+  // override the locale-dependent defaults.
+  // The three options below very likely share the same default across
+  // locales, but I haven't checked them all. Others we may add in the
+  // future certainly have locale-dependent defaults (e.g. caseFirst is
+  // upperFirst for Danish while it is off for most other locales).
+
+  bool ignore_case, ignore_accents, numeric;
+
+  if (ExtractBooleanOption(options, "ignoreCase", &ignore_case)) {
+    collator->setAttribute(UCOL_CASE_LEVEL, ignore_case ? UCOL_OFF : UCOL_ON,
+                           status);
+    if (U_FAILURE(status)) {
+      delete collator;
+      return ThrowExceptionForICUError("Failed to set ignoreCase.");
+    }
+  }
+
+  // Accents are taken into account with strength secondary or higher.
+  if (ExtractBooleanOption(options, "ignoreAccents", &ignore_accents)) {
+    if (!ignore_accents) {
+      collator->setStrength(icu::Collator::SECONDARY);
+    } else {
+      collator->setStrength(icu::Collator::PRIMARY);
+    }
+  }
+
+  if (ExtractBooleanOption(options, "numeric", &numeric)) {
+    collator->setAttribute(UCOL_NUMERIC_COLLATION,
+                           numeric ? UCOL_ON : UCOL_OFF, status);
+    if (U_FAILURE(status)) {
+      delete collator;
+      return ThrowExceptionForICUError("Failed to set numeric sort option.");
+    }
+  }
+
+  if (collator_template_.IsEmpty()) {
+    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+    raw_template->SetClassName(v8::String::New("v8Locale.Collator"));
+
+    // Define internal field count on instance template.
+    v8::Local<v8::ObjectTemplate> object_template =
+        raw_template->InstanceTemplate();
+
+    // Set aside internal fields for icu collator.
+    object_template->SetInternalFieldCount(1);
+
+    // Define all of the prototype methods on prototype template.
+    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+    proto->Set(v8::String::New("compare"),
+               v8::FunctionTemplate::New(CollatorCompare));
+
+    collator_template_ =
+        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+  }
+
+  // Create an empty object wrapper.
+  v8::Local<v8::Object> local_object =
+      collator_template_->GetFunction()->NewInstance();
+  v8::Persistent<v8::Object> wrapper =
+      v8::Persistent<v8::Object>::New(local_object);
+
+  // Set collator as internal field of the resulting JS object.
+  wrapper->SetPointerInInternalField(0, collator);
+
+  // Make object handle weak so we can delete the collator once GC kicks in.
+  wrapper.MakeWeak(NULL, DeleteCollator);
+
+  return wrapper;
+}
+
+} }  // namespace v8::internal
+
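JSCollator maps the JavaScript option bag onto ICU attributes: ignoreCase toggles UCOL_CASE_LEVEL, ignoreAccents lowers the strength to PRIMARY, and numeric enables UCOL_NUMERIC_COLLATION. A hedged sketch of how that is meant to look from script, going through the v8Locale.Collator wrapper defined later in i18n.js (actual results depend on the locale and the ICU data compiled in):

    var collator = (new v8Locale({localeID: 'en-US'})).createCollator(
        {ignoreCase: true, ignoreAccents: true, numeric: true});

    print(collator.compare('a2', 'a10'));        // expected -1 (numeric ordering)
    print(collator.compare('resume', 'résumé')); // expected 0 at PRIMARY strength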
diff --git a/src/extensions/experimental/collator.h b/src/extensions/experimental/collator.h
new file mode 100644
index 0000000..10d6ffb
--- /dev/null
+++ b/src/extensions/experimental/collator.h
@@ -0,0 +1,69 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+
+#include <v8.h>
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class Collator;
+class UnicodeString;
+}
+
+namespace v8 {
+namespace internal {
+
+class Collator {
+ public:
+  static v8::Handle<v8::Value> JSCollator(const v8::Arguments& args);
+
+  // Helper methods for various bindings.
+
+  // Unpacks collator object from corresponding JavaScript object.
+  static icu::Collator* UnpackCollator(v8::Handle<v8::Object> obj);
+
+  // Release memory we allocated for the Collator once the JS object that
+  // holds the pointer gets garbage collected.
+  static void DeleteCollator(v8::Persistent<v8::Value> object, void* param);
+
+  // Compares two strings and returns -1, 0 or 1 depending on
+  // whether string1 is smaller than, equal to or larger than string2.
+  static v8::Handle<v8::Value> CollatorCompare(const v8::Arguments& args);
+
+ private:
+  Collator() {}
+
+  static v8::Persistent<v8::FunctionTemplate> collator_template_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+
diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp
index a8585fd..2a7775e 100644
--- a/src/extensions/experimental/experimental.gyp
+++ b/src/extensions/experimental/experimental.gyp
@@ -39,9 +39,17 @@
       'sources': [
         'break-iterator.cc',
         'break-iterator.h',
+        'collator.cc',
+        'collator.h',
         'i18n-extension.cc',
         'i18n-extension.h',
-	'<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
+        'i18n-locale.cc',
+        'i18n-locale.h',
+        'i18n-utils.cc',
+        'i18n-utils.h',
+        'language-matcher.cc',
+        'language-matcher.h',
+        '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
       ],
       'include_dirs': [
         '<(icu_src_dir)/public/common',
@@ -49,7 +57,7 @@
       ],
       'dependencies': [
         '<(icu_src_dir)/icu.gyp:*',
-	'js2c_i18n#host',
+        'js2c_i18n#host',
         '../../../tools/gyp/v8.gyp:v8',
       ],
     },
@@ -59,28 +67,27 @@
       'toolsets': ['host'],
       'variables': {
         'library_files': [
-	  'i18n.js'
-	],
+          'i18n.js'
+        ],
       },
       'actions': [
         {
-	  'action_name': 'js2c_i18n',
-	  'inputs': [
-	    '../../../tools/js2c.py',
-	    '<@(library_files)',
-	  ],
-	  'outputs': [
-	    '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
-	    '<(SHARED_INTERMEDIATE_DIR)/i18n-js-empty.cc'
-	  ],
-	  'action': [
-	    'python',
-	    '../../../tools/js2c.py',
-	    '<@(_outputs)',
-	    'I18N',
-	    '<@(library_files)'
-	  ],
-	},
+          'action_name': 'js2c_i18n',
+          'inputs': [
+            '../../../tools/js2c.py',
+            '<@(library_files)',
+          ],
+          'outputs': [
+            '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
+          ],
+          'action': [
+            'python',
+            '../../../tools/js2c.py',
+            '<@(_outputs)',
+            'I18N',
+            '<@(library_files)'
+          ],
+        },
       ],
     },
   ],  # targets
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
index 6e3ab15..88c609e 100644
--- a/src/extensions/experimental/i18n-extension.cc
+++ b/src/extensions/experimental/i18n-extension.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,13 +27,10 @@
 
 #include "i18n-extension.h"
 
-#include <algorithm>
-#include <string>
-
 #include "break-iterator.h"
+#include "collator.h"
+#include "i18n-locale.h"
 #include "natives.h"
-#include "unicode/locid.h"
-#include "unicode/uloc.h"
 
 namespace v8 {
 namespace internal {
@@ -57,166 +54,16 @@
 v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
     v8::Handle<v8::String> name) {
   if (name->Equals(v8::String::New("NativeJSLocale"))) {
-    return v8::FunctionTemplate::New(JSLocale);
-  } else if (name->Equals(v8::String::New("NativeJSAvailableLocales"))) {
-    return v8::FunctionTemplate::New(JSAvailableLocales);
-  } else if (name->Equals(v8::String::New("NativeJSMaximizedLocale"))) {
-    return v8::FunctionTemplate::New(JSMaximizedLocale);
-  } else if (name->Equals(v8::String::New("NativeJSMinimizedLocale"))) {
-    return v8::FunctionTemplate::New(JSMinimizedLocale);
-  } else if (name->Equals(v8::String::New("NativeJSDisplayLanguage"))) {
-    return v8::FunctionTemplate::New(JSDisplayLanguage);
-  } else if (name->Equals(v8::String::New("NativeJSDisplayScript"))) {
-    return v8::FunctionTemplate::New(JSDisplayScript);
-  } else if (name->Equals(v8::String::New("NativeJSDisplayRegion"))) {
-    return v8::FunctionTemplate::New(JSDisplayRegion);
-  } else if (name->Equals(v8::String::New("NativeJSDisplayName"))) {
-    return v8::FunctionTemplate::New(JSDisplayName);
+    return v8::FunctionTemplate::New(I18NLocale::JSLocale);
   } else if (name->Equals(v8::String::New("NativeJSBreakIterator"))) {
     return v8::FunctionTemplate::New(BreakIterator::JSBreakIterator);
+  } else if (name->Equals(v8::String::New("NativeJSCollator"))) {
+    return v8::FunctionTemplate::New(Collator::JSCollator);
   }
 
   return v8::Handle<v8::FunctionTemplate>();
 }
 
-v8::Handle<v8::Value> I18NExtension::JSLocale(const v8::Arguments& args) {
-  // TODO(cira): Fetch browser locale. Accept en-US as good default for now.
-  // We could possibly pass browser locale as a parameter in the constructor.
-  std::string locale_name("en-US");
-  if (args.Length() == 1 && args[0]->IsString()) {
-    locale_name = *v8::String::Utf8Value(args[0]->ToString());
-  }
-
-  v8::Local<v8::Object> locale = v8::Object::New();
-  locale->Set(v8::String::New("locale"), v8::String::New(locale_name.c_str()));
-
-  icu::Locale icu_locale(locale_name.c_str());
-
-  const char* language = icu_locale.getLanguage();
-  locale->Set(v8::String::New("language"), v8::String::New(language));
-
-  const char* script = icu_locale.getScript();
-  if (strlen(script)) {
-    locale->Set(v8::String::New("script"), v8::String::New(script));
-  }
-
-  const char* region = icu_locale.getCountry();
-  if (strlen(region)) {
-    locale->Set(v8::String::New("region"), v8::String::New(region));
-  }
-
-  return locale;
-}
-
-// TODO(cira): Filter out locales that Chrome doesn't support.
-v8::Handle<v8::Value> I18NExtension::JSAvailableLocales(
-    const v8::Arguments& args) {
-  v8::Local<v8::Array> all_locales = v8::Array::New();
-
-  int count = 0;
-  const icu::Locale* icu_locales = icu::Locale::getAvailableLocales(count);
-  for (int i = 0; i < count; ++i) {
-    all_locales->Set(i, v8::String::New(icu_locales[i].getName()));
-  }
-
-  return all_locales;
-}
-
-// Use - as tag separator, not _ that ICU uses.
-static std::string NormalizeLocale(const std::string& locale) {
-  std::string result(locale);
-  // TODO(cira): remove STL dependency.
-  std::replace(result.begin(), result.end(), '_', '-');
-  return result;
-}
-
-v8::Handle<v8::Value> I18NExtension::JSMaximizedLocale(
-    const v8::Arguments& args) {
-  if (!args.Length() || !args[0]->IsString()) {
-    return v8::Undefined();
-  }
-
-  UErrorCode status = U_ZERO_ERROR;
-  std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
-  char max_locale[ULOC_FULLNAME_CAPACITY];
-  uloc_addLikelySubtags(locale_name.c_str(), max_locale,
-                        sizeof(max_locale), &status);
-  if (U_FAILURE(status)) {
-    return v8::Undefined();
-  }
-
-  return v8::String::New(NormalizeLocale(max_locale).c_str());
-}
-
-v8::Handle<v8::Value> I18NExtension::JSMinimizedLocale(
-    const v8::Arguments& args) {
-  if (!args.Length() || !args[0]->IsString()) {
-    return v8::Undefined();
-  }
-
-  UErrorCode status = U_ZERO_ERROR;
-  std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
-  char min_locale[ULOC_FULLNAME_CAPACITY];
-  uloc_minimizeSubtags(locale_name.c_str(), min_locale,
-                       sizeof(min_locale), &status);
-  if (U_FAILURE(status)) {
-    return v8::Undefined();
-  }
-
-  return v8::String::New(NormalizeLocale(min_locale).c_str());
-}
-
-// Common code for JSDisplayXXX methods.
-static v8::Handle<v8::Value> GetDisplayItem(const v8::Arguments& args,
-                                            const std::string& item) {
-  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
-    return v8::Undefined();
-  }
-
-  std::string base_locale = *v8::String::Utf8Value(args[0]->ToString());
-  icu::Locale icu_locale(base_locale.c_str());
-  icu::Locale display_locale =
-      icu::Locale(*v8::String::Utf8Value(args[1]->ToString()));
-  icu::UnicodeString result;
-  if (item == "language") {
-    icu_locale.getDisplayLanguage(display_locale, result);
-  } else if (item == "script") {
-    icu_locale.getDisplayScript(display_locale, result);
-  } else if (item == "region") {
-    icu_locale.getDisplayCountry(display_locale, result);
-  } else if (item == "name") {
-    icu_locale.getDisplayName(display_locale, result);
-  } else {
-    return v8::Undefined();
-  }
-
-  if (result.length()) {
-    return v8::String::New(
-        reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
-  }
-
-  return v8::Undefined();
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayLanguage(
-    const v8::Arguments& args) {
-  return GetDisplayItem(args, "language");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayScript(
-    const v8::Arguments& args) {
-  return GetDisplayItem(args, "script");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayRegion(
-    const v8::Arguments& args) {
-  return GetDisplayItem(args, "region");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayName(const v8::Arguments& args) {
-  return GetDisplayItem(args, "name");
-}
-
 I18NExtension* I18NExtension::get() {
   if (!extension_) {
     extension_ = new I18NExtension();
diff --git a/src/extensions/experimental/i18n-extension.h b/src/extensions/experimental/i18n-extension.h
index 54c973f..b4dc7c3 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/src/extensions/experimental/i18n-extension.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,16 +41,6 @@
   virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
       v8::Handle<v8::String> name);
 
-  // Implementations of window.Locale methods.
-  static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSAvailableLocales(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSMaximizedLocale(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSMinimizedLocale(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayLanguage(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayScript(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayRegion(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayName(const v8::Arguments& args);
-
   // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
   static void Register();
   static I18NExtension* get();
diff --git a/src/extensions/experimental/i18n-locale.cc b/src/extensions/experimental/i18n-locale.cc
new file mode 100644
index 0000000..cf17812
--- /dev/null
+++ b/src/extensions/experimental/i18n-locale.cc
@@ -0,0 +1,112 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "i18n-locale.h"
+
+#include "i18n-utils.h"
+#include "language-matcher.h"
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+const char* const I18NLocale::kLocaleID = "localeID";
+const char* const I18NLocale::kRegionID = "regionID";
+const char* const I18NLocale::kICULocaleID = "icuLocaleID";
+
+v8::Handle<v8::Value> I18NLocale::JSLocale(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 1 || !args[0]->IsObject()) {
+    return v8::Undefined();
+  }
+
+  v8::Local<v8::Object> settings = args[0]->ToObject();
+
+  // Get best match for locale.
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Value> locale_id = settings->Get(v8::String::New(kLocaleID));
+  if (try_catch.HasCaught()) {
+    return v8::Undefined();
+  }
+
+  LocaleIDMatch result;
+  if (locale_id->IsArray()) {
+    LanguageMatcher::GetBestMatchForPriorityList(
+        v8::Handle<v8::Array>::Cast(locale_id), &result);
+  } else if (locale_id->IsString()) {
+    LanguageMatcher::GetBestMatchForString(locale_id->ToString(), &result);
+  } else {
+    LanguageMatcher::GetBestMatchForString(v8::String::New(""), &result);
+  }
+
+  // Get best match for region.
+  char region_id[ULOC_COUNTRY_CAPACITY];
+  I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
+
+  v8::Handle<v8::Value> region = settings->Get(v8::String::New(kRegionID));
+  if (try_catch.HasCaught()) {
+    return v8::Undefined();
+  }
+
+  if (!GetBestMatchForRegionID(result.icu_id, region, region_id)) {
+    // Set region id to empty string because region couldn't be inferred.
+    I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
+  }
+
+  // Build JavaScript object that contains bcp and icu locale ID and region ID.
+  v8::Handle<v8::Object> locale = v8::Object::New();
+  locale->Set(v8::String::New(kLocaleID), v8::String::New(result.bcp47_id));
+  locale->Set(v8::String::New(kICULocaleID), v8::String::New(result.icu_id));
+  locale->Set(v8::String::New(kRegionID), v8::String::New(region_id));
+
+  return handle_scope.Close(locale);
+}
+
+bool I18NLocale::GetBestMatchForRegionID(
+    const char* locale_id, v8::Handle<v8::Value> region_id, char* result) {
+  if (region_id->IsString() && region_id->ToString()->Length() != 0) {
+    icu::Locale user_locale(
+        icu::Locale("und", *v8::String::Utf8Value(region_id->ToString())));
+    I18NUtils::StrNCopy(
+        result, ULOC_COUNTRY_CAPACITY, user_locale.getCountry());
+    return true;
+  }
+  // Maximize locale_id to infer the region (e.g. expand "de" to "de-Latn-DE"
+  // and grab "DE" from the result).
+  UErrorCode status = U_ZERO_ERROR;
+  char maximized_locale[ULOC_FULLNAME_CAPACITY];
+  uloc_addLikelySubtags(
+      locale_id, maximized_locale, ULOC_FULLNAME_CAPACITY, &status);
+  uloc_getCountry(maximized_locale, result, ULOC_COUNTRY_CAPACITY, &status);
+
+  return !U_FAILURE(status);
+}
+
+} }  // namespace v8::internal
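
The binding consumes a settings object keyed by the constants above and returns the resolved IDs. A sketch of the expected shape (the native-function declaration only resolves inside the extension's own scripts, and the concrete IDs depend on the available ICU locales):

    native function NativeJSLocale();

    var resolved = NativeJSLocale({localeID: ['sr-RS', 'en-US'], regionID: ''});
    // resolved.localeID    - best BCP47 match from the priority list, e.g. 'sr-RS'
    // resolved.icuLocaleID - the same locale in ICU form, e.g. 'sr_RS'
    // resolved.regionID    - region inferred via uloc_addLikelySubtags, e.g. 'RS'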
diff --git a/src/mips/register-allocator-mips.h b/src/extensions/experimental/i18n-locale.h
similarity index 63%
rename from src/mips/register-allocator-mips.h
rename to src/extensions/experimental/i18n-locale.h
index c448923..053886b 100644
--- a/src/mips/register-allocator-mips.h
+++ b/src/extensions/experimental/i18n-locale.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,23 +25,36 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
 
-#include "mips/constants-mips.h"
+#include <v8.h>
 
 namespace v8 {
 namespace internal {
 
-class RegisterAllocatorConstants : public AllStatic {
+class I18NLocale {
  public:
-  // No registers are currently managed by the register allocator on MIPS.
-  static const int kNumRegisters = 0;
-  static const int kInvalidRegister = -1;
-};
+  I18NLocale() {}
 
+  // Implementations of window.Locale methods.
+  static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
+
+  // Infers the region id from the locale id, or uses the user-specified
+  // region id. The result is canonicalized.
+  // Returns the status of the ICU operation (maximize locale or get region).
+  static bool GetBestMatchForRegionID(
+      const char* locale_id, v8::Handle<v8::Value> regions, char* result);
+
+ private:
+  // Key name for localeID parameter.
+  static const char* const kLocaleID;
+  // Key name for regionID parameter.
+  static const char* const kRegionID;
+  // Key name for the icuLocaleID result.
+  static const char* const kICULocaleID;
+};
 
 } }  // namespace v8::internal
 
-#endif  // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
diff --git a/src/frame-element.cc b/src/extensions/experimental/i18n-utils.cc
similarity index 84%
rename from src/frame-element.cc
rename to src/extensions/experimental/i18n-utils.cc
index f629900..a82c8eb 100644
--- a/src/frame-element.cc
+++ b/src/extensions/experimental/i18n-utils.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#include "i18n-utils.h"
 
-#include "frame-element.h"
-#include "zone-inl.h"
+#include <string.h>
 
 namespace v8 {
 namespace internal {
 
+// static
+void I18NUtils::StrNCopy(char* dest, int length, const char* src) {
+  if (!dest || !src) return;
+
+  strncpy(dest, src, length);
+  dest[length - 1] = '\0';
+}
 
 } }  // namespace v8::internal
diff --git a/src/mips/virtual-frame-mips-inl.h b/src/extensions/experimental/i18n-utils.h
similarity index 71%
rename from src/mips/virtual-frame-mips-inl.h
rename to src/extensions/experimental/i18n-utils.h
index f0d2fab..7702708 100644
--- a/src/mips/virtual-frame-mips-inl.h
+++ b/src/extensions/experimental/i18n-utils.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,34 +25,25 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_VIRTUAL_FRAME_MIPS_INL_H_
-#define V8_VIRTUAL_FRAME_MIPS_INL_H_
-
-#include "assembler-mips.h"
-#include "virtual-frame-mips.h"
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
 
 namespace v8 {
 namespace internal {
 
+class I18NUtils {
+ public:
+  // Safe string copy. Null terminates the destination. Copies at most
+  // (length - 1) bytes.
+  // We can't use snprintf since it's not supported on all relevant platforms.
+  // We can't use OS::SNPrintF either; it's only for internal code.
+  // TODO(cira): Find a way to use OS::SNPrintF instead.
+  static void StrNCopy(char* dest, int length, const char* src);
 
-MemOperand VirtualFrame::ParameterAt(int index) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
-
-
-// The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
-
-
-void VirtualFrame::Forget(int count) {
-  UNIMPLEMENTED_MIPS();
-}
-
+ private:
+  I18NUtils() {}
+};
 
 } }  // namespace v8::internal
 
-#endif  // V8_VIRTUAL_FRAME_MIPS_INL_H_
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
diff --git a/src/extensions/experimental/i18n.js b/src/extensions/experimental/i18n.js
index baf3859..0fa7ae7 100644
--- a/src/extensions/experimental/i18n.js
+++ b/src/extensions/experimental/i18n.js
@@ -25,70 +25,71 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// TODO(cira): Remove v8 prefix from v8Locale once we have stable API.
-v8Locale = function(optLocale) {
+// TODO(cira): Rename v8Locale into LocaleInfo once we have stable API.
+/**
+ * LocaleInfo class is an aggregate class of all i18n API calls.
+ * @param {Object} settings - localeID and regionID to create LocaleInfo from.
+ *   {Array.<string>|string} settings.localeID -
+ *     Unicode identifier of the locale.
+ *     See http://unicode.org/reports/tr35/#BCP_47_Conformance
+ *   {string} settings.regionID - ISO3166 region ID with addition of
+ *     invalid, undefined and reserved region codes.
+ * @constructor
+ */
+v8Locale = function(settings) {
   native function NativeJSLocale();
-  var properties = NativeJSLocale(optLocale);
-  this.locale = properties.locale;
-  this.language = properties.language;
-  this.script = properties.script;
-  this.region = properties.region;
-};
 
-v8Locale.availableLocales = function() {
-  native function NativeJSAvailableLocales();
-  return NativeJSAvailableLocales();
-};
-
-v8Locale.prototype.maximizedLocale = function() {
-  native function NativeJSMaximizedLocale();
-  return new v8Locale(NativeJSMaximizedLocale(this.locale));
-};
-
-v8Locale.prototype.minimizedLocale = function() {
-  native function NativeJSMinimizedLocale();
-  return new v8Locale(NativeJSMinimizedLocale(this.locale));
-};
-
-v8Locale.prototype.displayLocale_ = function(displayLocale) {
-  var result = this.locale;
-  if (displayLocale !== undefined) {
-    result = displayLocale.locale;
+  // Assume user wanted to do v8Locale("sr");
+  if (typeof(settings) === "string") {
+    settings = {'localeID': settings};
   }
-  return result;
+
+  var properties = NativeJSLocale(
+      v8Locale.createSettingsOrDefault_(settings, {'localeID': 'root'}));
+
+  // Keep the resolved ICU locale ID around to avoid resolving localeID to
+  // ICU locale ID every time BreakIterator, Collator and so forth are called.
+  this.__icuLocaleID__ = properties.icuLocaleID;
+  this.options = {'localeID': properties.localeID,
+                  'regionID': properties.regionID};
 };
 
-v8Locale.prototype.displayLanguage = function(optDisplayLocale) {
-  var displayLocale = this.displayLocale_(optDisplayLocale);
-  native function NativeJSDisplayLanguage();
-  return NativeJSDisplayLanguage(this.locale, displayLocale);
+/**
+ * Clones existing locale with possible overrides for some of the options.
+ * @param {!Object} settings - overrides for current locale settings.
+ * @returns {Object} - new LocaleInfo object.
+ */
+v8Locale.prototype.derive = function(settings) {
+  return new v8Locale(
+      v8Locale.createSettingsOrDefault_(settings, this.options));
 };
 
-v8Locale.prototype.displayScript = function(optDisplayLocale) {
-  var displayLocale = this.displayLocale_(optDisplayLocale);
-  native function NativeJSDisplayScript();
-  return NativeJSDisplayScript(this.locale, displayLocale);
-};
-
-v8Locale.prototype.displayRegion = function(optDisplayLocale) {
-  var displayLocale = this.displayLocale_(optDisplayLocale);
-  native function NativeJSDisplayRegion();
-  return NativeJSDisplayRegion(this.locale, displayLocale);
-};
-
-v8Locale.prototype.displayName = function(optDisplayLocale) {
-  var displayLocale = this.displayLocale_(optDisplayLocale);
-  native function NativeJSDisplayName();
-  return NativeJSDisplayName(this.locale, displayLocale);
-};
-
+/**
+ * v8BreakIterator class implements locale-aware segmentation.
+ * It is not part of the EcmaScript proposal.
+ * @param {Object} locale - locale object to pass to break
+ *   iterator implementation.
+ * @param {string} type - type of segmentation:
+ *   - character
+ *   - word
+ *   - sentence
+ *   - line
+ * @constructor
+ */
 v8Locale.v8BreakIterator = function(locale, type) {
   native function NativeJSBreakIterator();
-  var iterator = NativeJSBreakIterator(locale, type);
+
+  locale = v8Locale.createLocaleOrDefault_(locale);
+  // BCP47 ID would work in this case, but we use ICU locale for consistency.
+  var iterator = NativeJSBreakIterator(locale.__icuLocaleID__, type);
   iterator.type = type;
   return iterator;
 };
 
+/**
+ * Type of the break we encountered during previous iteration.
+ * @type{Enum}
+ */
 v8Locale.v8BreakIterator.BreakType = {
   'unknown': -1,
   'none': 0,
@@ -98,6 +99,82 @@
   'ideo': 400
 };
 
+/**
+ * Creates new v8BreakIterator based on current locale.
+ * @param {string} type - type of segmentation. See constructor.
+ * @returns {Object} - new v8BreakIterator object.
+ */
 v8Locale.prototype.v8CreateBreakIterator = function(type) {
-  return new v8Locale.v8BreakIterator(this.locale, type);
+  return new v8Locale.v8BreakIterator(this, type);
+};
+
+// TODO(jungshik): Set |collator.options| to actually recognized / resolved
+// values.
+/**
+ * Collator class implements locale-aware sort.
+ * @param {Object} locale - locale object to pass to collator implementation.
+ * @param {Object} settings - collation flags:
+ *   - ignoreCase
+ *   - ignoreAccents
+ *   - numeric
+ * @constructor
+ */
+v8Locale.Collator = function(locale, settings) {
+  native function NativeJSCollator();
+
+  locale = v8Locale.createLocaleOrDefault_(locale);
+  var collator = NativeJSCollator(
+      locale.__icuLocaleID__, v8Locale.createSettingsOrDefault_(settings, {}));
+  return collator;
+};
+
+/**
+ * Creates new Collator based on current locale.
+ * @param {Object} settings - collation flags. See constructor.
+ * @returns {Object} - new Collator object.
+ */
+v8Locale.prototype.createCollator = function(settings) {
+  return new v8Locale.Collator(this, settings);
+};
+
+/**
+ * Merges user settings and defaults.
+ * Settings that are not of object type are rejected.
+ * Actual property values are not validated, but whitespace is trimmed if they
+ * are strings.
+ * @param {!Object} settings - user provided settings.
+ * @param {!Object} defaults - default values for this type of settings.
+ * @returns {Object} - valid settings object.
+ */
+v8Locale.createSettingsOrDefault_ = function(settings, defaults) {
+  if (!settings || typeof(settings) !== 'object' ) {
+    return defaults;
+  }
+  for (var key in defaults) {
+    if (!settings.hasOwnProperty(key)) {
+      settings[key] = defaults[key];
+    }
+  }
+  // Clean up values, like trimming whitespace.
+  for (var key in settings) {
+    if (typeof(settings[key]) === "string") {
+      settings[key] = settings[key].trim();
+    }
+  }
+
+  return settings;
+};
+
+/**
+ * If locale is valid (defined and of v8Locale type) we return it. If not,
+ * we create a default locale and return it.
+ * @param {!Object} locale - user provided locale.
+ * @returns {Object} - v8Locale object.
+ */
+v8Locale.createLocaleOrDefault_ = function(locale) {
+  if (!locale || !(locale instanceof v8Locale)) {
+    return new v8Locale();
+  } else {
+    return locale;
+  }
 };
diff --git a/src/extensions/experimental/language-matcher.cc b/src/extensions/experimental/language-matcher.cc
new file mode 100644
index 0000000..385ebff
--- /dev/null
+++ b/src/extensions/experimental/language-matcher.cc
@@ -0,0 +1,251 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// TODO(cira): Remove LanguageMatcher from v8 when ICU implements
+// language matching API.
+
+#include "language-matcher.h"
+
+#include "i18n-utils.h"
+#include "unicode/datefmt.h"  // For getAvailableLocales
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+const unsigned int LanguageMatcher::kLanguageWeight = 75;
+const unsigned int LanguageMatcher::kScriptWeight = 20;
+const unsigned int LanguageMatcher::kRegionWeight = 5;
+const unsigned int LanguageMatcher::kThreshold = 50;
+const unsigned int LanguageMatcher::kPositionBonus = 1;
+const char* const LanguageMatcher::kDefaultLocale = "root";
+
+static const char* GetLanguageException(const char*);
+static bool BCP47ToICUFormat(const char*, char*);
+static int CompareLocaleSubtags(const char*, const char*);
+static bool BuildLocaleName(const char*, const char*, LocaleIDMatch*);
+
+LocaleIDMatch::LocaleIDMatch()
+    : score(-1) {
+  I18NUtils::StrNCopy(
+      bcp47_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
+
+  I18NUtils::StrNCopy(
+      icu_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
+}
+
+LocaleIDMatch& LocaleIDMatch::operator=(const LocaleIDMatch& rhs) {
+  I18NUtils::StrNCopy(this->bcp47_id, ULOC_FULLNAME_CAPACITY, rhs.bcp47_id);
+  I18NUtils::StrNCopy(this->icu_id, ULOC_FULLNAME_CAPACITY, rhs.icu_id);
+  this->score = rhs.score;
+
+  return *this;
+}
+
+// static
+void LanguageMatcher::GetBestMatchForPriorityList(
+    v8::Handle<v8::Array> locales, LocaleIDMatch* result) {
+  v8::HandleScope handle_scope;
+
+  unsigned int position_bonus = locales->Length() * kPositionBonus;
+
+  int max_score = 0;
+  LocaleIDMatch match;
+  for (unsigned int i = 0; i < locales->Length(); ++i) {
+    position_bonus -= kPositionBonus;
+
+    v8::TryCatch try_catch;
+    v8::Local<v8::Value> locale_id = locales->Get(v8::Integer::New(i));
+
+    // Return default if exception is raised when reading parameter.
+    if (try_catch.HasCaught()) break;
+
+    // JavaScript arrays can be heterogeneous, so check whether each
+    // item is a string.
+    if (!locale_id->IsString()) continue;
+
+    if (!CompareToSupportedLocaleIDList(locale_id->ToString(), &match)) {
+      continue;
+    }
+
+    // Skip items under threshold.
+    if (match.score < kThreshold) continue;
+
+    match.score += position_bonus;
+    if (match.score > max_score) {
+      *result = match;
+
+      max_score = match.score;
+    }
+  }
+}
+
+// static
+void LanguageMatcher::GetBestMatchForString(
+    v8::Handle<v8::String> locale, LocaleIDMatch* result) {
+  LocaleIDMatch match;
+
+  if (CompareToSupportedLocaleIDList(locale, &match) &&
+      match.score >= kThreshold) {
+    *result = match;
+  }
+}
+
+// static
+bool LanguageMatcher::CompareToSupportedLocaleIDList(
+    v8::Handle<v8::String> locale_id, LocaleIDMatch* result) {
+  static int32_t available_count = 0;
+  // Depending on how ICU data is built, locales returned by
+  // Locale::getAvailableLocales() are not guaranteed to support DateFormat,
+  // Collation and other services.  We could call getAvailableLocales() on
+  // every service we want to support and take the intersection, but using
+  // DateFormat::getAvailableLocales() should suffice.
+  // TODO(cira): Maybe make this thread-safe?
+  static const icu::Locale* available_locales =
+      icu::DateFormat::getAvailableLocales(available_count);
+
+  // Skip this locale_id if it's not in ASCII.
+  static LocaleIDMatch default_match;
+  v8::String::AsciiValue ascii_value(locale_id);
+  if (*ascii_value == NULL) return false;
+
+  char locale[ULOC_FULLNAME_CAPACITY];
+  if (!BCP47ToICUFormat(*ascii_value, locale)) return false;
+
+  icu::Locale input_locale(locale);
+
+  // Position of the best match locale in list of available locales.
+  int position = -1;
+  const char* language = GetLanguageException(input_locale.getLanguage());
+  const char* script = input_locale.getScript();
+  const char* region = input_locale.getCountry();
+  for (int32_t i = 0; i < available_count; ++i) {
+    int current_score = 0;
+    int sign =
+        CompareLocaleSubtags(language, available_locales[i].getLanguage());
+    current_score += sign * kLanguageWeight;
+
+    sign = CompareLocaleSubtags(script, available_locales[i].getScript());
+    current_score += sign * kScriptWeight;
+
+    sign = CompareLocaleSubtags(region, available_locales[i].getCountry());
+    current_score += sign * kRegionWeight;
+
+    if (current_score >= kThreshold && current_score > result->score) {
+      result->score = current_score;
+      position = i;
+    }
+  }
+
+  // Didn't find any good matches so use defaults.
+  if (position == -1) return false;
+
+  return BuildLocaleName(available_locales[position].getBaseName(),
+                         input_locale.getName(), result);
+}
+
+// For some unsupported language subtags it is better to fall back to a
+// related supported language than to the default.
+static const char* GetLanguageException(const char* language) {
+  // Serbo-croatian to Serbian.
+  if (!strcmp(language, "sh")) return "sr";
+
+  // Norwegian to Norwegian Bokmal.
+  if (!strcmp(language, "no")) return "nb";
+
+  // Moldavian to Romanian.
+  if (!strcmp(language, "mo")) return "ro";
+
+  // Tagalog to Filipino.
+  if (!strcmp(language, "tl")) return "fil";
+
+  return language;
+}
+
+// Converts user input from BCP47 locale id format to ICU-compatible format.
+// Returns false if the uloc_forLanguageTag call fails; over-long extensions
+// are truncated so that the base language is preserved.
+static bool BCP47ToICUFormat(const char* locale_id, char* result) {
+  UErrorCode status = U_ZERO_ERROR;
+  int32_t locale_size = 0;
+
+  char locale[ULOC_FULLNAME_CAPACITY];
+  I18NUtils::StrNCopy(locale, ULOC_FULLNAME_CAPACITY, locale_id);
+
+  // uloc_forLanguageTag has a bug where a long extension can crash the code.
+  // We need to check that the extension part of the language id stays within
+  // the length limit.
+  // ICU bug: http://bugs.icu-project.org/trac/ticket/8519
+  const char* extension = strstr(locale_id, "-u-");
+  if (extension != NULL &&
+      strlen(extension) > ULOC_KEYWORD_AND_VALUES_CAPACITY) {
+    // Truncate to get non-crashing string, but still preserve base language.
+    int base_length = strlen(locale_id) - strlen(extension);
+    locale[base_length] = '\0';
+  }
+
+  uloc_forLanguageTag(locale, result, ULOC_FULLNAME_CAPACITY,
+                      &locale_size, &status);
+  return !U_FAILURE(status);
+}
+
+// Compares locale id subtags.
+// Returns 1 for match or -1 for mismatch.
+static int CompareLocaleSubtags(const char* lsubtag, const char* rsubtag) {
+  return strcmp(lsubtag, rsubtag) == 0 ? 1 : -1;
+}
+
+// Builds a BCP47-compliant locale id from the base name of the matched locale
+// and the full user-specified locale.
+// Returns false if uloc_toLanguageTag fails to convert the locale id.
+// Example:
+//   base_name of matched locale (ICU ID): de_DE
+//   input_locale_name (ICU ID): de_AT@collation=phonebk
+//   result (ICU ID): de_DE@collation=phonebk
+//   result (BCP47 ID): de-DE-u-co-phonebk
+static bool BuildLocaleName(const char* base_name,
+                            const char* input_locale_name,
+                            LocaleIDMatch* result) {
+  I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
+
+  // Get extensions (if any) from the original locale.
+  const char* extension = strchr(input_locale_name, ULOC_KEYWORD_SEPARATOR);
+  if (extension != NULL) {
+    I18NUtils::StrNCopy(result->icu_id + strlen(base_name),
+                        ULOC_KEYWORD_AND_VALUES_CAPACITY, extension);
+  } else {
+    I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
+  }
+
+  // Convert ICU locale name into BCP47 format.
+  UErrorCode status = U_ZERO_ERROR;
+  uloc_toLanguageTag(result->icu_id, result->bcp47_id,
+                     ULOC_FULLNAME_CAPACITY, false, &status);
+  return !U_FAILURE(status);
+}
+
+} }  // namespace v8::internal
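
The matcher above scores each supported locale against the requested one with fixed
subtag weights (language 75, script 20, region 5), accepts a candidate only when the
total reaches the threshold of 50, and breaks ties with a small bonus that favors
entries earlier in the priority list. A minimal standalone sketch of that scoring,
independent of ICU and of the V8 internals in this patch (the struct and function
names below are illustrative only):

    #include <cstdio>
    #include <cstring>

    // Illustrative subtag container; LanguageMatcher gets these from icu::Locale.
    struct Subtags {
      const char* language;
      const char* script;
      const char* region;
    };

    // Mirrors CompareToSupportedLocaleIDList: each subtag contributes its weight
    // with a positive sign on a match and a negative sign on a mismatch.
    static int ScoreLocale(const Subtags& wanted, const Subtags& candidate) {
      int score = 0;
      score += (std::strcmp(wanted.language, candidate.language) == 0 ? 1 : -1) * 75;
      score += (std::strcmp(wanted.script, candidate.script) == 0 ? 1 : -1) * 20;
      score += (std::strcmp(wanted.region, candidate.region) == 0 ? 1 : -1) * 5;
      return score;  // Candidates below 50 are rejected.
    }

    int main() {
      Subtags wanted = { "de", "Latn", "DE" };   // "de-Latn-DE"
      Subtags candidate = { "de", "", "DE" };    // supported "de_DE" (no script)
      // 75 (language) - 20 (script) + 5 (region) = 60 >= 50, so this matches.
      std::printf("score = %d\n", ScoreLocale(wanted, candidate));
      return 0;
    }
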
diff --git a/src/extensions/experimental/language-matcher.h b/src/extensions/experimental/language-matcher.h
new file mode 100644
index 0000000..b5336a2
--- /dev/null
+++ b/src/extensions/experimental/language-matcher.h
@@ -0,0 +1,95 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
+
+#include <v8.h>
+
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+struct LocaleIDMatch {
+  LocaleIDMatch();
+
+  LocaleIDMatch& operator=(const LocaleIDMatch& rhs);
+
+  // BCP47 locale id - "de-Latn-DE-u-co-phonebk".
+  char bcp47_id[ULOC_FULLNAME_CAPACITY];
+
+  // ICU locale id - "de_Latn_DE@collation=phonebk".
+  char icu_id[ULOC_FULLNAME_CAPACITY];
+
+  // Score for this locale.
+  int score;
+};
+
+class LanguageMatcher {
+ public:
+  // Default locale.
+  static const char* const kDefaultLocale;
+
+  // Finds the best supported locale for a given list of locale identifiers.
+  // It preserves the extension of the locale id.
+  static void GetBestMatchForPriorityList(
+      v8::Handle<v8::Array> locale_list, LocaleIDMatch* result);
+
+  // Finds the best supported locale for a single locale identifier.
+  // It preserves the extension of the locale id.
+  static void GetBestMatchForString(
+      v8::Handle<v8::String> locale_id, LocaleIDMatch* result);
+
+ private:
+  // If language subtags match add this amount to the score.
+  static const unsigned int kLanguageWeight;
+
+  // If script subtags match add this amount to the score.
+  static const unsigned int kScriptWeight;
+
+  // If region subtags match add this amount to the score.
+  static const unsigned int kRegionWeight;
+
+  // LocaleID match score has to be at or above this number to accept the match.
+  static const unsigned int kThreshold;
+
+  // For breaking ties in the priority list.
+  static const unsigned int kPositionBonus;
+
+  LanguageMatcher();
+
+  // Compares locale_id to the list of supported locales and returns the best
+  // match.
+  // Returns false if there is no match above the threshold or if the locale
+  // id cannot be converted between BCP47 and ICU formats.
+  static bool CompareToSupportedLocaleIDList(v8::Handle<v8::String> locale_id,
+                                             LocaleIDMatch* result);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
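
LocaleIDMatch keeps both spellings of the same locale: the BCP47 id
("de-Latn-DE-u-co-phonebk") and the ICU id ("de_Latn_DE@collation=phonebk"). The
conversion in both directions uses the same ICU C API the patch relies on
(uloc_forLanguageTag / uloc_toLanguageTag). A small standalone round trip, assuming
the ICU headers and common library are available (e.g. link with -licuuc):

    #include <cstdio>
    #include "unicode/uloc.h"

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      char icu_id[ULOC_FULLNAME_CAPACITY];
      char bcp47_id[ULOC_FULLNAME_CAPACITY];
      int32_t parsed_length = 0;

      // BCP47 -> ICU: "-u-co-phonebk" becomes "@collation=phonebk" (see the
      // BuildLocaleName example above), then ICU -> BCP47 converts it back.
      uloc_forLanguageTag("de-Latn-DE-u-co-phonebk", icu_id,
                          ULOC_FULLNAME_CAPACITY, &parsed_length, &status);
      uloc_toLanguageTag(icu_id, bcp47_id, ULOC_FULLNAME_CAPACITY, false, &status);

      if (U_FAILURE(status)) {
        std::printf("conversion failed: %s\n", u_errorName(status));
        return 1;
      }
      std::printf("ICU id:   %s\nBCP47 id: %s\n", icu_id, bcp47_id);
      return 0;
    }
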
diff --git a/src/factory.cc b/src/factory.cc
index 7dee66f..06d1655 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -111,12 +111,31 @@
                      String);
 }
 
+// Symbols are created in the old generation (data space).
+Handle<String> Factory::LookupSymbol(Handle<String> string) {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->LookupSymbol(*string),
+                     String);
+}
+
 Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
   CALL_HEAP_FUNCTION(isolate(),
                      isolate()->heap()->LookupAsciiSymbol(string),
                      String);
 }
 
+
+Handle<String> Factory::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+                                          int from,
+                                          int length) {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->LookupAsciiSymbol(string,
+                                                          from,
+                                                          length),
+                     String);
+}
+
+
 Handle<String> Factory::LookupTwoByteSymbol(Vector<const uc16> string) {
   CALL_HEAP_FUNCTION(isolate(),
                      isolate()->heap()->LookupTwoByteSymbol(string),
@@ -266,7 +285,7 @@
   heap->SetLastScriptId(Smi::FromInt(id));
 
   // Create and initialize script object.
-  Handle<Proxy> wrapper = NewProxy(0, TENURED);
+  Handle<Foreign> wrapper = NewForeign(0, TENURED);
   Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
   script->set_source(*source);
   script->set_name(heap->undefined_value());
@@ -286,15 +305,15 @@
 }
 
 
-Handle<Proxy> Factory::NewProxy(Address addr, PretenureFlag pretenure) {
+Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->AllocateProxy(addr, pretenure),
-                     Proxy);
+                     isolate()->heap()->AllocateForeign(addr, pretenure),
+                     Foreign);
 }
 
 
-Handle<Proxy> Factory::NewProxy(const AccessorDescriptor* desc) {
-  return NewProxy((Address) desc, TENURED);
+Handle<Foreign> Factory::NewForeign(const AccessorDescriptor* desc) {
+  return NewForeign((Address) desc, TENURED);
 }
 
 
@@ -712,7 +731,7 @@
 
 
 // Allocate the new array.
-Handle<DescriptorArray> Factory::CopyAppendProxyDescriptor(
+Handle<DescriptorArray> Factory::CopyAppendForeignDescriptor(
     Handle<DescriptorArray> array,
     Handle<String> key,
     Handle<Object> value,
@@ -832,6 +851,15 @@
 }
 
 
+Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
+                                    Handle<Object> prototype) {
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateJSProxy(*handler, *prototype),
+      JSProxy);
+}
+
+
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
     Handle<String> name,
     int number_of_literals,
@@ -1161,12 +1189,14 @@
                                     JSRegExp::Flags flags,
                                     int capture_count) {
   Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
-
+  Smi* uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
   store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
   store->set(JSRegExp::kSourceIndex, *source);
   store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
-  store->set(JSRegExp::kIrregexpASCIICodeIndex, HEAP->the_hole_value());
-  store->set(JSRegExp::kIrregexpUC16CodeIndex, HEAP->the_hole_value());
+  store->set(JSRegExp::kIrregexpASCIICodeIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpASCIICodeSavedIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpUC16CodeSavedIndex, uninitialized);
   store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
   store->set(JSRegExp::kIrregexpCaptureCountIndex,
              Smi::FromInt(capture_count));
diff --git a/src/factory.h b/src/factory.h
index 71bfdc4..55d1e9a 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -62,7 +62,11 @@
       PretenureFlag pretenure);
 
   Handle<String> LookupSymbol(Vector<const char> str);
+  Handle<String> LookupSymbol(Handle<String> str);
   Handle<String> LookupAsciiSymbol(Vector<const char> str);
+  Handle<String> LookupAsciiSymbol(Handle<SeqAsciiString>,
+                                   int from,
+                                   int length);
   Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
   Handle<String> LookupAsciiSymbol(const char* str) {
     return LookupSymbol(CStrVector(str));
@@ -156,13 +160,13 @@
 
   Handle<Script> NewScript(Handle<String> source);
 
-  // Proxies are pretenured when allocated by the bootstrapper.
-  Handle<Proxy> NewProxy(Address addr,
-                         PretenureFlag pretenure = NOT_TENURED);
+  // Foreign objects are pretenured when allocated by the bootstrapper.
+  Handle<Foreign> NewForeign(Address addr,
+                             PretenureFlag pretenure = NOT_TENURED);
 
-  // Allocate a new proxy.  The proxy is pretenured (allocated directly in
-  // the old generation).
-  Handle<Proxy> NewProxy(const AccessorDescriptor* proxy);
+  // Allocate a new foreign object.  The foreign is pretenured (allocated
+  // directly in the old generation).
+  Handle<Foreign> NewForeign(const AccessorDescriptor* foreign);
 
   Handle<ByteArray> NewByteArray(int length,
                                  PretenureFlag pretenure = NOT_TENURED);
@@ -231,6 +235,8 @@
       Handle<FixedArray> elements,
       PretenureFlag pretenure = NOT_TENURED);
 
+  Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
+
   Handle<JSFunction> NewFunction(Handle<String> name,
                                  Handle<Object> prototype);
 
@@ -314,7 +320,7 @@
   Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
                                                  Handle<Code> code);
 
-  Handle<DescriptorArray> CopyAppendProxyDescriptor(
+  Handle<DescriptorArray> CopyAppendForeignDescriptor(
       Handle<DescriptorArray> array,
       Handle<String> key,
       Handle<Object> value,
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 17e2015..a85f5fe 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -96,6 +96,9 @@
 //
 #define FLAG FLAG_FULL
 
+// Flags for experimental language features.
+DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
+
 // Flags for Crankshaft.
 #ifdef V8_TARGET_ARCH_MIPS
   DEFINE_bool(crankshaft, false, "use crankshaft")
@@ -144,7 +147,6 @@
 DEFINE_bool(debug_code, false,
             "generate extra code (assertions) for debugging")
 DEFINE_bool(code_comments, false, "emit comments in code disassembly")
-DEFINE_bool(emit_branch_hints, false, "emit branch hints")
 DEFINE_bool(peephole_optimization, true,
             "perform peephole optimizations in assembly code")
 DEFINE_bool(print_peephole_optimization, false,
@@ -285,10 +287,9 @@
 DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
 DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
-DEFINE_bool(cleanup_ics_at_gc, true,
-            "Flush inline caches prior to mark compact collection.")
-DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
-            "Flush code caches in maps during mark compact cycle.")
+DEFINE_bool(cleanup_code_caches_at_gc, true,
+            "Flush inline caches prior to mark compact collection and "
+            "flush code caches in maps during mark compact cycle.")
 DEFINE_int(random_seed, 0,
            "Default seed for initializing random generator "
            "(0, the default, means to use system random).")
@@ -325,7 +326,7 @@
 DEFINE_int(sim_stack_alignment, 8,
            "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
 
-// top.cc
+// isolate.cc
 DEFINE_bool(trace_exception, false,
             "print stack trace when throwing exceptions")
 DEFINE_bool(preallocate_message_memory, false,
diff --git a/src/frame-element.h b/src/frame-element.h
deleted file mode 100644
index 0c7d010..0000000
--- a/src/frame-element.h
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_FRAME_ELEMENT_H_
-#define V8_FRAME_ELEMENT_H_
-
-#include "type-info.h"
-#include "macro-assembler.h"
-#include "zone.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frame elements
-//
-// The internal elements of the virtual frames.  There are several kinds of
-// elements:
-//   * Invalid: elements that are uninitialized or not actually part
-//     of the virtual frame.  They should not be read.
-//   * Memory: an element that resides in the actual frame.  Its address is
-//     given by its position in the virtual frame.
-//   * Register: an element that resides in a register.
-//   * Constant: an element whose value is known at compile time.
-
-class FrameElement BASE_EMBEDDED {
- public:
-  enum SyncFlag {
-    NOT_SYNCED,
-    SYNCED
-  };
-
-  inline TypeInfo type_info() {
-    // Copied elements do not have type info. Instead
-    // we have to inspect their backing element in the frame.
-    ASSERT(!is_copy());
-    return TypeInfo::FromInt(TypeInfoField::decode(value_));
-  }
-
-  inline void set_type_info(TypeInfo info) {
-    // Copied elements do not have type info. Instead
-    // we have to inspect their backing element in the frame.
-    ASSERT(!is_copy());
-    value_ = value_ & ~TypeInfoField::mask();
-    value_ = value_ | TypeInfoField::encode(info.ToInt());
-  }
-
-  // The default constructor creates an invalid frame element.
-  FrameElement() {
-    value_ = TypeField::encode(INVALID)
-        | CopiedField::encode(false)
-        | SyncedField::encode(false)
-        | TypeInfoField::encode(TypeInfo::Uninitialized().ToInt())
-        | DataField::encode(0);
-  }
-
-  // Factory function to construct an invalid frame element.
-  static FrameElement InvalidElement() {
-    FrameElement result;
-    return result;
-  }
-
-  // Factory function to construct an in-memory frame element.
-  static FrameElement MemoryElement(TypeInfo info) {
-    FrameElement result(MEMORY, no_reg, SYNCED, info);
-    return result;
-  }
-
-  // Factory function to construct an in-register frame element.
-  static FrameElement RegisterElement(Register reg,
-                                      SyncFlag is_synced,
-                                      TypeInfo info) {
-    return FrameElement(REGISTER, reg, is_synced, info);
-  }
-
-  // Factory function to construct a frame element whose value is known at
-  // compile time.
-  static FrameElement ConstantElement(Handle<Object> value,
-                                      SyncFlag is_synced) {
-    TypeInfo info = TypeInfo::TypeFromValue(value);
-    FrameElement result(value, is_synced, info);
-    return result;
-  }
-
-  static bool ConstantPoolOverflowed() {
-    return !DataField::is_valid(
-        Isolate::Current()->frame_element_constant_list()->length());
-  }
-
-  bool is_synced() const { return SyncedField::decode(value_); }
-
-  void set_sync() {
-    ASSERT(type() != MEMORY);
-    value_ = value_ | SyncedField::encode(true);
-  }
-
-  void clear_sync() {
-    ASSERT(type() != MEMORY);
-    value_ = value_ & ~SyncedField::mask();
-  }
-
-  bool is_valid() const { return type() != INVALID; }
-  bool is_memory() const { return type() == MEMORY; }
-  bool is_register() const { return type() == REGISTER; }
-  bool is_constant() const { return type() == CONSTANT; }
-  bool is_copy() const { return type() == COPY; }
-
-  bool is_copied() const { return CopiedField::decode(value_); }
-  void set_copied() { value_ = value_ | CopiedField::encode(true); }
-  void clear_copied() { value_ = value_ & ~CopiedField::mask(); }
-
-  // An untagged int32 FrameElement represents a signed int32
-  // on the stack.  These are only allowed in a side-effect-free
-  // int32 calculation, and if a non-int32 input shows up or an overflow
-  // occurs, we bail out and drop all the int32 values.
-  void set_untagged_int32(bool value) {
-    value_ &= ~UntaggedInt32Field::mask();
-    value_ |= UntaggedInt32Field::encode(value);
-  }
-  bool is_untagged_int32() const { return UntaggedInt32Field::decode(value_); }
-
-  Register reg() const {
-    ASSERT(is_register());
-    uint32_t reg = DataField::decode(value_);
-    Register result;
-    result.code_ = reg;
-    return result;
-  }
-
-  Handle<Object> handle() const {
-    ASSERT(is_constant());
-    return Isolate::Current()->frame_element_constant_list()->
-        at(DataField::decode(value_));
-  }
-
-  int index() const {
-    ASSERT(is_copy());
-    return DataField::decode(value_);
-  }
-
-  bool Equals(FrameElement other) {
-    uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
-    if (!masked_difference) {
-      // The elements are equal if they agree exactly except on copied field.
-      return true;
-    } else {
-      // If two constants have the same value, and agree otherwise, return true.
-       return !(masked_difference & ~DataField::mask()) &&
-              is_constant() &&
-              handle().is_identical_to(other.handle());
-    }
-  }
-
-  // Test if two FrameElements refer to the same memory or register location.
-  bool SameLocation(FrameElement* other) {
-    if (type() == other->type()) {
-      if (value_ == other->value_) return true;
-      if (is_constant() && handle().is_identical_to(other->handle())) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  // Given a pair of non-null frame element pointers, return one of them
-  // as an entry frame candidate or null if they are incompatible.
-  FrameElement* Combine(FrameElement* other) {
-    // If either is invalid, the result is.
-    if (!is_valid()) return this;
-    if (!other->is_valid()) return other;
-
-    if (!SameLocation(other)) return NULL;
-    // If either is unsynced, the result is.
-    FrameElement* result = is_synced() ? other : this;
-    return result;
-  }
-
- private:
-  enum Type {
-    INVALID,
-    MEMORY,
-    REGISTER,
-    CONSTANT,
-    COPY
-  };
-
-  // Used to construct memory and register elements.
-  FrameElement(Type type,
-               Register reg,
-               SyncFlag is_synced,
-               TypeInfo info) {
-    value_ = TypeField::encode(type)
-        | CopiedField::encode(false)
-        | SyncedField::encode(is_synced != NOT_SYNCED)
-        | TypeInfoField::encode(info.ToInt())
-        | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
-  }
-
-  // Used to construct constant elements.
-  FrameElement(Handle<Object> value, SyncFlag is_synced, TypeInfo info) {
-    ZoneObjectList* constant_list =
-        Isolate::Current()->frame_element_constant_list();
-    value_ = TypeField::encode(CONSTANT)
-        | CopiedField::encode(false)
-        | SyncedField::encode(is_synced != NOT_SYNCED)
-        | TypeInfoField::encode(info.ToInt())
-        | DataField::encode(constant_list->length());
-    constant_list->Add(value);
-  }
-
-  Type type() const { return TypeField::decode(value_); }
-  void set_type(Type type) {
-    value_ = value_ & ~TypeField::mask();
-    value_ = value_ | TypeField::encode(type);
-  }
-
-  void set_index(int new_index) {
-    ASSERT(is_copy());
-    value_ = value_ & ~DataField::mask();
-    value_ = value_ | DataField::encode(new_index);
-  }
-
-  void set_reg(Register new_reg) {
-    ASSERT(is_register());
-    value_ = value_ & ~DataField::mask();
-    value_ = value_ | DataField::encode(new_reg.code_);
-  }
-
-  // Encode type, copied, synced and data in one 32 bit integer.
-  uint32_t value_;
-
-  // Declare BitFields with template parameters <type, start, size>.
-  class TypeField: public BitField<Type, 0, 3> {};
-  class CopiedField: public BitField<bool, 3, 1> {};
-  class SyncedField: public BitField<bool, 4, 1> {};
-  class UntaggedInt32Field: public BitField<bool, 5, 1> {};
-  class TypeInfoField: public BitField<int, 6, 7> {};
-  class DataField: public BitField<uint32_t, 13, 32 - 13> {};
-
-  friend class VirtualFrame;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_FRAME_ELEMENT_H_
diff --git a/src/frames.cc b/src/frames.cc
index e0517c8..d81d5af 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -742,24 +742,30 @@
       // at the first position. Since we are always at a call when we need
       // to construct a stack trace, the receiver is always in a stack slot.
       opcode = static_cast<Translation::Opcode>(it.Next());
-      ASSERT(opcode == Translation::STACK_SLOT);
-      int input_slot_index = it.Next();
+      ASSERT(opcode == Translation::STACK_SLOT ||
+             opcode == Translation::LITERAL);
+      int index = it.Next();
 
       // Get the correct receiver in the optimized frame.
       Object* receiver = NULL;
-      // Positive index means the value is spilled to the locals area. Negative
-      // means it is stored in the incoming parameter area.
-      if (input_slot_index >= 0) {
-        receiver = GetExpression(input_slot_index);
+      if (opcode == Translation::LITERAL) {
+        receiver = data->LiteralArray()->get(index);
       } else {
-        // Index -1 overlaps with last parameter, -n with the first parameter,
-        // (-n - 1) with the receiver with n being the number of parameters
-        // of the outermost, optimized frame.
-        int parameter_count = ComputeParametersCount();
-        int parameter_index = input_slot_index + parameter_count;
-        receiver = (parameter_index == -1)
-            ? this->receiver()
-            : this->GetParameter(parameter_index);
+        // Positive index means the value is spilled to the locals
+        // area. Negative means it is stored in the incoming parameter
+        // area.
+        if (index >= 0) {
+          receiver = GetExpression(index);
+        } else {
+          // Index -1 overlaps with last parameter, -n with the first parameter,
+          // (-n - 1) with the receiver with n being the number of parameters
+          // of the outermost, optimized frame.
+          int parameter_count = ComputeParametersCount();
+          int parameter_index = index + parameter_count;
+          receiver = (parameter_index == -1)
+              ? this->receiver()
+              : this->GetParameter(parameter_index);
+        }
       }
 
       Code* code = function->shared()->code();
@@ -938,6 +944,10 @@
     accumulator->Add("\n");
     return;
   }
+  if (is_optimized()) {
+    accumulator->Add(" {\n// optimized frame\n}\n");
+    return;
+  }
   accumulator->Add(" {\n");
 
   // Compute the number of locals and expression stack elements.
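
The receiver lookup in the frames.cc hunk above maps a negative translation index into
the incoming parameter area: with n parameters, parameter_index = index + n, so index
-1 selects the last parameter, -n the first, and -(n + 1) yields parameter_index == -1,
which stands for the receiver. A minimal check of that arithmetic (the helper name
below is illustrative, not a V8 function):

    #include <cassert>

    // parameter_index = translation_index + parameter_count; the special value
    // -1 means "the receiver" rather than a real parameter slot.
    static int ParameterIndex(int translation_index, int parameter_count) {
      return translation_index + parameter_count;
    }

    int main() {
      const int n = 3;  // parameter count of the outermost optimized frame
      assert(ParameterIndex(-1, n) == n - 1);     // last parameter
      assert(ParameterIndex(-n, n) == 0);         // first parameter
      assert(ParameterIndex(-(n + 1), n) == -1);  // the receiver
      return 0;
    }
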
diff --git a/src/frames.h b/src/frames.h
index 6fe6a63..aa91026 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -28,6 +28,7 @@
 #ifndef V8_FRAMES_H_
 #define V8_FRAMES_H_
 
+#include "allocation.h"
 #include "handles.h"
 #include "safepoint-table.h"
 
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 5f97421..2b43e89 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -747,7 +747,7 @@
       if (ShouldInlineSmiCase(op)) {
         EmitInlineSmiBinaryOp(expr, op, mode, left, right);
       } else {
-        EmitBinaryOp(op, mode);
+        EmitBinaryOp(expr, op, mode);
       }
       break;
     }
diff --git a/src/full-codegen.h b/src/full-codegen.h
index d6ed1b9..0d26e38 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include "v8.h"
 
+#include "allocation.h"
 #include "ast.h"
 #include "code-stubs.h"
 #include "codegen.h"
@@ -299,10 +300,19 @@
 
   // Helper function to split control flow and avoid a branch to the
   // fall-through label if it is set up.
+#ifdef V8_TARGET_ARCH_MIPS
+  void Split(Condition cc,
+             Register lhs,
+             const Operand&  rhs,
+             Label* if_true,
+             Label* if_false,
+             Label* fall_through);
+#else  // All non-mips arch.
   void Split(Condition cc,
              Label* if_true,
              Label* if_false,
              Label* fall_through);
+#endif  // V8_TARGET_ARCH_MIPS
 
   void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
   void Move(Register dst, Slot* source);
@@ -395,9 +405,9 @@
   void EmitReturnSequence();
 
   // Platform-specific code sequences for calls
-  void EmitCallWithStub(Call* expr);
+  void EmitCallWithStub(Call* expr, CallFunctionFlags flags);
   void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
-  void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
+  void EmitKeyedCallWithIC(Call* expr, Expression* key);
 
   // Platform-specific code for inline runtime calls.
   InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);
@@ -445,12 +455,13 @@
 
   // Apply the compound assignment operator. Expects the left operand on top
   // of the stack and the right one in the accumulator.
-  void EmitBinaryOp(Token::Value op,
+  void EmitBinaryOp(BinaryOperation* expr,
+                    Token::Value op,
                     OverwriteMode mode);
 
   // Helper functions for generating inlined smi code for certain
   // binary operations.
-  void EmitInlineSmiBinaryOp(Expression* expr,
+  void EmitInlineSmiBinaryOp(BinaryOperation* expr,
                              Token::Value op,
                              OverwriteMode mode,
                              Expression* left,
@@ -512,12 +523,16 @@
   static Register context_register();
 
   // Helper for calling an IC stub.
-  void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
+  void EmitCallIC(Handle<Code> ic,
+                  RelocInfo::Mode mode,
+                  unsigned ast_id);
 
   // Calling an IC stub with a patch site. Passing NULL for patch_site
   // or non NULL patch_site which is not activated indicates no inlined smi code
   // and emits a nop after the IC call.
-  void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
+  void EmitCallIC(Handle<Code> ic,
+                  JumpPatchSite* patch_site,
+                  unsigned ast_id);
 
   // Set fields in the stack frame. Offsets are the frame pointer relative
   // offsets defined in, e.g., StandardFrameConstants.
@@ -531,6 +546,9 @@
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
+
+  void EmitUnaryOperation(UnaryOperation* expr, const char* comment);
+
   // Handles the shortcutted logical binary operations in VisitBinaryOperation.
   void EmitLogicalOperation(BinaryOperation* expr);
 
diff --git a/src/func-name-inferrer.cc b/src/func-name-inferrer.cc
index c094251..ebac4b9 100644
--- a/src/func-name-inferrer.cc
+++ b/src/func-name-inferrer.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 
 #include "ast.h"
 #include "func-name-inferrer.h"
+#include "list-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
index de6928f..0c80fb6 100644
--- a/src/gdb-jit.h
+++ b/src/gdb-jit.h
@@ -28,6 +28,8 @@
 #ifndef V8_GDB_JIT_H_
 #define V8_GDB_JIT_H_
 
+#include "allocation.h"
+
 //
 // Basic implementation of GDB JIT Interface client.
 // GBD JIT Interface is supported in GDB 7.0 and above.
diff --git a/src/global-handles.cc b/src/global-handles.cc
index c4e8f13..e4bbc95 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -48,6 +48,7 @@
     // Set the initial value of the handle.
     object_ = object;
     class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+    independent_ = false;
     state_  = NORMAL;
     parameter_or_next_free_.parameter = NULL;
     callback_ = NULL;
@@ -138,6 +139,13 @@
     set_parameter(NULL);
   }
 
+  void MarkIndependent(GlobalHandles* global_handles) {
+    LOG(global_handles->isolate(),
+        HandleEvent("GlobalHandle::MarkIndependent", handle().location()));
+    ASSERT(state_ != DESTROYED);
+    independent_ = true;
+  }
+
   bool IsNearDeath() {
     // Check for PENDING to ensure correct answer when processing callbacks.
     return state_ == PENDING || state_ == NEAR_DEATH;
@@ -222,6 +230,8 @@
   };
   State state_ : 4;  // Need one more bit for MSVC as it treats enums as signed.
 
+  bool independent_ : 1;
+
  private:
   // Handle specific callback.
   WeakReferenceCallback callback_;
@@ -364,6 +374,11 @@
 }
 
 
+void GlobalHandles::MarkIndependent(Object** location) {
+  Node::FromLocation(location)->MarkIndependent(this);
+}
+
+
 bool GlobalHandles::IsNearDeath(Object** location) {
   return Node::FromLocation(location)->IsNearDeath();
 }
@@ -381,7 +396,7 @@
 
 void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
   // Traversal of GC roots in the global handle list that are marked as
-  // WEAK or PENDING.
+  // WEAK, PENDING or NEAR_DEATH.
   for (Node* current = head_; current != NULL; current = current->next()) {
     if (current->state_ == Node::WEAK
       || current->state_ == Node::PENDING
@@ -392,6 +407,20 @@
 }
 
 
+void GlobalHandles::IterateWeakIndependentRoots(ObjectVisitor* v) {
+  // Traversal of GC roots in the global handle list that are independent
+  // and marked as WEAK, PENDING or NEAR_DEATH.
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if (!current->independent_) continue;
+    if (current->state_ == Node::WEAK
+      || current->state_ == Node::PENDING
+      || current->state_ == Node::NEAR_DEATH) {
+      v->VisitPointer(&current->object_);
+    }
+  }
+}
+
+
 void GlobalHandles::IterateWeakRoots(WeakReferenceGuest f,
                                      WeakReferenceCallback callback) {
   for (Node* current = head_; current != NULL; current = current->next()) {
@@ -415,7 +444,21 @@
 }
 
 
-bool GlobalHandles::PostGarbageCollectionProcessing() {
+void GlobalHandles::IdentifyWeakIndependentHandles(WeakSlotCallbackWithHeap f) {
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if (current->state_ == Node::WEAK && current->independent_) {
+      if (f(isolate_->heap(), &current->object_)) {
+        current->state_ = Node::PENDING;
+        LOG(isolate_,
+            HandleEvent("GlobalHandle::Pending", current->handle().location()));
+      }
+    }
+  }
+}
+
+
+bool GlobalHandles::PostGarbageCollectionProcessing(
+    GarbageCollector collector) {
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
@@ -425,6 +468,14 @@
   bool next_gc_likely_to_collect_more = false;
   Node** p = &head_;
   while (*p != NULL) {
+    // Skip dependent handles. Their weak callbacks might expect to be
+    // called between two global garbage collection callbacks which
+    // are not called for minor collections.
+    if (collector == SCAVENGER && !(*p)->independent_) {
+      p = (*p)->next_addr();
+      continue;
+    }
+
     if ((*p)->PostGarbageCollectionProcessing(isolate_, this)) {
       if (initial_post_gc_processing_count != post_gc_processing_count_) {
         // Weak callback triggered another GC and another round of
@@ -476,6 +527,16 @@
 }
 
 
+void GlobalHandles::IterateStrongAndDependentRoots(ObjectVisitor* v) {
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if ((current->independent_ && current->state_ == Node::NORMAL) ||
+        (!current->independent_ && current->state_ != Node::DESTROYED)) {
+      v->VisitPointer(&current->object_);
+    }
+  }
+}
+
+
 void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
   for (Node* current = head_; current != NULL; current = current->next()) {
     if (current->class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId &&
@@ -558,6 +619,11 @@
 void GlobalHandles::AddObjectGroup(Object*** handles,
                                    size_t length,
                                    v8::RetainedObjectInfo* info) {
+#ifdef DEBUG
+  for (size_t i = 0; i < length; ++i) {
+    ASSERT(!Node::FromLocation(handles[i])->independent_);
+  }
+#endif
   if (length == 0) {
     if (info != NULL) info->Dispose();
     return;
@@ -569,6 +635,12 @@
 void GlobalHandles::AddImplicitReferences(HeapObject** parent,
                                           Object*** children,
                                           size_t length) {
+#ifdef DEBUG
+  ASSERT(!Node::FromLocation(BitCast<Object**>(parent))->independent_);
+  for (size_t i = 0; i < length; ++i) {
+    ASSERT(!Node::FromLocation(children[i])->independent_);
+  }
+#endif
   if (length == 0) return;
   implicit_ref_groups_.Add(ImplicitRefGroup::New(parent, children, length));
 }
diff --git a/src/global-handles.h b/src/global-handles.h
index 2171b2c..3477bca 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 
 #include "../include/v8-profiler.h"
 
-#include "list-inl.h"
+#include "list.h"
 
 namespace v8 {
 namespace internal {
@@ -146,6 +146,9 @@
   // Clear the weakness of a global handle.
   void ClearWeakness(Object** location);
 
+  // Mark a global handle as independent, so it may be processed by scavenges.
+  void MarkIndependent(Object** location);
+
   // Tells whether global handle is near death.
   static bool IsNearDeath(Object** location);
 
@@ -154,11 +157,14 @@
 
   // Process pending weak handles.
   // Returns true if next major GC is likely to collect more garbage.
-  bool PostGarbageCollectionProcessing();
+  bool PostGarbageCollectionProcessing(GarbageCollector collector);
 
   // Iterates over all strong handles.
   void IterateStrongRoots(ObjectVisitor* v);
 
+  // Iterates over all strong and dependent handles.
+  void IterateStrongAndDependentRoots(ObjectVisitor* v);
+
   // Iterates over all handles.
   void IterateAllRoots(ObjectVisitor* v);
 
@@ -168,6 +174,9 @@
   // Iterates over all weak roots in heap.
   void IterateWeakRoots(ObjectVisitor* v);
 
+  // Iterates over all weak independent roots in heap.
+  void IterateWeakIndependentRoots(ObjectVisitor* v);
+
   // Iterates over weak roots that are bound to a given callback.
   void IterateWeakRoots(WeakReferenceGuest f,
                         WeakReferenceCallback callback);
@@ -176,6 +185,10 @@
   // them as pending.
   void IdentifyWeakHandles(WeakSlotCallback f);
 
+  // Find all weak independent handles satisfying the callback predicate, mark
+  // them as pending.
+  void IdentifyWeakIndependentHandles(WeakSlotCallbackWithHeap f);
+
   // Add an object group.
   // Should be only used in GC callback function before a collection.
   // All groups are destroyed after a mark-compact collection.
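
The new independent_ bit changes which weak handles a scavenge may touch: dependent
handles keep their callbacks deferred to full mark-compact collections (whose
prologue/epilogue GC callbacks they may rely on), while handles marked independent can
be identified, traced and finalized during a scavenge as well. A toy standalone model
of that filtering, mirroring the SCAVENGER check in PostGarbageCollectionProcessing
(the types below are illustrative, not V8 API):

    #include <cstdio>
    #include <vector>

    enum Collector { SCAVENGER, MARK_COMPACTOR };

    struct ToyHandle {
      const char* name;
      bool weak;
      bool independent;  // set via MarkIndependent() in the real implementation
    };

    // During a scavenge only independent weak handles are processed; all weak
    // handles are processed after a full mark-compact collection.
    static void ProcessWeakHandles(const std::vector<ToyHandle>& handles,
                                   Collector collector) {
      for (size_t i = 0; i < handles.size(); ++i) {
        const ToyHandle& h = handles[i];
        if (!h.weak) continue;
        if (collector == SCAVENGER && !h.independent) continue;
        std::printf("weak callback for %s\n", h.name);
      }
    }

    int main() {
      std::vector<ToyHandle> handles;
      ToyHandle a = { "independent-weak", true, true };
      ToyHandle b = { "dependent-weak", true, false };
      handles.push_back(a);
      handles.push_back(b);

      ProcessWeakHandles(handles, SCAVENGER);       // only "independent-weak"
      ProcessWeakHandles(handles, MARK_COMPACTOR);  // both handles
      return 0;
    }
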
diff --git a/src/handles.cc b/src/handles.cc
index 326de86..b03b642 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -362,6 +362,17 @@
 
 
 Handle<Object> GetProperty(Handle<Object> obj,
+                           const char* name,
+                           LookupResult* result) {
+  Isolate* isolate = Isolate::Current();
+  Handle<String> str = isolate->factory()->LookupAsciiSymbol(name);
+  PropertyAttributes attributes;
+  CALL_HEAP_FUNCTION(
+      isolate, obj->GetProperty(*obj, result, *str, &attributes), Object);
+}
+
+
+Handle<Object> GetProperty(Handle<Object> obj,
                            Handle<Object> key) {
   Isolate* isolate = Isolate::Current();
   CALL_HEAP_FUNCTION(isolate,
@@ -533,7 +544,7 @@
 
 
 // Wrappers for scripts are kept alive and cached in weak global
-// handles referred from proxy objects held by the scripts as long as
+// handles referred from foreign objects held by the scripts as long as
 // they are used. When they are not used anymore, the garbage
 // collector will call the weak callback on the global handle
 // associated with the wrapper and get rid of both the wrapper and the
@@ -546,9 +557,9 @@
 #endif
   Handle<Object> cache = Utils::OpenHandle(*handle);
   JSValue* wrapper = JSValue::cast(*cache);
-  Proxy* proxy = Script::cast(wrapper->value())->wrapper();
-  ASSERT(proxy->proxy() == reinterpret_cast<Address>(cache.location()));
-  proxy->set_proxy(0);
+  Foreign* foreign = Script::cast(wrapper->value())->wrapper();
+  ASSERT(foreign->address() == reinterpret_cast<Address>(cache.location()));
+  foreign->set_address(0);
   Isolate* isolate = Isolate::Current();
   isolate->global_handles()->Destroy(cache.location());
   isolate->counters()->script_wrappers()->Decrement();
@@ -556,10 +567,10 @@
 
 
 Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
-  if (script->wrapper()->proxy() != NULL) {
+  if (script->wrapper()->address() != NULL) {
     // Return the script wrapper directly from the cache.
     return Handle<JSValue>(
-        reinterpret_cast<JSValue**>(script->wrapper()->proxy()));
+        reinterpret_cast<JSValue**>(script->wrapper()->address()));
   }
   Isolate* isolate = Isolate::Current();
   // Construct a new script wrapper.
@@ -575,7 +586,7 @@
   Handle<Object> handle = isolate->global_handles()->Create(*result);
   isolate->global_handles()->MakeWeak(handle.location(), NULL,
                                       &ClearWrapperCache);
-  script->wrapper()->set_proxy(reinterpret_cast<Address>(handle.location()));
+  script->wrapper()->set_address(reinterpret_cast<Address>(handle.location()));
   return result;
 }
 
diff --git a/src/handles.h b/src/handles.h
index 3839f37..3d930fd 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -28,6 +28,7 @@
 #ifndef V8_HANDLES_H_
 #define V8_HANDLES_H_
 
+#include "allocation.h"
 #include "apiutils.h"
 
 namespace v8 {
@@ -242,6 +243,10 @@
                            const char* name);
 
 Handle<Object> GetProperty(Handle<Object> obj,
+                           const char* name,
+                           LookupResult* result);
+
+Handle<Object> GetProperty(Handle<Object> obj,
                            Handle<Object> key);
 
 Handle<Object> GetProperty(Handle<JSObject> obj,
diff --git a/src/hashmap.h b/src/hashmap.h
index bb3e3ce..5c13212 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,8 @@
 #ifndef V8_HASHMAP_H_
 #define V8_HASHMAP_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 296cb05..3860fad 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,8 +29,9 @@
 #define V8_HEAP_INL_H_
 
 #include "heap.h"
-#include "objects.h"
 #include "isolate.h"
+#include "list-inl.h"
+#include "objects.h"
 #include "v8-counters.h"
 
 namespace v8 {
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 4815f82..ec078ed 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -474,7 +474,7 @@
 
 
 ConstructorHeapProfile::ConstructorHeapProfile()
-    : zscope_(DELETE_ON_EXIT) {
+    : zscope_(Isolate::Current(), DELETE_ON_EXIT) {
 }
 
 
@@ -584,7 +584,7 @@
 
 
 ClustersCoarser::ClustersCoarser()
-    : zscope_(DELETE_ON_EXIT),
+    : zscope_(Isolate::Current(), DELETE_ON_EXIT),
       sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
       current_pair_(NULL),
       current_set_(NULL),
@@ -699,7 +699,7 @@
 
 
 RetainerHeapProfile::RetainerHeapProfile()
-    : zscope_(DELETE_ON_EXIT),
+    : zscope_(Isolate::Current(), DELETE_ON_EXIT),
       aggregator_(NULL) {
   JSObjectsCluster roots(JSObjectsCluster::ROOTS);
   ReferencesExtractor extractor(roots, this);
@@ -1027,7 +1027,7 @@
     if (coarser_ != NULL &&
         !coarser_->GetCoarseEquivalent(cluster).is_null()) return;
     JSObjectsClusterTree* tree_to_iterate = tree;
-    ZoneScope zs(DELETE_ON_EXIT);
+    ZoneScope zs(Isolate::Current(), DELETE_ON_EXIT);
     JSObjectsClusterTree dest_tree_;
     if (coarser_ != NULL) {
       RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 89a2e8a..c1e93c0 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -28,6 +28,7 @@
 #ifndef V8_HEAP_PROFILER_H_
 #define V8_HEAP_PROFILER_H_
 
+#include "allocation.h"
 #include "isolate.h"
 #include "zone-inl.h"
 
diff --git a/src/heap.cc b/src/heap.cc
index 2b6c11f..f82c83c 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -33,8 +33,8 @@
 #include "codegen.h"
 #include "compilation-cache.h"
 #include "debug.h"
-#include "heap-profiler.h"
 #include "global-handles.h"
+#include "heap-profiler.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "natives.h"
@@ -69,11 +69,11 @@
     : isolate_(NULL),
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
-#if defined(ANDROID)
+#if 0  // defined(ANDROID)
       reserved_semispace_size_(2*MB),
       max_semispace_size_(2*MB),
       initial_semispace_size_(128*KB),
-      max_old_generation_size_(192*MB),
+      max_old_generation_size_(512*MB),
       max_executable_size_(max_old_generation_size_),
       code_range_size_(0),
 #elif defined(V8_TARGET_ARCH_X64)
@@ -96,6 +96,7 @@
 // Will be 4 * reserved_semispace_size_ to ensure that young
 // generation can be aligned to its size.
       survived_since_last_expansion_(0),
+      sweep_generation_(0),
       always_allocate_scope_depth_(0),
       linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
@@ -736,7 +737,7 @@
   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
-
+    sweep_generation_++;
     bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
         IsStableOrIncreasingSurvivalTrend();
 
@@ -771,11 +772,10 @@
 
   isolate_->counters()->objs_since_last_young()->Set(0);
 
-  if (collector == MARK_COMPACTOR) {
-    DisableAssertNoAllocation allow_allocation;
+  { DisableAssertNoAllocation allow_allocation;
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     next_gc_likely_to_collect_more =
-        isolate_->global_handles()->PostGarbageCollectionProcessing();
+        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
   }
 
   // Update relocatables.
@@ -935,6 +935,12 @@
 }
 
 
+static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
+  return heap->InNewSpace(*p) &&
+      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
+}
+
+
 void Heap::Scavenge() {
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@@ -1029,6 +1035,11 @@
   scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
 
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+  isolate_->global_handles()->IdentifyWeakIndependentHandles(
+      &IsUnscavengedHeapObject);
+  isolate_->global_handles()->IterateWeakIndependentRoots(&scavenge_visitor);
+  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
 
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -1282,6 +1293,10 @@
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                         template VisitSpecialized<SharedFunctionInfo::kSize>);
 
+    table_.Register(kVisitJSRegExp,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                    Visit);
+
     table_.Register(kVisitJSFunction,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                         template VisitSpecialized<JSFunction::kSize>);
@@ -1348,7 +1363,7 @@
 #if defined(ENABLE_LOGGING_AND_PROFILING)
       Isolate* isolate = heap->isolate();
       if (isolate->logger()->is_logging() ||
-          isolate->cpu_profiler()->is_profiling()) {
+          CpuProfiler::is_profiling(isolate)) {
         if (target->IsSharedFunctionInfo()) {
           PROFILE(isolate, SharedFunctionInfoMoveEvent(
               source->address(), target->address()));
@@ -1523,8 +1538,8 @@
     return;
   }
 
-  if (isolate()->logger()->is_logging() ||
-      isolate()->cpu_profiler()->is_profiling() ||
+  if (isolate()->logger()->is_logging() ||
+      CpuProfiler::is_profiling(isolate()) ||
       (isolate()->heap_profiler() != NULL &&
        isolate()->heap_profiler()->is_profiling())) {
     // If one of the isolates is doing scavenge at this moment of time
@@ -1593,7 +1608,7 @@
   map->set_instance_size(instance_size);
   map->set_inobject_properties(0);
   map->set_pre_allocated_property_fields(0);
-  map->set_instance_descriptors(empty_descriptor_array());
+  map->init_instance_descriptors();
   map->set_code_cache(empty_fixed_array());
   map->set_prototype_transitions(empty_fixed_array());
   map->set_unused_property_fields(0);
@@ -1686,15 +1701,15 @@
   set_empty_descriptor_array(DescriptorArray::cast(obj));
 
   // Fix the instance_descriptors for the existing maps.
-  meta_map()->set_instance_descriptors(empty_descriptor_array());
+  meta_map()->init_instance_descriptors();
   meta_map()->set_code_cache(empty_fixed_array());
   meta_map()->set_prototype_transitions(empty_fixed_array());
 
-  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
+  fixed_array_map()->init_instance_descriptors();
   fixed_array_map()->set_code_cache(empty_fixed_array());
   fixed_array_map()->set_prototype_transitions(empty_fixed_array());
 
-  oddball_map()->set_instance_descriptors(empty_descriptor_array());
+  oddball_map()->init_instance_descriptors();
   oddball_map()->set_code_cache(empty_fixed_array());
   oddball_map()->set_prototype_transitions(empty_fixed_array());
 
@@ -1720,10 +1735,10 @@
   }
   set_heap_number_map(Map::cast(obj));
 
-  { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
+  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_proxy_map(Map::cast(obj));
+  set_foreign_map(Map::cast(obj));
 
   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
     const StringTypeTable& entry = string_type_table[i];
@@ -1805,6 +1820,12 @@
   }
   set_external_float_array_map(Map::cast(obj));
 
+  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
+                                         ExternalArray::kAlignedSize);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_external_double_array_map(Map::cast(obj));
+
   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
@@ -2102,12 +2123,12 @@
   }
   hidden_symbol_ = String::cast(obj);
 
-  // Allocate the proxy for __proto__.
+  // Allocate the foreign for __proto__.
   { MaybeObject* maybe_obj =
-        AllocateProxy((Address) &Accessors::ObjectPrototype);
+        AllocateForeign((Address) &Accessors::ObjectPrototype);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_prototype_accessors(Proxy::cast(obj));
+  set_prototype_accessors(Foreign::cast(obj));
 
   // Allocate the code_stubs dictionary. The initial size is set to avoid
   // expanding the dictionary during bootstrapping.
@@ -2293,6 +2314,8 @@
       return kExternalUnsignedIntArrayMapRootIndex;
     case kExternalFloatArray:
       return kExternalFloatArrayMapRootIndex;
+    case kExternalDoubleArray:
+      return kExternalDoubleArrayMapRootIndex;
     case kExternalPixelArray:
       return kExternalPixelArrayMapRootIndex;
     default:
@@ -2323,16 +2346,16 @@
 }
 
 
-MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
-  // Statically ensure that it is safe to allocate proxies in paged spaces.
-  STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
+MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate foreigns in paged spaces.
+  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   Object* result;
-  { MaybeObject* maybe_result = Allocate(proxy_map(), space);
+  { MaybeObject* maybe_result = Allocate(foreign_map(), space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  Proxy::cast(result)->set_proxy(proxy);
+  Foreign::cast(result)->set_address(address);
   return result;
 }
 
@@ -2370,6 +2393,7 @@
   share->set_num_literals(0);
   share->set_end_position(0);
   share->set_function_token_position(0);
+  share->set_es5_native(false);
   return result;
 }
 
@@ -2792,6 +2816,7 @@
     code->set_check_type(RECEIVER_MAP_CHECK);
   }
   code->set_deoptimization_data(empty_fixed_array());
+  code->set_next_code_flushing_candidate(undefined_value());
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
   if (!self_reference.is_null()) {
@@ -3204,6 +3229,26 @@
 }
 
 
+MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
+  // Allocate map.
+  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
+  // maps. Will probably depend on the identity of the handler object, too.
+  Map* map;
+  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
+  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+  map->set_prototype(prototype);
+  map->set_pre_allocated_property_fields(1);
+  map->set_inobject_properties(1);
+
+  // Allocate the proxy object.
+  Object* result;
+  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
+  if (!maybe_result->ToObject(&result)) return maybe_result;
+  JSProxy::cast(result)->set_handler(handler);
+  return result;
+}
+
+
 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
   ASSERT(constructor->has_initial_map());
   Map* map = constructor->initial_map();
@@ -3267,7 +3312,7 @@
 
   // Setup the global object as a normalized object.
   global->set_map(new_map);
-  global->map()->set_instance_descriptors(empty_descriptor_array());
+  global->map()->clear_instance_descriptors();
   global->set_properties(dictionary);
 
   // Make sure result is a global object with properties in dictionary.
@@ -4139,6 +4184,26 @@
 }
 
 
+MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+                                     int from,
+                                     int length) {
+  Object* symbol = NULL;
+  Object* new_table;
+  { MaybeObject* maybe_new_table =
+        symbol_table()->LookupSubStringAsciiSymbol(string,
+                                                   from,
+                                                   length,
+                                                   &symbol);
+    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
+  }
+  // Can't use set_symbol_table because SymbolTable::cast knows that
+  // SymbolTable is a singleton and checks for identity.
+  roots_[kSymbolTableRootIndex] = new_table;
+  ASSERT(symbol != NULL);
+  return symbol;
+}
+
+
 MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
   Object* symbol = NULL;
   Object* new_table;
@@ -4461,7 +4526,8 @@
 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
   v->Synchronize("symbol_table");
-  if (mode != VISIT_ALL_IN_SCAVENGE) {
+  if (mode != VISIT_ALL_IN_SCAVENGE &&
+      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.Iterate(v);
   }
@@ -4497,16 +4563,24 @@
   // Iterate over the builtin code objects and code stubs in the
   // heap. Note that it is not necessary to iterate over code objects
   // on scavenge collections.
-  if (mode != VISIT_ALL_IN_SCAVENGE) {
+  if (mode != VISIT_ALL_IN_SCAVENGE &&
+      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     isolate_->builtins()->IterateBuiltins(v);
   }
   v->Synchronize("builtins");
 
   // Iterate over global handles.
-  if (mode == VISIT_ONLY_STRONG) {
-    isolate_->global_handles()->IterateStrongRoots(v);
-  } else {
-    isolate_->global_handles()->IterateAllRoots(v);
+  switch (mode) {
+    case VISIT_ONLY_STRONG:
+      isolate_->global_handles()->IterateStrongRoots(v);
+      break;
+    case VISIT_ALL_IN_SCAVENGE:
+      isolate_->global_handles()->IterateStrongAndDependentRoots(v);
+      break;
+    case VISIT_ALL_IN_SWEEP_NEWSPACE:
+    case VISIT_ALL:
+      isolate_->global_handles()->IterateAllRoots(v);
+      break;
   }
   v->Synchronize("globalhandles");
 
diff --git a/src/heap.h b/src/heap.h
index ae4e9e7..79ae996 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include <math.h>
 
+#include "allocation.h"
 #include "globals.h"
 #include "list.h"
 #include "mark-compact.h"
@@ -103,6 +104,7 @@
   V(Map, external_int_array_map, ExternalIntArrayMap)                          \
   V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap)         \
   V(Map, external_float_array_map, ExternalFloatArrayMap)                      \
+  V(Map, external_double_array_map, ExternalDoubleArrayMap)                    \
   V(Map, context_map, ContextMap)                                              \
   V(Map, catch_context_map, CatchContextMap)                                   \
   V(Map, code_map, CodeMap)                                                    \
@@ -110,12 +112,12 @@
   V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
   V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, message_object_map, JSMessageObjectMap)                               \
-  V(Map, proxy_map, ProxyMap)                                                  \
+  V(Map, foreign_map, ForeignMap)                                              \
   V(Object, nan_value, NanValue)                                               \
   V(Object, minus_zero_value, MinusZeroValue)                                  \
   V(Map, neander_map, NeanderMap)                                              \
   V(JSObject, message_listeners, MessageListeners)                             \
-  V(Proxy, prototype_accessors, PrototypeAccessors)                            \
+  V(Foreign, prototype_accessors, PrototypeAccessors)                          \
   V(NumberDictionary, code_stubs, CodeStubs)                                   \
   V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache)              \
   V(Code, js_entry_code, JsEntryCode)                                          \
@@ -176,8 +178,14 @@
   V(value_of_symbol, "valueOf")                                          \
   V(InitializeVarGlobal_symbol, "InitializeVarGlobal")                   \
   V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
-  V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized")                 \
-  V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized")               \
+  V(KeyedLoadSpecializedMonomorphic_symbol,                              \
+    "KeyedLoadSpecializedMonomorphic")                                   \
+  V(KeyedLoadSpecializedPolymorphic_symbol,                              \
+    "KeyedLoadSpecializedPolymorphic")                                   \
+  V(KeyedStoreSpecializedMonomorphic_symbol,                             \
+    "KeyedStoreSpecializedMonomorphic")                                  \
+  V(KeyedStoreSpecializedPolymorphic_symbol,                             \
+    "KeyedStoreSpecializedPolymorphic")                                  \
   V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
   V(illegal_access_symbol, "illegal access")                             \
   V(out_of_memory_symbol, "out-of-memory")                               \
@@ -205,30 +213,7 @@
   V(global_eval_symbol, "GlobalEval")                                    \
   V(identity_hash_symbol, "v8::IdentityHash")                            \
   V(closure_symbol, "(closure)")                                         \
-  V(use_strict, "use strict")                                            \
-  V(KeyedLoadExternalByteArray_symbol, "KeyedLoadExternalByteArray")     \
-  V(KeyedLoadExternalUnsignedByteArray_symbol,                           \
-      "KeyedLoadExternalUnsignedByteArray")                              \
-  V(KeyedLoadExternalShortArray_symbol,                                  \
-      "KeyedLoadExternalShortArray")                                     \
-  V(KeyedLoadExternalUnsignedShortArray_symbol,                          \
-      "KeyedLoadExternalUnsignedShortArray")                             \
-  V(KeyedLoadExternalIntArray_symbol, "KeyedLoadExternalIntArray")       \
-  V(KeyedLoadExternalUnsignedIntArray_symbol,                            \
-       "KeyedLoadExternalUnsignedIntArray")                              \
-  V(KeyedLoadExternalFloatArray_symbol, "KeyedLoadExternalFloatArray")   \
-  V(KeyedLoadExternalPixelArray_symbol, "KeyedLoadExternalPixelArray")   \
-  V(KeyedStoreExternalByteArray_symbol, "KeyedStoreExternalByteArray")   \
-  V(KeyedStoreExternalUnsignedByteArray_symbol,                          \
-        "KeyedStoreExternalUnsignedByteArray")                           \
-  V(KeyedStoreExternalShortArray_symbol, "KeyedStoreExternalShortArray") \
-  V(KeyedStoreExternalUnsignedShortArray_symbol,                         \
-        "KeyedStoreExternalUnsignedShortArray")                          \
-  V(KeyedStoreExternalIntArray_symbol, "KeyedStoreExternalIntArray")     \
-  V(KeyedStoreExternalUnsignedIntArray_symbol,                           \
-        "KeyedStoreExternalUnsignedIntArray")                            \
-  V(KeyedStoreExternalFloatArray_symbol, "KeyedStoreExternalFloatArray") \
-  V(KeyedStoreExternalPixelArray_symbol, "KeyedStoreExternalPixelArray")
+  V(use_strict, "use strict")
 
 // Forward declarations.
 class GCTracer;
@@ -448,6 +433,13 @@
   // Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);
 
+  // Allocates a Harmony Proxy.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  MUST_USE_RESULT MaybeObject* AllocateJSProxy(Object* handler,
+                                               Object* prototype);
+
   // Reinitialize a JSGlobalProxy based on a constructor.  The object
   // must have the same size as objects allocated using the
   // constructor.  The object is reinitialized and behaves as an
@@ -696,12 +688,12 @@
   // Please note this does not perform a garbage collection.
   MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
 
-  // Allocates a new proxy object.
+  // Allocates a new foreign object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateProxy(
-      Address proxy, PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT MaybeObject* AllocateForeign(
+      Address address, PretenureFlag pretenure = NOT_TENURED);
 
   // Allocates a new SharedFunctionInfo object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -798,6 +790,10 @@
     return LookupSymbol(CStrVector(str));
   }
   MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
+  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string,
+                                                 int from,
+                                                 int length);
+
   bool LookupSymbolIfExists(String* str, String** symbol);
   bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
 
@@ -1232,6 +1228,11 @@
     return &external_string_table_;
   }
 
+  // Returns the current sweep generation.
+  int sweep_generation() {
+    return sweep_generation_;
+  }
+
   inline Isolate* isolate();
   bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
 
@@ -1261,6 +1262,9 @@
   // scavenge since last new space expansion.
   int survived_since_last_expansion_;
 
+  // For keeping track of when to flush RegExp code.
+  int sweep_generation_;
+
   int always_allocate_scope_depth_;
   int linear_allocation_scope_depth_;
 
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 2cf60d7..682bdb2 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -60,12 +60,20 @@
     case kDouble: return "d";
     case kInteger32: return "i";
     case kExternal: return "x";
-    case kNumRepresentations:
+    default:
       UNREACHABLE();
       return NULL;
   }
-  UNREACHABLE();
-  return NULL;
+}
+
+
+void HValue::AssumeRepresentation(Representation r) {
+  if (CheckFlag(kFlexibleRepresentation)) {
+    ChangeRepresentation(r);
+    // The representation of the value is dictated by type feedback and
+    // will not be changed later.
+    ClearFlag(kFlexibleRepresentation);
+  }
 }
 
 
@@ -264,31 +272,60 @@
 }
 
 
-int HValue::LookupOperandIndex(int occurrence_index, HValue* op) {
-  for (int i = 0; i < OperandCount(); ++i) {
-    if (OperandAt(i) == op) {
-      if (occurrence_index == 0) return i;
-      --occurrence_index;
-    }
-  }
-  return -1;
-}
-
-
 bool HValue::IsDefinedAfter(HBasicBlock* other) const {
   return block()->block_id() > other->block_id();
 }
 
 
-bool HValue::UsesMultipleTimes(HValue* op) {
-  bool seen = false;
-  for (int i = 0; i < OperandCount(); ++i) {
-    if (OperandAt(i) == op) {
-      if (seen) return true;
-      seen = true;
-    }
+HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
+  Advance();
+}
+
+
+void HUseIterator::Advance() {
+  current_ = next_;
+  if (current_ != NULL) {
+    next_ = current_->tail();
+    value_ = current_->value();
+    index_ = current_->index();
   }
-  return false;
+}
+
+
+int HValue::UseCount() const {
+  int count = 0;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) ++count;
+  return count;
+}
+
+
+HUseListNode* HValue::RemoveUse(HValue* value, int index) {
+  HUseListNode* previous = NULL;
+  HUseListNode* current = use_list_;
+  while (current != NULL) {
+    if (current->value() == value && current->index() == index) {
+      if (previous == NULL) {
+        use_list_ = current->tail();
+      } else {
+        previous->set_tail(current->tail());
+      }
+      break;
+    }
+
+    previous = current;
+    current = current->tail();
+  }
+
+#ifdef DEBUG
+  // Do not reuse use list nodes in debug mode; zap them instead.
+  if (current != NULL) {
+    HUseListNode* temp =
+        new HUseListNode(current->value(), current->index(), NULL);
+    current->Zap();
+    current = temp;
+  }
+#endif
+  return current;
 }
 
 
@@ -317,71 +354,48 @@
 }
 
 
+const char* HValue::Mnemonic() const {
+  switch (opcode()) {
+#define MAKE_CASE(type) case k##type: return #type;
+    HYDROGEN_CONCRETE_INSTRUCTION_LIST(MAKE_CASE)
+#undef MAKE_CASE
+    case kPhi: return "Phi";
+    default: return "";
+  }
+}
+
+
 void HValue::SetOperandAt(int index, HValue* value) {
-  ASSERT(value == NULL || !value->representation().IsNone());
   RegisterUse(index, value);
   InternalSetOperandAt(index, value);
 }
 
 
-void HValue::ReplaceAndDelete(HValue* other) {
-  if (other != NULL) ReplaceValue(other);
-  Delete();
-}
-
-
-void HValue::ReplaceValue(HValue* other) {
-  for (int i = 0; i < uses_.length(); ++i) {
-    HValue* use = uses_[i];
-    ASSERT(!use->block()->IsStartBlock());
-    InternalReplaceAtUse(use, other);
-    other->uses_.Add(use);
-  }
-  uses_.Rewind(0);
-}
-
-
-void HValue::ClearOperands() {
-  for (int i = 0; i < OperandCount(); ++i) {
-    SetOperandAt(i, NULL);
-  }
-}
-
-
-void HValue::Delete() {
+void HValue::DeleteAndReplaceWith(HValue* other) {
+  // We replace all uses first, so Delete can assert that there are none.
+  if (other != NULL) ReplaceAllUsesWith(other);
   ASSERT(HasNoUses());
   ClearOperands();
   DeleteFromGraph();
 }
 
 
-void HValue::ReplaceAtUse(HValue* use, HValue* other) {
-  for (int i = 0; i < use->OperandCount(); ++i) {
-    if (use->OperandAt(i) == this) {
-      use->SetOperandAt(i, other);
-    }
+void HValue::ReplaceAllUsesWith(HValue* other) {
+  while (use_list_ != NULL) {
+    HUseListNode* list_node = use_list_;
+    HValue* value = list_node->value();
+    ASSERT(!value->block()->IsStartBlock());
+    value->InternalSetOperandAt(list_node->index(), other);
+    use_list_ = list_node->tail();
+    list_node->set_tail(other->use_list_);
+    other->use_list_ = list_node;
   }
 }
 
 
-void HValue::ReplaceFirstAtUse(HValue* use, HValue* other, Representation r) {
-  for (int i = 0; i < use->OperandCount(); ++i) {
-    if (use->RequiredInputRepresentation(i).Equals(r) &&
-        use->OperandAt(i) == this) {
-      use->SetOperandAt(i, other);
-      return;
-    }
-  }
-}
-
-
-void HValue::InternalReplaceAtUse(HValue* use, HValue* other) {
-  for (int i = 0; i < use->OperandCount(); ++i) {
-    if (use->OperandAt(i) == this) {
-      // Call internal method that does not update use lists. The caller is
-      // responsible for doing so.
-      use->InternalSetOperandAt(i, other);
-    }
+void HValue::ClearOperands() {
+  for (int i = 0; i < OperandCount(); ++i) {
+    SetOperandAt(i, NULL);
   }
 }
 
@@ -395,8 +409,39 @@
 }
 
 
-void HValue::PrintTypeTo(HType type, StringStream* stream) {
-  stream->Add(type.ToShortString());
+void HValue::PrintTypeTo(StringStream* stream) {
+  if (!representation().IsTagged() || type().Equals(HType::Tagged())) return;
+  stream->Add(" type[%s]", type().ToString());
+}
+
+
+void HValue::PrintRangeTo(StringStream* stream) {
+  if (range() == NULL || range()->IsMostGeneric()) return;
+  stream->Add(" range[%d,%d,m0=%d]",
+              range()->lower(),
+              range()->upper(),
+              static_cast<int>(range()->CanBeMinusZero()));
+}
+
+
+void HValue::PrintChangesTo(StringStream* stream) {
+  int changes_flags = (flags() & HValue::ChangesFlagsMask());
+  if (changes_flags == 0) return;
+  stream->Add(" changes[");
+  if (changes_flags == AllSideEffects()) {
+    stream->Add("*");
+  } else {
+    bool add_comma = false;
+#define PRINT_DO(type)                         \
+    if (changes_flags & (1 << kChanges##type)) { \
+      if (add_comma) stream->Add(",");           \
+      add_comma = true;                          \
+      stream->Add(#type);                        \
+    }
+    GVN_FLAG_LIST(PRINT_DO);
+#undef PRINT_DO
+  }
+  stream->Add("]");
 }
 
 
@@ -416,9 +461,20 @@
 void HValue::RegisterUse(int index, HValue* new_value) {
   HValue* old_value = OperandAt(index);
   if (old_value == new_value) return;
-  if (old_value != NULL) old_value->uses_.RemoveElement(this);
+
+  HUseListNode* removed = NULL;
+  if (old_value != NULL) {
+    removed = old_value->RemoveUse(this, index);
+  }
+
   if (new_value != NULL) {
-    new_value->uses_.Add(this);
+    if (removed == NULL) {
+      new_value->use_list_ =
+          new HUseListNode(this, index, new_value->use_list_);
+    } else {
+      removed->set_tail(new_value->use_list_);
+      new_value->use_list_ = removed;
+    }
   }
 }
 
@@ -447,28 +503,18 @@
 
 
 void HInstruction::PrintTo(StringStream* stream) {
+  PrintMnemonicTo(stream);
+  PrintDataTo(stream);
+  PrintRangeTo(stream);
+  PrintChangesTo(stream);
+  PrintTypeTo(stream);
+}
+
+
+void HInstruction::PrintMnemonicTo(StringStream* stream) {
   stream->Add("%s", Mnemonic());
   if (HasSideEffects()) stream->Add("*");
   stream->Add(" ");
-  PrintDataTo(stream);
-
-  if (range() != NULL &&
-      !range()->IsMostGeneric() &&
-      !range()->CanBeMinusZero()) {
-    stream->Add(" range[%d,%d,m0=%d]",
-                range()->lower(),
-                range()->upper(),
-                static_cast<int>(range()->CanBeMinusZero()));
-  }
-
-  int changes_flags = (flags() & HValue::ChangesFlagsMask());
-  if (changes_flags != 0) {
-    stream->Add(" changes[0x%x]", changes_flags);
-  }
-
-  if (representation().IsTagged() && !type().Equals(HType::Tagged())) {
-    stream->Add(" type[%s]", type().ToString());
-  }
 }
 
 
@@ -553,6 +599,8 @@
         ASSERT(cur == other_operand);
       }
     } else {
+      // If the following assert fires, you may have forgotten an
+      // AddInstruction.
       ASSERT(other_block->Dominates(cur_block));
     }
   }
@@ -733,10 +781,38 @@
 }
 
 
-HCheckInstanceType* HCheckInstanceType::NewIsJSObjectOrJSFunction(
-    HValue* value)  {
-  STATIC_ASSERT((LAST_JS_OBJECT_TYPE + 1) == JS_FUNCTION_TYPE);
-  return new HCheckInstanceType(value, FIRST_JS_OBJECT_TYPE, JS_FUNCTION_TYPE);
+void HCheckInstanceType::GetCheckInterval(InstanceType* first,
+                                          InstanceType* last) {
+  ASSERT(is_interval_check());
+  switch (check_) {
+    case IS_JS_OBJECT_OR_JS_FUNCTION:
+      STATIC_ASSERT((LAST_JS_OBJECT_TYPE + 1) == JS_FUNCTION_TYPE);
+      *first = FIRST_JS_OBJECT_TYPE;
+      *last = JS_FUNCTION_TYPE;
+      return;
+    case IS_JS_ARRAY:
+      *first = *last = JS_ARRAY_TYPE;
+      return;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
+  ASSERT(!is_interval_check());
+  switch (check_) {
+    case IS_STRING:
+      *mask = kIsNotStringMask;
+      *tag = kStringTag;
+      return;
+    case IS_SYMBOL:
+      *mask = kIsSymbolMask;
+      *tag = kSymbolTag;
+      return;
+    default:
+      UNREACHABLE();
+  }
 }
 
 
@@ -915,7 +991,7 @@
     stream->Add(" ");
   }
   stream->Add(" uses%d_%di_%dd_%dt]",
-              uses()->length(),
+              UseCount(),
               int32_non_phi_uses() + int32_indirect_uses(),
               double_non_phi_uses() + double_indirect_uses(),
               tagged_non_phi_uses() + tagged_indirect_uses());
@@ -933,8 +1009,8 @@
 
 
 bool HPhi::HasRealUses() {
-  for (int i = 0; i < uses()->length(); i++) {
-    if (!uses()->at(i)->IsPhi()) return true;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->IsPhi()) return true;
   }
   return false;
 }
@@ -967,12 +1043,11 @@
 void HPhi::InitRealUses(int phi_id) {
   // Initialize real uses.
   phi_id_ = phi_id;
-  for (int j = 0; j < uses()->length(); j++) {
-    HValue* use = uses()->at(j);
-    if (!use->IsPhi()) {
-      int index = use->LookupOperandIndex(0, this);
-      Representation req_rep = use->RequiredInputRepresentation(index);
-      non_phi_uses_[req_rep.kind()]++;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* value = it.value();
+    if (!value->IsPhi()) {
+      Representation rep = value->RequiredInputRepresentation(it.index());
+      ++non_phi_uses_[rep.kind()];
     }
   }
 }
@@ -1017,10 +1092,9 @@
 
 HConstant::HConstant(Handle<Object> handle, Representation r)
     : handle_(handle),
-      constant_type_(HType::TypeFromValue(handle)),
       has_int32_value_(false),
-      int32_value_(0),
       has_double_value_(false),
+      int32_value_(0),
       double_value_(0)  {
   set_representation(r);
   SetFlag(kUseGVN);
@@ -1194,13 +1268,23 @@
     Handle<Map> map = types->at(i);
     LookupResult lookup;
     map->LookupInDescriptors(NULL, *name, &lookup);
-    if (lookup.IsProperty() && lookup.type() == FIELD) {
-      types_.Add(types->at(i));
-      int index = lookup.GetLocalFieldIndexFromMap(*map);
-      if (index < 0) {
-        SetFlag(kDependsOnInobjectFields);
-      } else {
-        SetFlag(kDependsOnBackingStoreFields);
+    if (lookup.IsProperty()) {
+      switch (lookup.type()) {
+        case FIELD: {
+          int index = lookup.GetLocalFieldIndexFromMap(*map);
+          if (index < 0) {
+            SetFlag(kDependsOnInobjectFields);
+          } else {
+            SetFlag(kDependsOnBackingStoreFields);
+          }
+          types_.Add(types->at(i));
+          break;
+        }
+        case CONSTANT_FUNCTION:
+          types_.Add(types->at(i));
+          break;
+        default:
+          break;
       }
     }
   }
@@ -1241,6 +1325,15 @@
 }
 
 
+bool HLoadKeyedFastElement::RequiresHoleCheck() const {
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    if (!use->IsChange()) return true;
+  }
+  return false;
+}
+
+
 void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add("[");
@@ -1275,6 +1368,9 @@
     case kExternalFloatArray:
       stream->Add("float");
       break;
+    case kExternalDoubleArray:
+      stream->Add("double");
+      break;
     case kExternalPixelArray:
       stream->Add("pixel");
       break;
@@ -1352,6 +1448,9 @@
     case kExternalFloatArray:
       stream->Add("float");
       break;
+    case kExternalDoubleArray:
+      stream->Add("double");
+      break;
     case kExternalPixelArray:
       stream->Add("pixel");
       break;
@@ -1439,7 +1538,7 @@
 
 
 HType HConstant::CalculateInferredType() {
-  return constant_type_;
+  return HType::TypeFromValue(handle_);
 }
 
 
@@ -1543,6 +1642,13 @@
 }
 
 
+HValue* HForceRepresentation::EnsureAndPropagateNotMinusZero(
+    BitVector* visited) {
+  visited->Add(id());
+  return value();
+}
+
+
 HValue* HMod::EnsureAndPropagateNotMinusZero(BitVector* visited) {
   visited->Add(id());
   if (range() == NULL || range()->CanBeMinusZero()) {
@@ -1593,6 +1699,13 @@
 }
 
 
+void HIn::PrintDataTo(StringStream* stream) {
+  key()->PrintNameTo(stream);
+  stream->Add(" ");
+  object()->PrintNameTo(stream);
+}
+
+
 // Node-specific verification code is only included in debug mode.
 #ifdef DEBUG
 
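
The hydrogen-instructions.cc changes above drop the SmallPointerList of users in favour of an intrusive singly linked list of (user, operand index) nodes that RegisterUse, RemoveUse, and ReplaceAllUsesWith can splice in O(1). The following is a minimal standalone sketch of that list pattern, with hypothetical names (UseNode, AddUse, RemoveUse as free functions) rather than the actual HUseListNode/HUseIterator classes:

  #include <cstdio>

  // One node per use: which instruction uses the value, and at which operand.
  struct UseNode {
    void* user;
    int index;
    UseNode* tail;
  };

  // Prepend a use in O(1), as RegisterUse does when a new operand is set.
  UseNode* AddUse(UseNode* head, void* user, int index) {
    return new UseNode{user, index, head};
  }

  // Unlink the first matching (user, index) node and hand it back so the
  // caller can recycle it, mirroring the splice in HValue::RemoveUse.
  UseNode* RemoveUse(UseNode** head, void* user, int index) {
    UseNode* prev = nullptr;
    for (UseNode* cur = *head; cur != nullptr; prev = cur, cur = cur->tail) {
      if (cur->user == user && cur->index == index) {
        if (prev == nullptr) *head = cur->tail; else prev->tail = cur->tail;
        return cur;
      }
    }
    return nullptr;
  }

  int main() {
    int a = 0, b = 0;
    UseNode* uses = nullptr;
    uses = AddUse(uses, &a, 0);
    uses = AddUse(uses, &b, 1);
    delete RemoveUse(&uses, &a, 0);
    // Walk the remaining uses, the way HUseIterator advances through the list.
    for (UseNode* it = uses; it != nullptr; it = it->tail) {
      std::printf("use at operand %d\n", it->index);
    }
    while (uses != nullptr) { UseNode* n = uses; uses = n->tail; delete n; }
    return 0;
  }
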
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index d5f4ea1..4d32edd 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -30,7 +30,9 @@
 
 #include "v8.h"
 
+#include "allocation.h"
 #include "code-stubs.h"
+#include "data-flow.h"
 #include "small-pointer-list.h"
 #include "string-stream.h"
 #include "zone.h"
@@ -48,18 +50,10 @@
 class LChunkBuilder;
 
 
-#define HYDROGEN_ALL_INSTRUCTION_LIST(V)       \
-  V(ArithmeticBinaryOperation)                 \
-  V(BinaryCall)                                \
-  V(BinaryOperation)                           \
+#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V)  \
   V(BitwiseBinaryOperation)                    \
   V(ControlInstruction)                        \
   V(Instruction)                               \
-  V(Phi)                                       \
-  V(UnaryCall)                                 \
-  V(UnaryControlInstruction)                   \
-  V(UnaryOperation)                            \
-  HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)
 
 
 #define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)  \
@@ -93,10 +87,12 @@
   V(CheckNonSmi)                               \
   V(CheckPrototypeMaps)                        \
   V(CheckSmi)                                  \
+  V(ClampToUint8)                              \
   V(ClassOfTest)                               \
   V(Compare)                                   \
   V(CompareJSObjectEq)                         \
   V(CompareMap)                                \
+  V(CompareSymbolEq)                           \
   V(Constant)                                  \
   V(Context)                                   \
   V(DeleteProperty)                            \
@@ -105,6 +101,7 @@
   V(EnterInlined)                              \
   V(ExternalArrayLength)                       \
   V(FixedArrayLength)                          \
+  V(ForceRepresentation)                       \
   V(FunctionLiteral)                           \
   V(GetCachedArrayIndex)                       \
   V(GlobalObject)                              \
@@ -112,12 +109,15 @@
   V(Goto)                                      \
   V(HasInstanceType)                           \
   V(HasCachedArrayIndex)                       \
+  V(In)                                        \
   V(InstanceOf)                                \
   V(InstanceOfKnownGlobal)                     \
+  V(InvokeFunction)                            \
+  V(IsConstructCall)                           \
   V(IsNull)                                    \
   V(IsObject)                                  \
   V(IsSmi)                                     \
-  V(IsConstructCall)                           \
+  V(IsUndetectable)                            \
   V(JSArrayLength)                             \
   V(LeaveInlined)                              \
   V(LoadContextSlot)                           \
@@ -155,6 +155,7 @@
   V(StoreKeyedGeneric)                         \
   V(StoreNamedField)                           \
   V(StoreNamedGeneric)                         \
+  V(StringAdd)                                 \
   V(StringCharCodeAt)                          \
   V(StringCharFromCode)                        \
   V(StringLength)                              \
@@ -180,19 +181,21 @@
   V(ContextSlots)                              \
   V(OsrEntries)
 
-#define DECLARE_INSTRUCTION(type)                   \
+#define DECLARE_ABSTRACT_INSTRUCTION(type)          \
   virtual bool Is##type() const { return true; }    \
   static H##type* cast(HValue* value) {             \
     ASSERT(value->Is##type());                      \
     return reinterpret_cast<H##type*>(value);       \
-  }                                                 \
-  Opcode opcode() const { return HValue::k##type; }
+  }
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+#define DECLARE_CONCRETE_INSTRUCTION(type)                        \
   virtual LInstruction* CompileToLithium(LChunkBuilder* builder); \
-  virtual const char* Mnemonic() const { return mnemonic; }       \
-  DECLARE_INSTRUCTION(type)
+  static H##type* cast(HValue* value) {                           \
+    ASSERT(value->Is##type());                                    \
+    return reinterpret_cast<H##type*>(value);                     \
+  }                                                               \
+  virtual Opcode opcode() const { return HValue::k##type; }
 
 
 class Range: public ZoneObject {
@@ -407,6 +410,62 @@
 };
 
 
+class HUseListNode: public ZoneObject {
+ public:
+  HUseListNode(HValue* value, int index, HUseListNode* tail)
+      : tail_(tail), value_(value), index_(index) {
+  }
+
+  HUseListNode* tail() const { return tail_; }
+  HValue* value() const { return value_; }
+  int index() const { return index_; }
+
+  void set_tail(HUseListNode* list) { tail_ = list; }
+
+#ifdef DEBUG
+  void Zap() {
+    tail_ = reinterpret_cast<HUseListNode*>(1);
+    value_ = NULL;
+    index_ = -1;
+  }
+#endif
+
+ private:
+  HUseListNode* tail_;
+  HValue* value_;
+  int index_;
+};
+
+
+// We reuse use list nodes behind the scenes as uses are added and deleted.
+// This class is the safe way to iterate uses while deleting them.
+class HUseIterator BASE_EMBEDDED {
+ public:
+  bool Done() { return current_ == NULL; }
+  void Advance();
+
+  HValue* value() {
+    ASSERT(!Done());
+    return value_;
+  }
+
+  int index() {
+    ASSERT(!Done());
+    return index_;
+  }
+
+ private:
+  explicit HUseIterator(HUseListNode* head);
+
+  HUseListNode* current_;
+  HUseListNode* next_;
+  HValue* value_;
+  int index_;
+
+  friend class HValue;
+};
+
+
 class HValue: public ZoneObject {
  public:
   static const int kNoNumber = -1;
@@ -455,15 +514,30 @@
 
   enum Opcode {
     // Declare a unique enum value for each hydrogen instruction.
-  #define DECLARE_DO(type) k##type,
-    HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
-  #undef DECLARE_DO
-    kMaxInstructionClass
+  #define DECLARE_OPCODE(type) k##type,
+    HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kPhi
+  #undef DECLARE_OPCODE
   };
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual predicates for each concrete HInstruction or HValue.
+  #define DECLARE_PREDICATE(type) \
+    bool Is##type() const { return opcode() == k##type; }
+    HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+  #undef DECLARE_PREDICATE
+    bool IsPhi() const { return opcode() == kPhi; }
+
+  // Declare virtual predicates for abstract HInstruction or HValue
+  #define DECLARE_PREDICATE(type) \
+    virtual bool Is##type() const { return false; }
+    HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE)
+  #undef DECLARE_PREDICATE
 
   HValue() : block_(NULL),
              id_(kNoNumber),
              type_(HType::Tagged()),
+             use_list_(NULL),
              range_(NULL),
              flags_(0) {}
   virtual ~HValue() {}
@@ -474,22 +548,24 @@
   int id() const { return id_; }
   void set_id(int id) { id_ = id; }
 
-  SmallPointerList<HValue>* uses() { return &uses_; }
+  HUseIterator uses() const { return HUseIterator(use_list_); }
 
   virtual bool EmitAtUses() { return false; }
   Representation representation() const { return representation_; }
   void ChangeRepresentation(Representation r) {
     // Representation was already set and is allowed to be changed.
-    ASSERT(!representation_.IsNone());
     ASSERT(!r.IsNone());
     ASSERT(CheckFlag(kFlexibleRepresentation));
     RepresentationChanged(r);
     representation_ = r;
   }
+  void AssumeRepresentation(Representation r);
+
+  virtual bool IsConvertibleToInteger() const { return true; }
 
   HType type() const { return type_; }
   void set_type(HType type) {
-    ASSERT(uses_.length() == 0);
+    ASSERT(HasNoUses());
     type_ = type;
   }
 
@@ -515,16 +591,14 @@
   virtual HValue* OperandAt(int index) = 0;
   void SetOperandAt(int index, HValue* value);
 
-  int LookupOperandIndex(int occurrence_index, HValue* op);
-  bool UsesMultipleTimes(HValue* op);
-
-  void ReplaceAndDelete(HValue* other);
-  void ReplaceValue(HValue* other);
-  void ReplaceAtUse(HValue* use, HValue* other);
-  void ReplaceFirstAtUse(HValue* use, HValue* other, Representation r);
-  bool HasNoUses() const { return uses_.is_empty(); }
+  void DeleteAndReplaceWith(HValue* other);
+  void ReplaceAllUsesWith(HValue* other);
+  bool HasNoUses() const { return use_list_ == NULL; }
+  bool HasMultipleUses() const {
+    return use_list_ != NULL && use_list_->tail() != NULL;
+  }
+  int UseCount() const;
   void ClearOperands();
-  void Delete();
 
   int flags() const { return flags_; }
   void SetFlag(Flag f) { flags_ |= (1 << f); }
@@ -554,21 +628,17 @@
   // then return it.  Return NULL to have the instruction deleted.
   virtual HValue* Canonicalize() { return this; }
 
-  // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
-  HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
   bool Equals(HValue* other);
   virtual intptr_t Hashcode();
 
   // Printing support.
   virtual void PrintTo(StringStream* stream) = 0;
   void PrintNameTo(StringStream* stream);
-  static void PrintTypeTo(HType type, StringStream* stream);
+  void PrintTypeTo(StringStream* stream);
+  void PrintRangeTo(StringStream* stream);
+  void PrintChangesTo(StringStream* stream);
 
-  virtual const char* Mnemonic() const = 0;
-  virtual Opcode opcode() const = 0;
+  const char* Mnemonic() const;
 
   // Updated the inferred type of this instruction and returns true if
   // it has changed.
@@ -608,7 +678,10 @@
     return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
   }
 
-  void InternalReplaceAtUse(HValue* use, HValue* other);
+  // Remove the matching use from the use list if present.  Returns the
+  // removed list node or NULL.
+  HUseListNode* RemoveUse(HValue* value, int index);
+
   void RegisterUse(int index, HValue* new_value);
 
   HBasicBlock* block_;
@@ -619,7 +692,7 @@
 
   Representation representation_;
   HType type_;
-  SmallPointerList<HValue> uses_;
+  HUseListNode* use_list_;
   Range* range_;
   int flags_;
 
@@ -656,7 +729,7 @@
 
   virtual bool IsCall() { return false; }
 
-  DECLARE_INSTRUCTION(Instruction)
+  DECLARE_ABSTRACT_INSTRUCTION(Instruction)
 
  protected:
   HInstruction()
@@ -674,6 +747,8 @@
     SetBlock(block);
   }
 
+  void PrintMnemonicTo(StringStream* stream);
+
   HInstruction* next_;
   HInstruction* previous_;
   int position_;
@@ -693,7 +768,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_INSTRUCTION(ControlInstruction)
+  DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
 
  private:
   HBasicBlock* first_successor_;
@@ -765,7 +840,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry")
+  DECLARE_CONCRETE_INSTRUCTION(BlockEntry)
 };
 
 
@@ -787,7 +862,12 @@
     SetOperandAt(values_.length() - 1, value);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
+
+  enum UseEnvironment {
+    kNoUses,
+    kUseAll
+  };
 
  protected:
   virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -814,7 +894,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  DECLARE_CONCRETE_INSTRUCTION(Goto)
 
  private:
   bool include_stack_check_;
@@ -833,8 +913,6 @@
   virtual void PrintDataTo(StringStream* stream);
 
   HValue* value() { return OperandAt(0); }
-
-  DECLARE_INSTRUCTION(UnaryControlInstruction)
 };
 
 
@@ -849,7 +927,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Test, "test")
+  DECLARE_CONCRETE_INSTRUCTION(Test)
 };
 
 
@@ -874,7 +952,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map")
+  DECLARE_CONCRETE_INSTRUCTION(CompareMap)
 
  private:
   Handle<Map> map_;
@@ -891,7 +969,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+  DECLARE_CONCRETE_INSTRUCTION(Return)
 };
 
 
@@ -903,7 +981,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(AbnormalExit, "abnormal_exit")
+  DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
 };
 
 
@@ -915,8 +993,6 @@
 
   HValue* value() { return OperandAt(0); }
   virtual void PrintDataTo(StringStream* stream);
-
-  DECLARE_INSTRUCTION(UnaryOperation)
 };
 
 
@@ -930,7 +1006,26 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+  DECLARE_CONCRETE_INSTRUCTION(Throw)
+};
+
+
+class HForceRepresentation: public HTemplateInstruction<1> {
+ public:
+  HForceRepresentation(HValue* value, Representation required_representation) {
+    SetOperandAt(0, value);
+    set_representation(required_representation);
+  }
+
+  HValue* value() { return OperandAt(0); }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return representation();  // Same as the output representation.
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
 };
 
 
@@ -968,8 +1063,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(Change,
-                               CanTruncateToInt32() ? "truncate" : "change")
+  DECLARE_CONCRETE_INSTRUCTION(Change)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -986,6 +1080,46 @@
 };
 
 
+class HClampToUint8: public HUnaryOperation {
+ public:
+  explicit HClampToUint8(HValue* value)
+      : HUnaryOperation(value),
+        input_rep_(Representation::None()) {
+    SetFlag(kFlexibleRepresentation);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return input_rep_;
+  }
+
+  virtual Representation InferredRepresentation() {
+    // TODO(danno): Inference on input types should happen separately from
+    // return representation.
+    Representation new_rep = value()->representation();
+    if (input_rep_.IsNone()) {
+      if (!new_rep.IsNone()) {
+        input_rep_ = new_rep;
+        return Representation::Integer32();
+      } else {
+        return Representation::None();
+      }
+    } else {
+      return Representation::Integer32();
+    }
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+  Representation input_rep_;
+};
+
+
 class HSimulate: public HInstruction {
  public:
   HSimulate(int ast_id, int pop_count)
@@ -1026,7 +1160,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
+  DECLARE_CONCRETE_INSTRUCTION(Simulate)
 
 #ifdef DEBUG
   virtual void Verify();
@@ -1062,30 +1196,36 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack_check")
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck)
 };
 
 
 class HEnterInlined: public HTemplateInstruction<0> {
  public:
-  HEnterInlined(Handle<JSFunction> closure, FunctionLiteral* function)
-      : closure_(closure), function_(function) {
+  HEnterInlined(Handle<JSFunction> closure,
+                FunctionLiteral* function,
+                CallKind call_kind)
+      : closure_(closure),
+        function_(function),
+        call_kind_(call_kind) {
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
   Handle<JSFunction> closure() const { return closure_; }
   FunctionLiteral* function() const { return function_; }
+  CallKind call_kind() const { return call_kind_; }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(EnterInlined, "enter_inlined")
+  DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
 
  private:
   Handle<JSFunction> closure_;
   FunctionLiteral* function_;
+  CallKind call_kind_;
 };
 
 
@@ -1097,7 +1237,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LeaveInlined, "leave_inlined")
+  DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
 };
 
 
@@ -1113,7 +1253,7 @@
 
   HValue* argument() { return OperandAt(0); }
 
-  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument")
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument)
 };
 
 
@@ -1128,7 +1268,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Context, "context");
+  DECLARE_CONCRETE_INSTRUCTION(Context);
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1142,7 +1282,7 @@
     SetFlag(kUseGVN);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer_context");
+  DECLARE_CONCRETE_INSTRUCTION(OuterContext);
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -1160,7 +1300,7 @@
     SetFlag(kUseGVN);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -1179,7 +1319,7 @@
     SetFlag(kUseGVN);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -1224,8 +1364,6 @@
   virtual void PrintDataTo(StringStream* stream);
 
   HValue* value() { return OperandAt(0); }
-
-  DECLARE_INSTRUCTION(UnaryCall)
 };
 
 
@@ -1245,8 +1383,23 @@
 
   HValue* first() { return OperandAt(0); }
   HValue* second() { return OperandAt(1); }
+};
 
-  DECLARE_INSTRUCTION(BinaryCall)
+
+class HInvokeFunction: public HBinaryCall {
+ public:
+  HInvokeFunction(HValue* context, HValue* function, int argument_count)
+      : HBinaryCall(context, function, argument_count) {
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  HValue* context() { return first(); }
+  HValue* function() { return second(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
 };
 
 
@@ -1268,7 +1421,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call_constant_function")
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction)
 
  private:
   Handle<JSFunction> function_;
@@ -1288,7 +1441,7 @@
   HValue* context() { return first(); }
   HValue* key() { return second(); }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed")
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed)
 };
 
 
@@ -1303,7 +1456,7 @@
   HValue* context() { return value(); }
   Handle<String> name() const { return name_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named")
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -1326,7 +1479,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function")
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction)
 };
 
 
@@ -1345,7 +1498,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global")
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
 
  private:
   Handle<String> name_;
@@ -1365,7 +1518,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global")
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal)
 
  private:
   Handle<JSFunction> target_;
@@ -1385,7 +1538,7 @@
   HValue* context() { return first(); }
   HValue* constructor() { return second(); }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new")
+  DECLARE_CONCRETE_INSTRUCTION(CallNew)
 };
 
 
@@ -1404,7 +1557,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call_runtime")
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
 
  private:
   const Runtime::Function* c_function_;
@@ -1428,7 +1581,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length")
+  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1447,7 +1600,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length")
+  DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1468,7 +1621,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external_array_length")
+  DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1488,7 +1641,7 @@
   }
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
+  DECLARE_CONCRETE_INSTRUCTION(BitNot)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1560,7 +1713,7 @@
   BuiltinFunctionId op() const { return op_; }
   const char* OpName() const;
 
-  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation")
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -1585,7 +1738,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1608,8 +1761,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
-                               "load-external-array-pointer")
+  DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1639,7 +1791,7 @@
 
   Handle<Map> map() const { return map_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check_map")
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -1674,7 +1826,7 @@
 
   Handle<JSFunction> target() const { return target_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check_function")
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -1689,19 +1841,17 @@
 
 class HCheckInstanceType: public HUnaryOperation {
  public:
-  // Check that the instance type is in the range [first, last] where
-  // both first and last are included.
-  HCheckInstanceType(HValue* value, InstanceType first, InstanceType last)
-      : HUnaryOperation(value), first_(first), last_(last) {
-    ASSERT(first <= last);
-    set_representation(Representation::Tagged());
-    SetFlag(kUseGVN);
-    if ((FIRST_STRING_TYPE < first && last <= LAST_STRING_TYPE) ||
-        (FIRST_STRING_TYPE <= first && last < LAST_STRING_TYPE)) {
-      // A particular string instance type can change because of GC or
-      // externalization, but the value still remains a string.
-      SetFlag(kDependsOnMaps);
-    }
+  static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value) {
+    return new HCheckInstanceType(value, IS_JS_OBJECT_OR_JS_FUNCTION);
+  }
+  static HCheckInstanceType* NewIsJSArray(HValue* value) {
+    return new HCheckInstanceType(value, IS_JS_ARRAY);
+  }
+  static HCheckInstanceType* NewIsString(HValue* value) {
+    return new HCheckInstanceType(value, IS_STRING);
+  }
+  static HCheckInstanceType* NewIsSymbol(HValue* value) {
+    return new HCheckInstanceType(value, IS_SYMBOL);
   }
 
   virtual bool IsCheckInstruction() const { return true; }
@@ -1714,12 +1864,20 @@
   virtual void Verify();
 #endif
 
-  static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
+  virtual HValue* Canonicalize() {
+    if (!value()->type().IsUninitialized() &&
+        value()->type().IsString() &&
+        check_ == IS_STRING) {
+      return NULL;
+    }
+    return this;
+  }
 
-  InstanceType first() const { return first_; }
-  InstanceType last() const { return last_; }
+  bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
+  void GetCheckInterval(InstanceType* first, InstanceType* last);
+  void GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag);
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check_instance_type")
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType)
 
  protected:
   // TODO(ager): It could be nice to allow the omission of instance
@@ -1727,12 +1885,25 @@
   // with a larger range.
   virtual bool DataEquals(HValue* other) {
     HCheckInstanceType* b = HCheckInstanceType::cast(other);
-    return (first_ == b->first()) && (last_ == b->last());
+    return check_ == b->check_;
   }
 
  private:
-  InstanceType first_;
-  InstanceType last_;
+  enum Check {
+    IS_JS_OBJECT_OR_JS_FUNCTION,
+    IS_JS_ARRAY,
+    IS_STRING,
+    IS_SYMBOL,
+    LAST_INTERVAL_CHECK = IS_JS_ARRAY
+  };
+
+  HCheckInstanceType(HValue* value, Check check)
+      : HUnaryOperation(value), check_(check) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  const Check check_;
 };
 
 
@@ -1755,7 +1926,19 @@
   virtual void Verify();
 #endif
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
+  virtual HValue* Canonicalize() {
+    HType value_type = value()->type();
+    if (!value_type.IsUninitialized() &&
+        (value_type.IsHeapNumber() ||
+         value_type.IsString() ||
+         value_type.IsBoolean() ||
+         value_type.IsNonPrimitive())) {
+      return NULL;
+    }
+    return this;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1779,7 +1962,7 @@
   Handle<JSObject> prototype() const { return prototype_; }
   Handle<JSObject> holder() const { return holder_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps")
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
@@ -1823,7 +2006,7 @@
   virtual void Verify();
 #endif
 
-  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -1836,7 +2019,8 @@
       : inputs_(2),
         merged_index_(merged_index),
         phi_id_(-1),
-        is_live_(false) {
+        is_live_(false),
+        is_convertible_to_integer_(true) {
     for (int i = 0; i < Representation::kNumRepresentations; i++) {
       non_phi_uses_[i] = 0;
       indirect_uses_[i] = 0;
@@ -1876,16 +2060,12 @@
 
   int merged_index() const { return merged_index_; }
 
-  virtual const char* Mnemonic() const { return "phi"; }
-
   virtual void PrintTo(StringStream* stream);
 
 #ifdef DEBUG
   virtual void Verify();
 #endif
 
-  DECLARE_INSTRUCTION(Phi)
-
   void InitRealUses(int id);
   void AddNonPhiUsesFrom(HPhi* other);
   void AddIndirectUsesTo(int* use_count);
@@ -1912,6 +2092,20 @@
   bool is_live() { return is_live_; }
   void set_is_live(bool b) { is_live_ = b; }
 
+  static HPhi* cast(HValue* value) {
+    ASSERT(value->IsPhi());
+    return reinterpret_cast<HPhi*>(value);
+  }
+  virtual Opcode opcode() const { return HValue::kPhi; }
+
+  virtual bool IsConvertibleToInteger() const {
+    return is_convertible_to_integer_;
+  }
+
+  void set_is_convertible_to_integer(bool b) {
+    is_convertible_to_integer_ = b;
+  }
+
  protected:
   virtual void DeleteFromGraph();
   virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -1926,6 +2120,7 @@
   int indirect_uses_[Representation::kNumRepresentations];
   int phi_id_;
   bool is_live_;
+  bool is_convertible_to_integer_;
 };
 
 
@@ -1940,7 +2135,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject, "arguments-object")
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
 };
 
 
@@ -1956,6 +2151,14 @@
     return Representation::None();
   }
 
+  virtual bool IsConvertibleToInteger() const {
+    if (handle_->IsSmi()) return true;
+    if (handle_->IsHeapNumber() &&
+        (HeapNumber::cast(*handle_)->value() ==
+         static_cast<double>(NumberToInt32(*handle_)))) return true;
+    return false;
+  }
+
   virtual bool EmitAtUses() { return !representation().IsDouble(); }
   virtual void PrintDataTo(StringStream* stream);
   virtual HType CalculateInferredType();
@@ -1985,7 +2188,7 @@
   virtual void Verify() { }
 #endif
 
-  DECLARE_CONCRETE_INSTRUCTION(Constant, "constant")
+  DECLARE_CONCRETE_INSTRUCTION(Constant)
 
  protected:
   virtual Range* InferRange();
@@ -1997,14 +2200,13 @@
 
  private:
   Handle<Object> handle_;
-  HType constant_type_;
 
   // The following two values represent the int32 and the double value of the
   // given constant if there is a lossless conversion between the constant
   // and the specific representation.
-  bool has_int32_value_;
+  bool has_int32_value_ : 1;
+  bool has_double_value_ : 1;
   int32_t int32_value_;
-  bool has_double_value_;
   double double_value_;
 };
 
@@ -2034,8 +2236,6 @@
   virtual bool IsCommutative() const { return false; }
 
   virtual void PrintDataTo(StringStream* stream);
-
-  DECLARE_INSTRUCTION(BinaryOperation)
 };
 
 
@@ -2065,7 +2265,7 @@
   HValue* length() { return OperandAt(2); }
   HValue* elements() { return OperandAt(3); }
 
-  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply_arguments")
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
 };
 
 
@@ -2078,7 +2278,7 @@
     SetFlag(kUseGVN);
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
@@ -2100,7 +2300,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2130,7 +2330,7 @@
   HValue* length() { return OperandAt(1); }
   HValue* index() { return OperandAt(2); }
 
-  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access_arguments_at")
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
 
   virtual bool DataEquals(HValue* other) { return true; }
 };
@@ -2157,7 +2357,7 @@
   HValue* index() { return left(); }
   HValue* length() { return right(); }
 
-  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2188,7 +2388,7 @@
 
   virtual HType CalculateInferredType();
 
-  DECLARE_INSTRUCTION(BitwiseBinaryOperation)
+  DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
 };
 
 
@@ -2218,8 +2418,6 @@
     }
     return HValue::InferredRepresentation();
   }
-
-  DECLARE_INSTRUCTION(ArithmeticBinaryOperation)
 };
 
 
@@ -2235,7 +2433,7 @@
   void SetInputRepresentation(Representation r);
 
   virtual bool EmitAtUses() {
-    return !HasSideEffects() && (uses()->length() <= 1);
+    return !HasSideEffects() && !HasMultipleUses();
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
@@ -2253,7 +2451,7 @@
     return HValue::Hashcode() * 7 + token_;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Compare, "compare")
+  DECLARE_CONCRETE_INSTRUCTION(Compare)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -2277,7 +2475,7 @@
   }
 
   virtual bool EmitAtUses() {
-    return !HasSideEffects() && (uses()->length() <= 1);
+    return !HasSideEffects() && !HasMultipleUses();
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
@@ -2285,13 +2483,47 @@
   }
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
+  DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
+class HCompareSymbolEq: public HBinaryOperation {
+ public:
+  HCompareSymbolEq(HValue* left, HValue* right, Token::Value op)
+      : HBinaryOperation(left, right), op_(op) {
+    ASSERT(op == Token::EQ || op == Token::EQ_STRICT);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
+  }
+
+  Token::Value op() const { return op_; }
+
+  virtual bool EmitAtUses() {
+    return !HasSideEffects() && !HasMultipleUses();
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual HType CalculateInferredType() { return HType::Boolean(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareSymbolEq)
+
+ protected:
+  virtual bool DataEquals(HValue* other) {
+    return op_ == HCompareSymbolEq::cast(other)->op_;
+  }
+
+ private:
+  const Token::Value op_;
+};
+
+
 class HUnaryPredicate: public HUnaryOperation {
  public:
   explicit HUnaryPredicate(HValue* value) : HUnaryOperation(value) {
@@ -2300,7 +2532,7 @@
   }
 
   virtual bool EmitAtUses() {
-    return !HasSideEffects() && (uses()->length() <= 1);
+    return !HasSideEffects() && !HasMultipleUses();
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
@@ -2317,7 +2549,7 @@
 
   bool is_strict() const { return is_strict_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is_null")
+  DECLARE_CONCRETE_INSTRUCTION(IsNull)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -2334,7 +2566,7 @@
  public:
   explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
+  DECLARE_CONCRETE_INSTRUCTION(IsObject)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2345,7 +2577,18 @@
  public:
   explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
+  DECLARE_CONCRETE_INSTRUCTION(IsSmi)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
+class HIsUndetectable: public HUnaryPredicate {
+ public:
+  explicit HIsUndetectable(HValue* value) : HUnaryPredicate(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectable)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2360,14 +2603,14 @@
   }
 
   virtual bool EmitAtUses() {
-    return !HasSideEffects() && (uses()->length() <= 1);
+    return !HasSideEffects() && !HasMultipleUses();
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is_construct_call")
+  DECLARE_CONCRETE_INSTRUCTION(IsConstructCall)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2388,7 +2631,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type")
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -2406,7 +2649,7 @@
  public:
   explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
 
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2417,7 +2660,7 @@
  public:
   explicit HGetCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
 
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get_cached_array_index")
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2429,7 +2672,7 @@
   HClassOfTest(HValue* value, Handle<String> class_name)
       : HUnaryPredicate(value), class_name_(class_name) { }
 
-  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class_of_test")
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest)
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -2454,7 +2697,7 @@
   Handle<String> type_literal() { return type_literal_; }
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof_is")
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIs)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -2482,7 +2725,7 @@
   HValue* right() { return OperandAt(2); }
 
   virtual bool EmitAtUses() {
-    return !HasSideEffects() && (uses()->length() <= 1);
+    return !HasSideEffects() && !HasMultipleUses();
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
@@ -2491,7 +2734,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of")
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
 };
 
 
@@ -2509,8 +2752,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
-                               "instance_of_known_global")
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal)
 
  private:
   Handle<JSFunction> function_;
@@ -2529,7 +2771,7 @@
     return (index == 1) ? Representation::None() : Representation::Double();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_CONCRETE_INSTRUCTION(Power)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2552,7 +2794,7 @@
 
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(Add, "add")
+  DECLARE_CONCRETE_INSTRUCTION(Add)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2569,7 +2811,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
+  DECLARE_CONCRETE_INSTRUCTION(Sub)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2591,7 +2833,7 @@
     return !representation().IsTagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
+  DECLARE_CONCRETE_INSTRUCTION(Mul)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2618,7 +2860,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
+  DECLARE_CONCRETE_INSTRUCTION(Mod)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2636,7 +2878,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  DECLARE_CONCRETE_INSTRUCTION(Div, "div")
+  DECLARE_CONCRETE_INSTRUCTION(Div)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2653,7 +2895,7 @@
   virtual bool IsCommutative() const { return true; }
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
+  DECLARE_CONCRETE_INSTRUCTION(BitAnd)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2670,7 +2912,7 @@
   virtual bool IsCommutative() const { return true; }
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
+  DECLARE_CONCRETE_INSTRUCTION(BitXor)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2685,7 +2927,7 @@
   virtual bool IsCommutative() const { return true; }
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
+  DECLARE_CONCRETE_INSTRUCTION(BitOr)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2702,7 +2944,7 @@
   virtual Range* InferRange();
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
+  DECLARE_CONCRETE_INSTRUCTION(Shl)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2716,7 +2958,7 @@
 
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
+  DECLARE_CONCRETE_INSTRUCTION(Shr)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2731,7 +2973,7 @@
   virtual Range* InferRange();
   virtual HType CalculateInferredType();
 
-  DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
+  DECLARE_CONCRETE_INSTRUCTION(Sar)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -2750,7 +2992,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr_entry")
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry)
 
  private:
   int ast_id_;
@@ -2771,7 +3013,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+  DECLARE_CONCRETE_INSTRUCTION(Parameter)
 
  private:
   unsigned index_;
@@ -2803,7 +3045,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub")
+  DECLARE_CONCRETE_INSTRUCTION(CallStub)
 
  private:
   CodeStub::Major major_key_;
@@ -2819,7 +3061,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown_osr_value")
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
 };
 
 
@@ -2846,7 +3088,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load_global_cell")
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -2884,7 +3126,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load_global_generic")
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
 
  private:
   Handle<Object> name_;
@@ -2911,7 +3153,7 @@
   }
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store_global_cell")
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
@@ -2947,7 +3189,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store_global_generic")
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric)
 
  private:
   Handle<Object> name_;
@@ -2972,7 +3214,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -3012,7 +3254,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store_context_slot")
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
 
  private:
   int slot_index_;
@@ -3044,7 +3286,7 @@
   }
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load_named_field")
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -3073,8 +3315,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
-                               "load_named_field_polymorphic")
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic)
 
   static const int kMaxLoadPolymorphism = 4;
 
@@ -3105,7 +3346,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load_named_generic")
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
 
  private:
   Handle<Object> name_;
@@ -3127,7 +3368,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load_function_prototype")
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -3153,8 +3394,9 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
-                               "load_keyed_fast_element")
+  bool RequiresHoleCheck() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -3168,7 +3410,8 @@
                                     ExternalArrayType array_type)
       : HBinaryOperation(external_elements, key),
         array_type_(array_type) {
-    if (array_type == kExternalFloatArray) {
+    if (array_type == kExternalFloatArray ||
+        array_type == kExternalDoubleArray) {
       set_representation(Representation::Double());
     } else {
       set_representation(Representation::Integer32());
@@ -3192,8 +3435,7 @@
   HValue* key() { return OperandAt(1); }
   ExternalArrayType array_type() const { return array_type_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
-                               "load_keyed_specialized_array_element")
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -3210,7 +3452,7 @@
 
 class HLoadKeyedGeneric: public HTemplateInstruction<3> {
  public:
-  HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key) {
+  HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
     set_representation(Representation::Tagged());
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
@@ -3228,7 +3470,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
 };
 
 
@@ -3250,7 +3492,7 @@
     }
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store_named_field")
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -3305,7 +3547,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
 
  private:
   Handle<String> name_;
@@ -3338,8 +3580,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
-                               "store_keyed_fast_element")
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
 };
 
 
@@ -3362,7 +3603,8 @@
     if (index == 0) {
       return Representation::External();
     } else {
-      if (index == 2 && array_type() == kExternalFloatArray) {
+      if (index == 2 && (array_type() == kExternalFloatArray ||
+                         array_type() == kExternalDoubleArray)) {
         return Representation::Double();
       } else {
         return Representation::Integer32();
@@ -3375,8 +3617,8 @@
   HValue* value() { return OperandAt(2); }
   ExternalArrayType array_type() const { return array_type_; }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
-                               "store_keyed_specialized_array_element")
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
+
  private:
   ExternalArrayType array_type_;
 };
@@ -3409,13 +3651,36 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
 
  private:
   bool strict_mode_;
 };
 
 
+class HStringAdd: public HBinaryOperation {
+ public:
+  HStringAdd(HValue* left, HValue* right) : HBinaryOperation(left, right) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual HType CalculateInferredType() {
+    return HType::String();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
 class HStringCharCodeAt: public HBinaryOperation {
  public:
   HStringCharCodeAt(HValue* string, HValue* index)
@@ -3434,7 +3699,7 @@
   HValue* string() { return OperandAt(0); }
   HValue* index() { return OperandAt(1); }
 
-  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -3458,7 +3723,7 @@
 
   virtual bool DataEquals(HValue* other) { return true; }
 
-  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string_char_from_code")
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
 };
 
 
@@ -3479,7 +3744,7 @@
     return HType::Smi();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
+  DECLARE_CONCRETE_INSTRUCTION(StringLength)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
@@ -3526,7 +3791,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array_literal")
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
 
  private:
   int length_;
@@ -3560,7 +3825,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal")
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
 
  private:
   Handle<FixedArray> constant_properties_;
@@ -3585,7 +3850,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp_literal")
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
 
  private:
   Handle<String> pattern_;
@@ -3604,7 +3869,7 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function_literal")
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
 
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   bool pretenure() const { return pretenure_; }
@@ -3625,7 +3890,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+  DECLARE_CONCRETE_INSTRUCTION(Typeof)
 };
 
 
@@ -3643,7 +3908,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to_fast_properties")
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
 };
 
 
@@ -3657,7 +3922,7 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value_of")
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf)
 };
 
 
@@ -3673,12 +3938,38 @@
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete_property")
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty)
 
   HValue* object() { return left(); }
   HValue* key() { return right(); }
 };
 
+
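+// The JavaScript 'in' operator.  The property lookup may run arbitrary
+// code, so the instruction carries all side effects and always yields a
+// boolean.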
+class HIn: public HTemplateInstruction<2> {
+ public:
+  HIn(HValue* key, HValue* object) {
+    SetOperandAt(0, key);
+    SetOperandAt(1, object);
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  HValue* key() { return OperandAt(0); }
+  HValue* object() { return OperandAt(1); }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual HType CalculateInferredType() {
+    return HType::Boolean();
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(In)
+};
+
 #undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index d07e6c7..1b37d93 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -29,7 +29,6 @@
 #include "hydrogen.h"
 
 #include "codegen.h"
-#include "data-flow.h"
 #include "full-codegen.h"
 #include "hashmap.h"
 #include "lithium-allocator.h"
@@ -116,12 +115,13 @@
 }
 
 
-HDeoptimize* HBasicBlock::CreateDeoptimize() {
+HDeoptimize* HBasicBlock::CreateDeoptimize(
+    HDeoptimize::UseEnvironment has_uses) {
   ASSERT(HasEnvironment());
+  if (has_uses == HDeoptimize::kNoUses) return new(zone()) HDeoptimize(0);
+
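+  // For kUseAll every value in the current environment is added as an
+  // input of the deoptimize instruction so that it stays live up to the
+  // deoptimization point.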
   HEnvironment* environment = last_environment();
-
   HDeoptimize* instr = new(zone()) HDeoptimize(environment->length());
-
   for (int i = 0; i < environment->length(); i++) {
     HValue* val = environment->values()->at(i);
     instr->AddEnvironmentValue(val);
@@ -242,7 +242,7 @@
 
 
 void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
-  if (!predecessors_.is_empty()) {
+  if (HasPredecessor()) {
     // Only loop header blocks can have a predecessor added after
     // instructions have been added to the block (they have phis for all
     // values in the environment, these phis may be eliminated later).
@@ -521,6 +521,22 @@
   return GetConstant(&constant_false_, isolate()->heap()->false_value());
 }
 
+HGraphBuilder::HGraphBuilder(CompilationInfo* info,
+                             TypeFeedbackOracle* oracle)
+    : function_state_(NULL),
+      initial_function_state_(this, info, oracle),
+      ast_context_(NULL),
+      break_scope_(NULL),
+      graph_(NULL),
+      current_block_(NULL),
+      inlined_count_(0),
+      zone_(info->isolate()->zone()),
+      inline_bailout_(false) {
+  // This is not initialized in the initializer list because the
+  // constructor for the initial state relies on function_state_ == NULL
+  // to know it's the initial state.
+  function_state_ = &initial_function_state_;
+}
 
 HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
                                        HBasicBlock* second,
@@ -644,7 +660,7 @@
     HInstruction* instr = blocks()->at(i)->first();
     while (instr != NULL) {
       HValue* value = instr->Canonicalize();
-      if (value != instr) instr->ReplaceAndDelete(value);
+      if (value != instr) instr->DeleteAndReplaceWith(value);
       instr = instr->next();
     }
   }
@@ -726,9 +742,9 @@
 void HGraph::EliminateRedundantPhis() {
   HPhase phase("Redundant phi elimination", this);
 
-  // Worklist of phis that can potentially be eliminated. Initialized
-  // with all phi nodes. When elimination of a phi node modifies
-  // another phi node the modified phi node is added to the worklist.
+  // Worklist of phis that can potentially be eliminated. Initialized with
+  // all phi nodes. When elimination of a phi node modifies another phi node
+  // the modified phi node is added to the worklist.
   ZoneList<HPhi*> worklist(blocks_.length());
   for (int i = 0; i < blocks_.length(); ++i) {
     worklist.AddAll(*blocks_[i]->phis());
@@ -742,18 +758,14 @@
     if (block == NULL) continue;
 
     // Get replacement value if phi is redundant.
-    HValue* value = phi->GetRedundantReplacement();
+    HValue* replacement = phi->GetRedundantReplacement();
 
-    if (value != NULL) {
-      // Iterate through uses finding the ones that should be
-      // replaced.
-      SmallPointerList<HValue>* uses = phi->uses();
-      while (!uses->is_empty()) {
-        HValue* use = uses->RemoveLast();
-        if (use != NULL) {
-          phi->ReplaceAtUse(use, value);
-          if (use->IsPhi()) worklist.Add(HPhi::cast(use));
-        }
+    if (replacement != NULL) {
+      // Iterate through the uses and replace them all.
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        HValue* value = it.value();
+        value->SetOperandAt(it.index(), replacement);
+        if (value->IsPhi()) worklist.Add(HPhi::cast(value));
       }
       block->RemovePhi(phi);
     }
@@ -805,6 +817,19 @@
 }
 
 
+bool HGraph::CheckPhis() {
+  int block_count = blocks_.length();
+  for (int i = 0; i < block_count; ++i) {
+    for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+      HPhi* phi = blocks_[i]->phis()->at(j);
+      // We don't support phi uses of arguments for now.
+      if (phi->CheckFlag(HValue::kIsArguments)) return false;
+    }
+  }
+  return true;
+}
+
+
 bool HGraph::CollectPhis() {
   int block_count = blocks_.length();
   phi_list_ = new ZoneList<HPhi*>(block_count);
@@ -812,8 +837,6 @@
     for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
       HPhi* phi = blocks_[i]->phis()->at(j);
       phi_list_->Add(phi);
-      // We don't support phi uses of arguments for now.
-      if (phi->CheckFlag(HValue::kIsArguments)) return false;
     }
   }
   return true;
@@ -831,8 +854,8 @@
     HValue* current = worklist->RemoveLast();
     in_worklist.Remove(current->id());
     if (current->UpdateInferredType()) {
-      for (int j = 0; j < current->uses()->length(); j++) {
-        HValue* use = current->uses()->at(j);
+      for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
+        HValue* use = it.value();
         if (!in_worklist.Contains(use->id())) {
           in_worklist.Add(use->id());
           worklist->Add(use);
@@ -1025,13 +1048,13 @@
 }
 
 
-HValueMap::HValueMap(const HValueMap* other)
+HValueMap::HValueMap(Zone* zone, const HValueMap* other)
     : array_size_(other->array_size_),
       lists_size_(other->lists_size_),
       count_(other->count_),
       present_flags_(other->present_flags_),
-      array_(ZONE->NewArray<HValueMapListElement>(other->array_size_)),
-      lists_(ZONE->NewArray<HValueMapListElement>(other->lists_size_)),
+      array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
+      lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
       free_list_head_(other->free_list_head_) {
   memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
   memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
@@ -1244,13 +1267,49 @@
 }
 
 
+// Simple sparse set with O(1) add, contains, and clear.
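+// Membership is decided by cross-checking dense_ and sparse_, so neither
+// array needs to be initialized up front: a garbage sparse_[n] entry can
+// only pass if dense_ really records that n was added.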
+class SparseSet {
+ public:
+  SparseSet(Zone* zone, int capacity)
+      : capacity_(capacity),
+        length_(0),
+        dense_(zone->NewArray<int>(capacity)),
+        sparse_(zone->NewArray<int>(capacity)) {}
+
+  bool Contains(int n) const {
+    ASSERT(0 <= n && n < capacity_);
+    int d = sparse_[n];
+    return 0 <= d && d < length_ && dense_[d] == n;
+  }
+
+  bool Add(int n) {
+    if (Contains(n)) return false;
+    dense_[length_] = n;
+    sparse_[n] = length_;
+    ++length_;
+    return true;
+  }
+
+  void Clear() { length_ = 0; }
+
+ private:
+  int capacity_;
+  int length_;
+  int* dense_;
+  int* sparse_;
+
+  DISALLOW_COPY_AND_ASSIGN(SparseSet);
+};
+
+
 class HGlobalValueNumberer BASE_EMBEDDED {
  public:
   explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
       : graph_(graph),
         info_(info),
-        block_side_effects_(graph_->blocks()->length()),
-        loop_side_effects_(graph_->blocks()->length()) {
+        block_side_effects_(graph->blocks()->length()),
+        loop_side_effects_(graph->blocks()->length()),
+        visited_on_paths_(graph->zone(), graph->blocks()->length()) {
     ASSERT(info->isolate()->heap()->allow_allocation(false));
     block_side_effects_.AddBlock(0, graph_->blocks()->length());
     loop_side_effects_.AddBlock(0, graph_->blocks()->length());
@@ -1262,6 +1321,8 @@
   void Analyze();
 
  private:
+  int CollectSideEffectsOnPathsToDominatedBlock(HBasicBlock* dominator,
+                                                HBasicBlock* dominated);
   void AnalyzeBlock(HBasicBlock* block, HValueMap* map);
   void ComputeBlockSideEffects();
   void LoopInvariantCodeMotion();
@@ -1283,6 +1344,10 @@
 
   // A map of loop header block IDs to their loop's side effects.
   ZoneList<int> loop_side_effects_;
+
+  // Used when collecting side effects on paths from dominator to
+  // dominated.
+  SparseSet visited_on_paths_;
 };
 
 
@@ -1418,8 +1483,30 @@
 }
 
 
+int HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
+    HBasicBlock* dominator, HBasicBlock* dominated) {
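+  // Walk backwards from |dominated| through predecessors whose block ids
+  // lie strictly between |dominator| and |dominated|, OR-ing in their (and
+  // their loops') side effects.  visited_on_paths_ keeps blocks shared by
+  // several paths from being processed twice.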
+  int side_effects = 0;
+  for (int i = 0; i < dominated->predecessors()->length(); ++i) {
+    HBasicBlock* block = dominated->predecessors()->at(i);
+    if (dominator->block_id() < block->block_id() &&
+        block->block_id() < dominated->block_id() &&
+        visited_on_paths_.Add(block->block_id())) {
+      side_effects |= block_side_effects_[block->block_id()];
+      if (block->IsLoopHeader()) {
+        side_effects |= loop_side_effects_[block->block_id()];
+      }
+      side_effects |= CollectSideEffectsOnPathsToDominatedBlock(
+          dominator, block);
+    }
+  }
+  return side_effects;
+}
+
+
 void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
-  TraceGVN("Analyzing block B%d\n", block->block_id());
+  TraceGVN("Analyzing block B%d%s\n",
+           block->block_id(),
+           block->IsLoopHeader() ? " (loop header)" : "");
 
   // If this is a loop header kill everything killed by the loop.
   if (block->IsLoopHeader()) {
@@ -1445,7 +1532,7 @@
                  instr->Mnemonic(),
                  other->id(),
                  other->Mnemonic());
-        instr->ReplaceAndDelete(other);
+        instr->DeleteAndReplaceWith(other);
       } else {
         map->Add(instr);
       }
@@ -1460,23 +1547,18 @@
     // No need to copy the map for the last child in the dominator tree.
     HValueMap* successor_map = (i == length - 1) ? map : map->Copy(zone());
 
-    // If the dominated block is not a successor to this block we have to
-    // kill everything killed on any path between this block and the
-    // dominated block.  Note we rely on the block ordering.
-    bool is_successor = false;
-    int predecessor_count = dominated->predecessors()->length();
-    for (int j = 0; !is_successor && j < predecessor_count; ++j) {
-      is_successor = (dominated->predecessors()->at(j) == block);
+    // Kill everything killed on any path between this block and the
+    // dominated block.
+    // We don't have to traverse these paths if the value map is
+    // already empty.
+    // If the range of block ids (block_id, dominated_id) is empty
+    // there are no such paths.
+    if (!successor_map->IsEmpty() &&
+        block->block_id() + 1 < dominated->block_id()) {
+      visited_on_paths_.Clear();
+      successor_map->Kill(CollectSideEffectsOnPathsToDominatedBlock(block,
+                                                                    dominated));
     }
-
-    if (!is_successor) {
-      int side_effects = 0;
-      for (int j = block->block_id() + 1; j < dominated->block_id(); ++j) {
-        side_effects |= block_side_effects_[j];
-      }
-      successor_map->Kill(side_effects);
-    }
-
     AnalyzeBlock(dominated, successor_map);
   }
 }
@@ -1529,12 +1611,12 @@
 }
 
 
-void HInferRepresentation::AddDependantsToWorklist(HValue* current) {
-  for (int i = 0; i < current->uses()->length(); ++i) {
-    AddToWorklist(current->uses()->at(i));
+void HInferRepresentation::AddDependantsToWorklist(HValue* value) {
+  for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+    AddToWorklist(it.value());
   }
-  for (int i = 0; i < current->OperandCount(); ++i) {
-    AddToWorklist(current->OperandAt(i));
+  for (int i = 0; i < value->OperandCount(); ++i) {
+    AddToWorklist(value->OperandAt(i));
   }
 }
 
@@ -1543,37 +1625,30 @@
 // given as the parameter has a benefit in terms of less necessary type
 // conversions. If there is a benefit, then the representation of the value is
 // specialized.
-void HInferRepresentation::InferBasedOnUses(HValue* current) {
-  Representation r = current->representation();
-  if (r.IsSpecialization() || current->HasNoUses()) return;
-  ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
-  Representation new_rep = TryChange(current);
+void HInferRepresentation::InferBasedOnUses(HValue* value) {
+  Representation r = value->representation();
+  if (r.IsSpecialization() || value->HasNoUses()) return;
+  ASSERT(value->CheckFlag(HValue::kFlexibleRepresentation));
+  Representation new_rep = TryChange(value);
   if (!new_rep.IsNone()) {
-    if (!current->representation().Equals(new_rep)) {
-      current->ChangeRepresentation(new_rep);
-      AddDependantsToWorklist(current);
+    if (!value->representation().Equals(new_rep)) {
+      value->ChangeRepresentation(new_rep);
+      AddDependantsToWorklist(value);
     }
   }
 }
 
 
-Representation HInferRepresentation::TryChange(HValue* current) {
+Representation HInferRepresentation::TryChange(HValue* value) {
   // Array of use counts for each representation.
-  int use_count[Representation::kNumRepresentations];
-  for (int i = 0; i < Representation::kNumRepresentations; i++) {
-    use_count[i] = 0;
-  }
+  int use_count[Representation::kNumRepresentations] = { 0 };
 
-  for (int i = 0; i < current->uses()->length(); ++i) {
-    HValue* use = current->uses()->at(i);
-    int index = use->LookupOperandIndex(0, current);
-    Representation req_rep = use->RequiredInputRepresentation(index);
-    if (req_rep.IsNone()) continue;
-    if (use->IsPhi()) {
-      HPhi* phi = HPhi::cast(use);
-      phi->AddIndirectUsesTo(&use_count[0]);
-    }
-    use_count[req_rep.kind()]++;
+  for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    Representation rep = use->RequiredInputRepresentation(it.index());
+    if (rep.IsNone()) continue;
+    if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
+    ++use_count[rep.kind()];
   }
   int tagged_count = use_count[Representation::kTagged];
   int double_count = use_count[Representation::kDouble];
@@ -1581,19 +1656,17 @@
   int non_tagged_count = double_count + int32_count;
 
   // If a non-loop phi has tagged uses, don't convert it to untagged.
-  if (current->IsPhi() && !current->block()->IsLoopHeader()) {
+  if (value->IsPhi() && !value->block()->IsLoopHeader()) {
     if (tagged_count > 0) return Representation::None();
   }
 
   if (non_tagged_count >= tagged_count) {
-    // More untagged than tagged.
-    if (double_count > 0) {
-      // There is at least one usage that is a double => guess that the
-      // correct representation is double.
-      return Representation::Double();
-    } else if (int32_count > 0) {
-      return Representation::Integer32();
+    if (int32_count > 0) {
+      if (!value->IsPhi() || value->IsConvertibleToInteger()) {
+        return Representation::Integer32();
+      }
     }
+    if (double_count > 0) return Representation::Double();
   }
   return Representation::None();
 }
@@ -1602,41 +1675,40 @@
 void HInferRepresentation::Analyze() {
   HPhase phase("Infer representations", graph_);
 
-  // (1) Initialize bit vectors and count real uses. Each phi
-  // gets a bit-vector of length <number of phis>.
+  // (1) Initialize bit vectors and count real uses. Each phi gets a
+  // bit-vector of length <number of phis>.
   const ZoneList<HPhi*>* phi_list = graph_->phi_list();
-  int num_phis = phi_list->length();
-  ScopedVector<BitVector*> connected_phis(num_phis);
-  for (int i = 0; i < num_phis; i++) {
+  int phi_count = phi_list->length();
+  ZoneList<BitVector*> connected_phis(phi_count);
+  for (int i = 0; i < phi_count; ++i) {
     phi_list->at(i)->InitRealUses(i);
-    connected_phis[i] = new(zone()) BitVector(num_phis);
-    connected_phis[i]->Add(i);
+    BitVector* connected_set = new(zone()) BitVector(phi_count);
+    connected_set->Add(i);
+    connected_phis.Add(connected_set);
   }
 
-  // (2) Do a fixed point iteration to find the set of connected phis.
-  // A phi is connected to another phi if its value is used either
-  // directly or indirectly through a transitive closure of the def-use
-  // relation.
+  // (2) Do a fixed point iteration to find the set of connected phis.  A
+  // phi is connected to another phi if its value is used either directly or
+  // indirectly through a transitive closure of the def-use relation.
   bool change = true;
   while (change) {
     change = false;
-    for (int i = 0; i < num_phis; i++) {
+    for (int i = 0; i < phi_count; ++i) {
       HPhi* phi = phi_list->at(i);
-      for (int j = 0; j < phi->uses()->length(); j++) {
-        HValue* use = phi->uses()->at(j);
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        HValue* use = it.value();
         if (use->IsPhi()) {
-          int phi_use = HPhi::cast(use)->phi_id();
-          if (connected_phis[i]->UnionIsChanged(*connected_phis[phi_use])) {
-            change = true;
-          }
+          int id = HPhi::cast(use)->phi_id();
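+          // The || may short-circuit the union away once |change| is true;
+          // that is safe because the outer loop then runs another pass and
+          // retries it before the fixed point is declared.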
+          change = change ||
+              connected_phis[i]->UnionIsChanged(*connected_phis[id]);
         }
       }
     }
   }
 
-  // (3) Sum up the non-phi use counts of all connected phis.
-  // Don't include the non-phi uses of the phi itself.
-  for (int i = 0; i < num_phis; i++) {
+  // (3) Sum up the non-phi use counts of all connected phis.  Don't include
+  // the non-phi uses of the phi itself.
+  for (int i = 0; i < phi_count; ++i) {
     HPhi* phi = phi_list->at(i);
     for (BitVector::Iterator it(connected_phis.at(i));
          !it.Done();
@@ -1649,6 +1721,25 @@
     }
   }
 
+  // (4) Compute phis that definitely can't be converted to integer
+  // without deoptimization and mark them to avoid unnecessary deoptimization.
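+  // Non-convertibility propagates through phi operands, so iterate to a
+  // fixed point.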
+  change = true;
+  while (change) {
+    change = false;
+    for (int i = 0; i < phi_count; ++i) {
+      HPhi* phi = phi_list->at(i);
+      for (int j = 0; j < phi->OperandCount(); ++j) {
+        if (phi->IsConvertibleToInteger() &&
+            !phi->OperandAt(j)->IsConvertibleToInteger()) {
+          phi->set_is_convertible_to_integer(false);
+          change = true;
+          break;
+        }
+      }
+    }
+  }
+
   for (int i = 0; i < graph_->blocks()->length(); ++i) {
     HBasicBlock* block = graph_->blocks()->at(i);
     const ZoneList<HPhi*>* phis = block->phis();
@@ -1746,17 +1837,16 @@
 
 
 void HGraph::InsertRepresentationChangeForUse(HValue* value,
-                                              HValue* use,
+                                              HValue* use_value,
+                                              int use_index,
                                               Representation to) {
   // Insert the representation change right before its use. For phi-uses we
   // insert at the end of the corresponding predecessor.
   HInstruction* next = NULL;
-  if (use->IsPhi()) {
-    int index = 0;
-    while (use->OperandAt(index) != value) ++index;
-    next = use->block()->predecessors()->at(index)->end();
+  if (use_value->IsPhi()) {
+    next = use_value->block()->predecessors()->at(use_index)->end();
   } else {
-    next = HInstruction::cast(use);
+    next = HInstruction::cast(use_value);
   }
 
   // For constants we try to make the representation change at compile
   // time. When a change is not possible without loss of
   // information we treat constants like normal instructions and insert the
   // information we treat constants like normal instructions and insert the
   // change instructions for them.
   HInstruction* new_value = NULL;
-  bool is_truncating = use->CheckFlag(HValue::kTruncatingToInt32);
-  bool deoptimize_on_undefined = use->CheckFlag(HValue::kDeoptimizeOnUndefined);
+  bool is_truncating = use_value->CheckFlag(HValue::kTruncatingToInt32);
+  bool deoptimize_on_undefined =
+      use_value->CheckFlag(HValue::kDeoptimizeOnUndefined);
   if (value->IsConstant()) {
     HConstant* constant = HConstant::cast(value);
     // Try to create a new copy of the constant with the new representation.
@@ -1780,89 +1871,32 @@
   }
 
   new_value->InsertBefore(next);
-  value->ReplaceFirstAtUse(use, new_value, to);
+  use_value->SetOperandAt(use_index, new_value);
 }
 
 
-int CompareConversionUses(HValue* a,
-                          HValue* b,
-                          Representation a_rep,
-                          Representation b_rep) {
-  if (a_rep.kind() > b_rep.kind()) {
-    // Make sure specializations are separated in the result array.
-    return 1;
-  }
-  // Put truncating conversions before non-truncating conversions.
-  bool a_truncate = a->CheckFlag(HValue::kTruncatingToInt32);
-  bool b_truncate = b->CheckFlag(HValue::kTruncatingToInt32);
-  if (a_truncate != b_truncate) {
-    return a_truncate ? -1 : 1;
-  }
-  // Sort by increasing block ID.
-  return a->block()->block_id() - b->block()->block_id();
-}
-
-
-void HGraph::InsertRepresentationChangesForValue(
-    HValue* current,
-    ZoneList<HValue*>* to_convert,
-    ZoneList<Representation>* to_convert_reps) {
-  Representation r = current->representation();
+void HGraph::InsertRepresentationChangesForValue(HValue* value) {
+  Representation r = value->representation();
   if (r.IsNone()) return;
-  if (current->uses()->length() == 0) return;
+  if (value->HasNoUses()) return;
 
-  // Collect the representation changes in a sorted list.  This allows
-  // us to avoid duplicate changes without searching the list.
-  ASSERT(to_convert->is_empty());
-  ASSERT(to_convert_reps->is_empty());
-  for (int i = 0; i < current->uses()->length(); ++i) {
-    HValue* use = current->uses()->at(i);
-    // The occurrences index means the index within the operand array of "use"
-    // at which "current" is used. While iterating through the use array we
-    // also have to iterate over the different occurrence indices.
-    int occurrence_index = 0;
-    if (use->UsesMultipleTimes(current)) {
-      occurrence_index = current->uses()->CountOccurrences(use, 0, i - 1);
-      if (FLAG_trace_representation) {
-        PrintF("Instruction %d is used multiple times at %d; occurrence=%d\n",
-               current->id(),
-               use->id(),
-               occurrence_index);
-      }
-    }
-    int operand_index = use->LookupOperandIndex(occurrence_index, current);
-    Representation req = use->RequiredInputRepresentation(operand_index);
+  for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+    HValue* use_value = it.value();
+    int use_index = it.index();
+    Representation req = use_value->RequiredInputRepresentation(use_index);
     if (req.IsNone() || req.Equals(r)) continue;
-    int index = 0;
-    while (index < to_convert->length() &&
-           CompareConversionUses(to_convert->at(index),
-                                 use,
-                                 to_convert_reps->at(index),
-                                 req) < 0) {
-      ++index;
-    }
-    if (FLAG_trace_representation) {
-      PrintF("Inserting a representation change to %s of %d for use at %d\n",
-             req.Mnemonic(),
-             current->id(),
-             use->id());
-    }
-    to_convert->InsertAt(index, use);
-    to_convert_reps->InsertAt(index, req);
+    InsertRepresentationChangeForUse(value, use_value, use_index, req);
+  }
+  if (value->HasNoUses()) {
+    ASSERT(value->IsConstant());
+    value->DeleteAndReplaceWith(NULL);
   }
 
-  for (int i = 0; i < to_convert->length(); ++i) {
-    HValue* use = to_convert->at(i);
-    Representation r_to = to_convert_reps->at(i);
-    InsertRepresentationChangeForUse(current, use, r_to);
+  // The only purpose of a HForceRepresentation is to represent the value
+  // after the (possible) HChange instruction.  We make it disappear.
+  if (value->IsForceRepresentation()) {
+    value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
   }
-
-  if (current->uses()->is_empty()) {
-    ASSERT(current->IsConstant());
-    current->Delete();
-  }
-  to_convert->Rewind(0);
-  to_convert_reps->Rewind(0);
 }
 
 
@@ -1887,8 +1921,8 @@
     for (int i = 0; i < phi_list()->length(); i++) {
       HPhi* phi = phi_list()->at(i);
       if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
-      for (int j = 0; j < phi->uses()->length(); j++) {
-        HValue* use = phi->uses()->at(j);
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        HValue* use = it.value();
         if (!use->CheckFlag(HValue::kTruncatingToInt32)) {
           phi->ClearFlag(HValue::kTruncatingToInt32);
           change = true;
@@ -1898,19 +1932,17 @@
     }
   }
 
-  ZoneList<HValue*> value_list(4);
-  ZoneList<Representation> rep_list(4);
   for (int i = 0; i < blocks_.length(); ++i) {
     // Process phi instructions first.
-    for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
-      HPhi* phi = blocks_[i]->phis()->at(j);
-      InsertRepresentationChangesForValue(phi, &value_list, &rep_list);
+    const ZoneList<HPhi*>* phis = blocks_[i]->phis();
+    for (int j = 0; j < phis->length(); j++) {
+      InsertRepresentationChangesForValue(phis->at(j));
     }
 
     // Process normal instructions.
     HInstruction* current = blocks_[i]->first();
     while (current != NULL) {
-      InsertRepresentationChangesForValue(current, &value_list, &rep_list);
+      InsertRepresentationChangesForValue(current);
       current = current->next();
     }
   }
@@ -1940,9 +1972,8 @@
   for (int i = 0; i < phi_list()->length(); i++) {
     HPhi* phi = phi_list()->at(i);
     if (phi->representation().IsDouble()) {
-      for (int j = 0; j < phi->uses()->length(); j++) {
-        HValue* use = phi->uses()->at(j);
-        if (use->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        if (it.value()->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
           RecursivelyMarkPhiDeoptimizeOnUndefined(phi);
           break;
         }
@@ -2058,6 +2089,9 @@
 void ValueContext::ReturnValue(HValue* value) {
   // The value is tracked in the bailout environment, and communicated
   // through the environment as the result of the expression.
+  if (!arguments_allowed() && value->CheckFlag(HValue::kIsArguments)) {
+    owner()->Bailout("bad value context for arguments value");
+  }
   owner()->Push(value);
 }
 
@@ -2074,6 +2108,9 @@
 
 
 void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+  if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
+    owner()->Bailout("bad value context for arguments object value");
+  }
   owner()->AddInstruction(instr);
   owner()->Push(instr);
   if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
@@ -2100,6 +2137,9 @@
   // property by always adding an empty block on the outgoing edges of this
   // branch.
   HGraphBuilder* builder = owner();
+  if (value->CheckFlag(HValue::kIsArguments)) {
+    builder->Bailout("arguments object value in a test context");
+  }
   HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
   HTest* test = new(zone()) HTest(value, empty_true, empty_false);
@@ -2112,37 +2152,17 @@
 
 
 // HGraphBuilder infrastructure for bailing out and checking bailouts.
-#define BAILOUT(reason)                         \
+#define CHECK_BAILOUT(call)                     \
   do {                                          \
-    Bailout(reason);                            \
-    return;                                     \
-  } while (false)
-
-
-#define CHECK_BAILOUT                           \
-  do {                                          \
+    call;                                       \
     if (HasStackOverflow()) return;             \
   } while (false)
 
 
-#define VISIT_FOR_EFFECT(expr)                  \
-  do {                                          \
-    VisitForEffect(expr);                       \
-    if (HasStackOverflow()) return;             \
-  } while (false)
-
-
-#define VISIT_FOR_VALUE(expr)                   \
-  do {                                          \
-    VisitForValue(expr);                        \
-    if (HasStackOverflow()) return;             \
-  } while (false)
-
-
-#define VISIT_FOR_CONTROL(expr, true_block, false_block)        \
+#define CHECK_ALIVE(call)                                       \
   do {                                                          \
-    VisitForControl(expr, true_block, false_block);             \
-    if (HasStackOverflow()) return;                             \
+    call;                                                       \
+    if (HasStackOverflow() || current_block() == NULL) return;  \
   } while (false)
 
 
@@ -2161,14 +2181,14 @@
 }
 
 
-void HGraphBuilder::VisitForValue(Expression* expr) {
-  ValueContext for_value(this);
+void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) {
+  ValueContext for_value(this, flag);
   Visit(expr);
 }
 
 
 void HGraphBuilder::VisitForTypeOf(Expression* expr) {
-  ValueContext for_value(this);
+  ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
   for_value.set_for_typeof(true);
   Visit(expr);
 }
@@ -2184,22 +2204,21 @@
 
 
 void HGraphBuilder::VisitArgument(Expression* expr) {
-  VISIT_FOR_VALUE(expr);
+  CHECK_ALIVE(VisitForValue(expr));
   Push(AddInstruction(new(zone()) HPushArgument(Pop())));
 }
 
 
 void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
   for (int i = 0; i < arguments->length(); i++) {
-    VisitArgument(arguments->at(i));
-    if (HasStackOverflow() || current_block() == NULL) return;
+    CHECK_ALIVE(VisitArgument(arguments->at(i)));
   }
 }
 
 
 void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
   for (int i = 0; i < exprs->length(); ++i) {
-    VISIT_FOR_VALUE(exprs->at(i));
+    CHECK_ALIVE(VisitForValue(exprs->at(i)));
   }
 }
 
@@ -2254,9 +2273,13 @@
   graph()->OrderBlocks();
   graph()->AssignDominators();
   graph()->EliminateRedundantPhis();
+  if (!graph()->CheckPhis()) {
+    Bailout("Unsupported phi use of arguments object");
+    return NULL;
+  }
   if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
   if (!graph()->CollectPhis()) {
-    Bailout("Phi-use of arguments object");
+    Bailout("Unsupported phi use of uninitialized constant");
     return NULL;
   }
 
@@ -2302,8 +2325,8 @@
     while (instr != NULL) {
       if (instr->IsBoundsCheck()) {
         // Replace all uses of the checked value with the original input.
-        ASSERT(instr->uses()->length() > 0);
-        instr->ReplaceValue(HBoundsCheck::cast(instr)->index());
+        ASSERT(instr->UseCount() > 0);
+        instr->ReplaceAllUsesWith(HBoundsCheck::cast(instr)->index());
       }
       instr = instr->next();
     }
@@ -2353,7 +2376,7 @@
 
 void HGraphBuilder::SetupScope(Scope* scope) {
   // We don't yet handle the function name for named function expressions.
-  if (scope->function() != NULL) BAILOUT("named function expression");
+  if (scope->function() != NULL) return Bailout("named function expression");
 
   HConstant* undefined_constant = new(zone()) HConstant(
       isolate()->factory()->undefined_value(), Representation::Tagged());
@@ -2362,14 +2385,21 @@
 
   // Set the initial values of parameters including "this".  "This" has
   // parameter index 0.
-  int count = scope->num_parameters() + 1;
-  for (int i = 0; i < count; ++i) {
+  ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
+
+  for (int i = 0; i < environment()->parameter_count(); ++i) {
     HInstruction* parameter = AddInstruction(new(zone()) HParameter(i));
     environment()->Bind(i, parameter);
   }
 
-  // Set the initial values of stack-allocated locals.
-  for (int i = count; i < environment()->length(); ++i) {
+  // First special is HContext.
+  HInstruction* context = AddInstruction(new(zone()) HContext);
+  environment()->BindContext(context);
+
+  // Initialize specials and locals to undefined.
+  for (int i = environment()->parameter_count() + 1;
+       i < environment()->length();
+       ++i) {
     environment()->Bind(i, undefined_constant);
   }
 
@@ -2379,7 +2409,7 @@
     if (!scope->arguments()->IsStackAllocated() ||
         (scope->arguments_shadow() != NULL &&
         !scope->arguments_shadow()->IsStackAllocated())) {
-      BAILOUT("context-allocated arguments");
+      return Bailout("context-allocated arguments");
     }
     HArgumentsObject* object = new(zone()) HArgumentsObject;
     AddInstruction(object);
@@ -2394,8 +2424,7 @@
 
 void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
-    Visit(statements->at(i));
-    if (HasStackOverflow() || current_block() == NULL) break;
+    CHECK_ALIVE(Visit(statements->at(i)));
   }
 }
 
@@ -2417,10 +2446,12 @@
 
 
 void HGraphBuilder::VisitBlock(Block* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    VisitStatements(stmt->statements());
-    CHECK_BAILOUT;
+    CHECK_BAILOUT(VisitStatements(stmt->statements()));
   }
   HBasicBlock* break_block = break_info.break_block();
   if (break_block != NULL) {
@@ -2432,15 +2463,24 @@
 
 
 void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   VisitForEffect(stmt->expression());
 }
 
 
 void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
 }
 
 
 void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (stmt->condition()->ToBooleanIsTrue()) {
     AddSimulate(stmt->ThenId());
     Visit(stmt->then_statement());
@@ -2450,20 +2490,27 @@
   } else {
     HBasicBlock* cond_true = graph()->CreateBasicBlock();
     HBasicBlock* cond_false = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->condition(), cond_true, cond_false);
-    cond_true->SetJoinId(stmt->ThenId());
-    cond_false->SetJoinId(stmt->ElseId());
+    CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));
 
-    set_current_block(cond_true);
-    Visit(stmt->then_statement());
-    CHECK_BAILOUT;
-    HBasicBlock* other = current_block();
+    if (cond_true->HasPredecessor()) {
+      cond_true->SetJoinId(stmt->ThenId());
+      set_current_block(cond_true);
+      CHECK_BAILOUT(Visit(stmt->then_statement()));
+      cond_true = current_block();
+    } else {
+      cond_true = NULL;
+    }
 
-    set_current_block(cond_false);
-    Visit(stmt->else_statement());
-    CHECK_BAILOUT;
+    if (cond_false->HasPredecessor()) {
+      cond_false->SetJoinId(stmt->ElseId());
+      set_current_block(cond_false);
+      CHECK_BAILOUT(Visit(stmt->else_statement()));
+      cond_false = current_block();
+    } else {
+      cond_false = NULL;
+    }
 
-    HBasicBlock* join = CreateJoin(other, current_block(), stmt->id());
+    HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->id());
     set_current_block(join);
   }
 }
@@ -2501,6 +2548,9 @@
 
 
 void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE);
   current_block()->Goto(continue_block);
   set_current_block(NULL);
@@ -2508,6 +2558,9 @@
 
 
 void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK);
   current_block()->Goto(break_block);
   set_current_block(NULL);
@@ -2515,10 +2568,13 @@
 
 
 void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   AstContext* context = call_context();
   if (context == NULL) {
     // Not an inlined return, so an actual one.
-    VISIT_FOR_VALUE(stmt->expression());
+    CHECK_ALIVE(VisitForValue(stmt->expression()));
     HValue* result = environment()->Pop();
     current_block()->FinishExit(new(zone()) HReturn(result));
     set_current_block(NULL);
@@ -2531,11 +2587,11 @@
                       test->if_true(),
                       test->if_false());
     } else if (context->IsEffect()) {
-      VISIT_FOR_EFFECT(stmt->expression());
+      CHECK_ALIVE(VisitForEffect(stmt->expression()));
       current_block()->Goto(function_return(), false);
     } else {
       ASSERT(context->IsValue());
-      VISIT_FOR_VALUE(stmt->expression());
+      CHECK_ALIVE(VisitForValue(stmt->expression()));
       HValue* return_value = environment()->Pop();
       current_block()->AddLeaveInlined(return_value, function_return());
     }
@@ -2545,26 +2601,35 @@
 
 
 void HGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  BAILOUT("WithEnterStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("WithEnterStatement");
 }
 
 
 void HGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
-  BAILOUT("WithExitStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("WithExitStatement");
 }
 
 
 void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   // We only optimize switch statements with smi-literal smi comparisons,
   // with a bounded number of clauses.
   const int kCaseClauseLimit = 128;
   ZoneList<CaseClause*>* clauses = stmt->cases();
   int clause_count = clauses->length();
   if (clause_count > kCaseClauseLimit) {
-    BAILOUT("SwitchStatement: too many clauses");
+    return Bailout("SwitchStatement: too many clauses");
   }
 
-  VISIT_FOR_VALUE(stmt->tag());
+  CHECK_ALIVE(VisitForValue(stmt->tag()));
   AddSimulate(stmt->EntryId());
   HValue* tag_value = Pop();
   HBasicBlock* first_test_block = current_block();
@@ -2575,19 +2640,21 @@
     CaseClause* clause = clauses->at(i);
     if (clause->is_default()) continue;
     if (!clause->label()->IsSmiLiteral()) {
-      BAILOUT("SwitchStatement: non-literal switch label");
+      return Bailout("SwitchStatement: non-literal switch label");
     }
 
     // Unconditionally deoptimize on the first non-smi compare.
     clause->RecordTypeFeedback(oracle());
     if (!clause->IsSmiCompare()) {
-      current_block()->FinishExitWithDeoptimization();
+      // Finish with a deoptimize and add uses of environment values to
+      // account for invisible uses.
+      current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
       set_current_block(NULL);
       break;
     }
 
     // Otherwise generate a compare and branch.
-    VISIT_FOR_VALUE(clause->label());
+    CHECK_ALIVE(VisitForValue(clause->label()));
     HValue* label_value = Pop();
     HCompare* compare =
         new(zone()) HCompare(tag_value, label_value, Token::EQ_STRICT);
@@ -2651,8 +2718,7 @@
         set_current_block(join);
       }
 
-      VisitStatements(clause->statements());
-      CHECK_BAILOUT;
+      CHECK_BAILOUT(VisitStatements(clause->statements()));
       fall_through_block = current_block();
     }
   }
@@ -2694,17 +2760,18 @@
   int osr_entry_id = statement->OsrEntryId();
   // We want the correct environment at the OsrEntry instruction.  Build
   // it explicitly.  The expression stack should be empty.
-  int count = environment()->length();
-  ASSERT(count ==
-         (environment()->parameter_count() + environment()->local_count()));
-  for (int i = 0; i < count; ++i) {
-    HUnknownOSRValue* unknown = new(zone()) HUnknownOSRValue;
-    AddInstruction(unknown);
-    environment()->Bind(i, unknown);
+  ASSERT(environment()->ExpressionStackIsEmpty());
+  for (int i = 0; i < environment()->length(); ++i) {
+    HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
+    AddInstruction(osr_value);
+    environment()->Bind(i, osr_value);
   }
 
   AddSimulate(osr_entry_id);
   AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
+  HContext* context = new(zone()) HContext;
+  AddInstruction(context);
+  environment()->BindContext(context);
   current_block()->Goto(loop_predecessor);
   loop_predecessor->SetJoinId(statement->EntryId());
   set_current_block(loop_predecessor);
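
The OSR entry now materializes an HContext once and records it in the
environment with BindContext; the many environment()->LookupContext()
calls introduced below read that slot back instead of creating a fresh
HContext at every use site. Assumed shape of the accessors involved
(sketch; the declarations live in hydrogen.h, not in this section):

void HEnvironment::BindContext(HValue* value) {
  // Assumption: the context occupies the slot right after the parameters.
  Bind(parameter_count(), value);
}

HValue* HEnvironment::LookupContext() const {
  return Lookup(parameter_count());
}
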
@@ -2712,6 +2779,9 @@
 
 
 void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   ASSERT(current_block() != NULL);
   PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
@@ -2720,8 +2790,7 @@
 
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    Visit(stmt->body());
-    CHECK_BAILOUT;
+    CHECK_BAILOUT(Visit(stmt->body()));
   }
   HBasicBlock* body_exit =
       JoinContinue(stmt, current_block(), break_info.continue_block());
@@ -2732,9 +2801,17 @@
     // back edge.
     body_exit = graph()->CreateBasicBlock();
     loop_successor = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->cond(), body_exit, loop_successor);
-    body_exit->SetJoinId(stmt->BackEdgeId());
-    loop_successor->SetJoinId(stmt->ExitId());
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
+    if (body_exit->HasPredecessor()) {
+      body_exit->SetJoinId(stmt->BackEdgeId());
+    } else {
+      body_exit = NULL;
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
   }
   HBasicBlock* loop_exit = CreateLoop(stmt,
                                       loop_entry,
@@ -2746,6 +2823,9 @@
 
 
 void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   ASSERT(current_block() != NULL);
   PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
@@ -2757,16 +2837,22 @@
   if (!stmt->cond()->ToBooleanIsTrue()) {
     HBasicBlock* body_entry = graph()->CreateBasicBlock();
     loop_successor = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->cond(), body_entry, loop_successor);
-    body_entry->SetJoinId(stmt->BodyId());
-    loop_successor->SetJoinId(stmt->ExitId());
-    set_current_block(body_entry);
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+    if (body_entry->HasPredecessor()) {
+      body_entry->SetJoinId(stmt->BodyId());
+      set_current_block(body_entry);
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
   }
 
   BreakAndContinueInfo break_info(stmt);
-  { BreakAndContinueScope push(&break_info, this);
-    Visit(stmt->body());
-    CHECK_BAILOUT;
+  if (current_block() != NULL) {
+    BreakAndContinueScope push(&break_info, this);
+    CHECK_BAILOUT(Visit(stmt->body()));
   }
   HBasicBlock* body_exit =
       JoinContinue(stmt, current_block(), break_info.continue_block());
@@ -2780,9 +2866,11 @@
 
 
 void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (stmt->init() != NULL) {
-    Visit(stmt->init());
-    CHECK_BAILOUT;
+    CHECK_ALIVE(Visit(stmt->init()));
   }
   ASSERT(current_block() != NULL);
   PreProcessOsrEntry(stmt);
@@ -2794,24 +2882,29 @@
   if (stmt->cond() != NULL) {
     HBasicBlock* body_entry = graph()->CreateBasicBlock();
     loop_successor = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->cond(), body_entry, loop_successor);
-    body_entry->SetJoinId(stmt->BodyId());
-    loop_successor->SetJoinId(stmt->ExitId());
-    set_current_block(body_entry);
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+    if (body_entry->HasPredecessor()) {
+      body_entry->SetJoinId(stmt->BodyId());
+      set_current_block(body_entry);
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
   }
 
   BreakAndContinueInfo break_info(stmt);
-  { BreakAndContinueScope push(&break_info, this);
-    Visit(stmt->body());
-    CHECK_BAILOUT;
+  if (current_block() != NULL) {
+    BreakAndContinueScope push(&break_info, this);
+    CHECK_BAILOUT(Visit(stmt->body()));
   }
   HBasicBlock* body_exit =
       JoinContinue(stmt, current_block(), break_info.continue_block());
 
   if (stmt->next() != NULL && body_exit != NULL) {
     set_current_block(body_exit);
-    Visit(stmt->next());
-    CHECK_BAILOUT;
+    CHECK_BAILOUT(Visit(stmt->next()));
     body_exit = current_block();
   }
 
@@ -2825,22 +2918,34 @@
 
 
 void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
-  BAILOUT("ForInStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("ForInStatement");
 }
 
 
 void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  BAILOUT("TryCatchStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("TryCatchStatement");
 }
 
 
 void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  BAILOUT("TryFinallyStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("TryFinallyStatement");
 }
 
 
 void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  BAILOUT("DebuggerStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("DebuggerStatement");
 }
 
 
@@ -2865,13 +2970,17 @@
 
 
 void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   Handle<SharedFunctionInfo> shared_info =
       SearchSharedFunctionInfo(info()->shared_info()->code(),
                                expr);
   if (shared_info.is_null()) {
     shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
   }
-  CHECK_BAILOUT;
+  // We also have a stack overflow if the recursive compilation did.
+  if (HasStackOverflow()) return;
   HFunctionLiteral* instr =
       new(zone()) HFunctionLiteral(shared_info, expr->pretenure());
   ast_context()->ReturnInstruction(instr, expr->id());
@@ -2880,32 +2989,47 @@
 
 void HGraphBuilder::VisitSharedFunctionInfoLiteral(
     SharedFunctionInfoLiteral* expr) {
-  BAILOUT("SharedFunctionInfoLiteral");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("SharedFunctionInfoLiteral");
 }
 
 
 void HGraphBuilder::VisitConditional(Conditional* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HBasicBlock* cond_true = graph()->CreateBasicBlock();
   HBasicBlock* cond_false = graph()->CreateBasicBlock();
-  VISIT_FOR_CONTROL(expr->condition(), cond_true, cond_false);
-  cond_true->SetJoinId(expr->ThenId());
-  cond_false->SetJoinId(expr->ElseId());
+  CHECK_BAILOUT(VisitForControl(expr->condition(), cond_true, cond_false));
 
   // Visit the true and false subexpressions in the same AST context as the
   // whole expression.
-  set_current_block(cond_true);
-  Visit(expr->then_expression());
-  CHECK_BAILOUT;
-  HBasicBlock* other = current_block();
+  if (cond_true->HasPredecessor()) {
+    cond_true->SetJoinId(expr->ThenId());
+    set_current_block(cond_true);
+    CHECK_BAILOUT(Visit(expr->then_expression()));
+    cond_true = current_block();
+  } else {
+    cond_true = NULL;
+  }
 
-  set_current_block(cond_false);
-  Visit(expr->else_expression());
-  CHECK_BAILOUT;
+  if (cond_false->HasPredecessor()) {
+    cond_false->SetJoinId(expr->ElseId());
+    set_current_block(cond_false);
+    CHECK_BAILOUT(Visit(expr->else_expression()));
+    cond_false = current_block();
+  } else {
+    cond_false = NULL;
+  }
 
   if (!ast_context()->IsTest()) {
-    HBasicBlock* join = CreateJoin(other, current_block(), expr->id());
+    HBasicBlock* join = CreateJoin(cond_true, cond_false, expr->id());
     set_current_block(join);
-    if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+    if (join != NULL && !ast_context()->IsEffect()) {
+      ast_context()->ReturnValue(Pop());
+    }
   }
 }
 
@@ -2930,29 +3054,29 @@
 
 HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
   ASSERT(var->IsContextSlot());
-  HInstruction* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
   int length = info()->scope()->ContextChainLength(var->scope());
   while (length-- > 0) {
-    context = new(zone()) HOuterContext(context);
-    AddInstruction(context);
+    HInstruction* context_instruction = new(zone()) HOuterContext(context);
+    AddInstruction(context_instruction);
+    context = context_instruction;
   }
   return context;
 }
 
 
 void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   Variable* variable = expr->AsVariable();
   if (variable == NULL) {
-    BAILOUT("reference to rewritten variable");
+    return Bailout("reference to rewritten variable");
   } else if (variable->IsStackAllocated()) {
-    if (environment()->Lookup(variable)->CheckFlag(HValue::kIsArguments)) {
-      BAILOUT("unsupported context for arguments object");
-    }
     ast_context()->ReturnValue(environment()->Lookup(variable));
   } else if (variable->IsContextSlot()) {
     if (variable->mode() == Variable::CONST) {
-      BAILOUT("reference to const context slot");
+      return Bailout("reference to const context slot");
     }
     HValue* context = BuildContextChainWalk(variable);
     int index = variable->AsSlot()->index();
@@ -2974,8 +3098,7 @@
       HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
       ast_context()->ReturnInstruction(instr, expr->id());
     } else {
-      HContext* context = new(zone()) HContext;
-      AddInstruction(context);
+      HValue* context = environment()->LookupContext();
       HGlobalObject* global_object = new(zone()) HGlobalObject(context);
       AddInstruction(global_object);
       HLoadGlobalGeneric* instr =
@@ -2988,12 +3111,15 @@
       ast_context()->ReturnInstruction(instr, expr->id());
     }
   } else {
-    BAILOUT("reference to a variable which requires dynamic lookup");
+    return Bailout("reference to a variable which requires dynamic lookup");
   }
 }
 
 
 void HGraphBuilder::VisitLiteral(Literal* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HConstant* instr =
       new(zone()) HConstant(expr->handle(), Representation::Tagged());
   ast_context()->ReturnInstruction(instr, expr->id());
@@ -3001,6 +3127,9 @@
 
 
 void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HRegExpLiteral* instr = new(zone()) HRegExpLiteral(expr->pattern(),
                                                      expr->flags(),
                                                      expr->literal_index());
@@ -3009,8 +3138,10 @@
 
 
 void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  HValue* context = environment()->LookupContext();
   HObjectLiteral* literal =
       new(zone()) HObjectLiteral(context,
                                  expr->constant_properties(),
@@ -3038,7 +3169,7 @@
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
           if (property->emit_store()) {
-            VISIT_FOR_VALUE(value);
+            CHECK_ALIVE(VisitForValue(value));
             HValue* value = Pop();
             Handle<String> name = Handle<String>::cast(key->handle());
             HStoreNamedGeneric* store =
@@ -3051,7 +3182,7 @@
             AddInstruction(store);
             AddSimulate(key->id());
           } else {
-            VISIT_FOR_EFFECT(value);
+            CHECK_ALIVE(VisitForEffect(value));
           }
           break;
         }
@@ -3059,7 +3190,7 @@
       case ObjectLiteral::Property::PROTOTYPE:
       case ObjectLiteral::Property::SETTER:
       case ObjectLiteral::Property::GETTER:
-        BAILOUT("Object literal with complex property");
+        return Bailout("Object literal with complex property");
       default: UNREACHABLE();
     }
   }
@@ -3080,6 +3211,9 @@
 
 
 void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
 
@@ -3099,9 +3233,9 @@
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
-    VISIT_FOR_VALUE(subexpr);
+    CHECK_ALIVE(VisitForValue(subexpr));
     HValue* value = Pop();
-    if (!Smi::IsValid(i)) BAILOUT("Non-smi key in array literal");
+    if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
 
     // Load the elements array before the first store.
     if (elements == NULL)  {
@@ -3120,7 +3254,10 @@
 
 
 void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
-  BAILOUT("CatchExtensionObject");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("CatchExtensionObject");
 }
 
 
@@ -3186,8 +3323,7 @@
 HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
                                                     Handle<String> name,
                                                     HValue* value) {
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
   return new(zone()) HStoreNamedGeneric(
                          context,
                          object,
@@ -3261,7 +3397,7 @@
   // know about and do not want to handle ones we've never seen.  Otherwise
   // use a generic IC.
   if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
-    current_block()->FinishExitWithDeoptimization();
+    current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
   } else {
     HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
     instr->set_position(expr->position());
@@ -3299,14 +3435,14 @@
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
   expr->RecordTypeFeedback(oracle());
-  VISIT_FOR_VALUE(prop->obj());
+  CHECK_ALIVE(VisitForValue(prop->obj()));
 
   HValue* value = NULL;
   HInstruction* instr = NULL;
 
   if (prop->key()->IsPropertyName()) {
     // Named store.
-    VISIT_FOR_VALUE(expr->value());
+    CHECK_ALIVE(VisitForValue(expr->value()));
     value = Pop();
     HValue* object = Pop();
 
@@ -3330,8 +3466,8 @@
 
   } else {
     // Keyed store.
-    VISIT_FOR_VALUE(prop->key());
-    VISIT_FOR_VALUE(expr->value());
+    CHECK_ALIVE(VisitForValue(prop->key()));
+    CHECK_ALIVE(VisitForValue(expr->value()));
     value = Pop();
     HValue* key = Pop();
     HValue* object = Pop();
@@ -3363,8 +3499,7 @@
     AddInstruction(instr);
     if (instr->HasSideEffects()) AddSimulate(ast_id);
   } else {
-    HContext* context = new(zone()) HContext;
-    AddInstruction(context);
+    HValue* context = environment()->LookupContext();
     HGlobalObject* global_object = new(zone()) HGlobalObject(context);
     AddInstruction(global_object);
     HStoreGlobalGeneric* instr =
@@ -3393,7 +3528,7 @@
   BinaryOperation* operation = expr->binary_operation();
 
   if (var != NULL) {
-    VISIT_FOR_VALUE(operation);
+    CHECK_ALIVE(VisitForValue(operation));
 
     if (var->is_global()) {
       HandleGlobalVariableAssignment(var,
@@ -3410,7 +3545,7 @@
       AddInstruction(instr);
       if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
     } else {
-      BAILOUT("compound assignment to lookup slot");
+      return Bailout("compound assignment to lookup slot");
     }
     ast_context()->ReturnValue(Pop());
 
@@ -3419,7 +3554,7 @@
 
     if (prop->key()->IsPropertyName()) {
       // Named property.
-      VISIT_FOR_VALUE(prop->obj());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* obj = Top();
 
       HInstruction* load = NULL;
@@ -3433,7 +3568,7 @@
       PushAndAdd(load);
       if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
 
-      VISIT_FOR_VALUE(expr->value());
+      CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
       HValue* left = Pop();
 
@@ -3451,8 +3586,8 @@
 
     } else {
       // Keyed property.
-      VISIT_FOR_VALUE(prop->obj());
-      VISIT_FOR_VALUE(prop->key());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
+      CHECK_ALIVE(VisitForValue(prop->key()));
       HValue* obj = environment()->ExpressionStackAt(1);
       HValue* key = environment()->ExpressionStackAt(0);
 
@@ -3460,7 +3595,7 @@
       PushAndAdd(load);
       if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
 
-      VISIT_FOR_VALUE(expr->value());
+      CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
       HValue* left = Pop();
 
@@ -3479,12 +3614,15 @@
     }
 
   } else {
-    BAILOUT("invalid lhs in compound assignment");
+    return Bailout("invalid lhs in compound assignment");
   }
 }
 
 
 void HGraphBuilder::VisitAssignment(Assignment* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   VariableProxy* proxy = expr->target()->AsVariableProxy();
   Variable* var = proxy->AsVariable();
   Property* prop = expr->target()->AsProperty();
@@ -3496,28 +3634,20 @@
   }
 
   if (var != NULL) {
-    if (proxy->IsArguments()) BAILOUT("assignment to arguments");
+    if (proxy->IsArguments()) return Bailout("assignment to arguments");
 
     // Handle the assignment.
     if (var->IsStackAllocated()) {
-      HValue* value = NULL;
-      // Handle stack-allocated variables on the right-hand side directly.
       // We do not allow the arguments object to occur in a context where it
       // may escape, but assignments to stack-allocated locals are
-      // permitted.  Handling such assignments here bypasses the check for
-      // the arguments object in VisitVariableProxy.
-      Variable* rhs_var = expr->value()->AsVariableProxy()->AsVariable();
-      if (rhs_var != NULL && rhs_var->IsStackAllocated()) {
-        value = environment()->Lookup(rhs_var);
-      } else {
-        VISIT_FOR_VALUE(expr->value());
-        value = Pop();
-      }
+      // permitted.
+      CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
+      HValue* value = Pop();
       Bind(var, value);
       ast_context()->ReturnValue(value);
 
     } else if (var->IsContextSlot() && var->mode() != Variable::CONST) {
-      VISIT_FOR_VALUE(expr->value());
+      CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* context = BuildContextChainWalk(var);
       int index = var->AsSlot()->index();
       HStoreContextSlot* instr =
@@ -3527,7 +3657,7 @@
       ast_context()->ReturnValue(Pop());
 
     } else if (var->is_global()) {
-      VISIT_FOR_VALUE(expr->value());
+      CHECK_ALIVE(VisitForValue(expr->value()));
       HandleGlobalVariableAssignment(var,
                                      Top(),
                                      expr->position(),
@@ -3535,23 +3665,26 @@
       ast_context()->ReturnValue(Pop());
 
     } else {
-      BAILOUT("assignment to LOOKUP or const CONTEXT variable");
+      return Bailout("assignment to LOOKUP or const CONTEXT variable");
     }
 
   } else if (prop != NULL) {
     HandlePropertyAssignment(expr);
   } else {
-    BAILOUT("invalid left-hand side in assignment");
+    return Bailout("invalid left-hand side in assignment");
   }
 }
 
 
 void HGraphBuilder::VisitThrow(Throw* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   // We don't optimize functions with invalid left-hand sides in
   // assignments, count operations, or for-in.  Consequently throw can
   // currently only occur in an effect context.
   ASSERT(ast_context()->IsEffect());
-  VISIT_FOR_VALUE(expr->exception());
+  CHECK_ALIVE(VisitForValue(expr->exception()));
 
   HValue* value = environment()->Pop();
   HThrow* instr = new(zone()) HThrow(value);
@@ -3591,8 +3724,7 @@
                                                    Property* expr) {
   ASSERT(expr->key()->IsPropertyName());
   Handle<Object> name = expr->key()->AsLiteral()->handle();
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
   return new(zone()) HLoadNamedGeneric(context, obj, name);
 }
 
@@ -3622,8 +3754,7 @@
 
 HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
                                                    HValue* key) {
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
   return new(zone()) HLoadKeyedGeneric(context, object, key);
 }
 
@@ -3700,8 +3831,7 @@
 HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
                                                     HValue* key,
                                                     HValue* value) {
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
   return new(zone()) HStoreKeyedGeneric(
                          context,
                          object,
@@ -3756,6 +3886,11 @@
   HLoadExternalArrayPointer* external_elements =
       new(zone()) HLoadExternalArrayPointer(elements);
   AddInstruction(external_elements);
+  if (expr->external_array_type() == kExternalPixelArray) {
+    HClampToUint8* clamp = new(zone()) HClampToUint8(val);
+    AddInstruction(clamp);
+    val = clamp;
+  }
   return new(zone()) HStoreKeyedSpecializedArrayElement(
       external_elements,
       checked_key,
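
Stores into pixel arrays now clamp the stored value explicitly via
HClampToUint8. A hypothetical scalar model of the clamping semantics,
for illustration only (not V8 code; the exact tie-breaking rule for
doubles is defined by the typed-array spec and is not modeled here):

#include <stdint.h>

static inline uint8_t ClampToUint8(double value) {
  if (!(value > 0.0)) return 0;    // NaN and non-positive values clamp to 0.
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(value + 0.5);  // Round to nearest, ties up.
}
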
@@ -3810,7 +3945,7 @@
   } else {
     Push(graph()->GetArgumentsObject());
     VisitForValue(expr->key());
-    if (HasStackOverflow()) return false;
+    if (HasStackOverflow() || current_block() == NULL) return true;
     HValue* key = Pop();
     Drop(1);  // Arguments object.
     HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
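
Note the changed return convention here: TryArgumentsAccess (and
TryCallApply further down) now answers true both when it emitted the
access and when the whole compilation is already aborting, so callers no
longer need a CHECK_BAILOUT after the call. Condensed caller-side sketch
(assumption, mirroring the VisitProperty hunk that follows):

// true covers "handled" as well as "aborting"; just return either way.
if (TryArgumentsAccess(expr)) return;
CHECK_ALIVE(VisitForValue(expr->obj()));
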
@@ -3826,31 +3961,29 @@
 
 
 void HGraphBuilder::VisitProperty(Property* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   expr->RecordTypeFeedback(oracle());
 
   if (TryArgumentsAccess(expr)) return;
-  CHECK_BAILOUT;
 
-  VISIT_FOR_VALUE(expr->obj());
+  CHECK_ALIVE(VisitForValue(expr->obj()));
 
   HInstruction* instr = NULL;
   if (expr->IsArrayLength()) {
     HValue* array = Pop();
     AddInstruction(new(zone()) HCheckNonSmi(array));
-    AddInstruction(new(zone()) HCheckInstanceType(array,
-                                                  JS_ARRAY_TYPE,
-                                                  JS_ARRAY_TYPE));
+    AddInstruction(HCheckInstanceType::NewIsJSArray(array));
     instr = new(zone()) HJSArrayLength(array);
 
   } else if (expr->IsStringLength()) {
     HValue* string = Pop();
     AddInstruction(new(zone()) HCheckNonSmi(string));
-    AddInstruction(new(zone()) HCheckInstanceType(string,
-                                                  FIRST_STRING_TYPE,
-                                                  LAST_STRING_TYPE));
+    AddInstruction(HCheckInstanceType::NewIsString(string));
     instr = new(zone()) HStringLength(string);
   } else if (expr->IsStringAccess()) {
-    VISIT_FOR_VALUE(expr->key());
+    CHECK_ALIVE(VisitForValue(expr->key()));
     HValue* index = Pop();
     HValue* string = Pop();
     HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
@@ -3877,7 +4010,7 @@
     }
 
   } else {
-    VISIT_FOR_VALUE(expr->key());
+    CHECK_ALIVE(VisitForValue(expr->key()));
 
     HValue* key = Pop();
     HValue* obj = Pop();
@@ -3938,10 +4071,11 @@
         PrintF("Trying to inline the polymorphic call to %s\n",
                *name->ToCString());
       }
-      if (!FLAG_polymorphic_inlining || !TryInline(expr)) {
-        // Check for bailout, as trying to inline might fail due to bailout
-        // during hydrogen processing.
-        CHECK_BAILOUT;
+      if (FLAG_polymorphic_inlining && TryInline(expr)) {
+        // Trying to inline will signal that we should bailout from the
+        // entire compilation by setting stack overflow on the visitor.
+        if (HasStackOverflow()) return;
+      } else {
         HCallConstantFunction* call =
             new(zone()) HCallConstantFunction(expr->target(), argument_count);
         call->set_position(expr->position());
@@ -3959,10 +4093,9 @@
   // know about and do not want to handle ones we've never seen.  Otherwise
   // use a generic IC.
   if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
-    current_block()->FinishExitWithDeoptimization();
+    current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
   } else {
-    HContext* context = new(zone()) HContext;
-    AddInstruction(context);
+    HValue* context = environment()->LookupContext();
     HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
     call->set_position(expr->position());
     PreProcessCall(call);
@@ -3981,30 +4114,27 @@
   // even without predecessors to the join block, we set it as the exit
   // block and continue by adding instructions there.
   ASSERT(join != NULL);
-  set_current_block(join);
   if (join->HasPredecessor()) {
+    set_current_block(join);
     join->SetJoinId(expr->id());
     if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+  } else {
+    set_current_block(NULL);
   }
 }
 
 
-void HGraphBuilder::TraceInline(Handle<JSFunction> target, const char* reason) {
+void HGraphBuilder::TraceInline(Handle<JSFunction> target,
+                                Handle<JSFunction> caller,
+                                const char* reason) {
   if (FLAG_trace_inlining) {
+    SmartPointer<char> target_name = target->shared()->DebugName()->ToCString();
+    SmartPointer<char> caller_name = caller->shared()->DebugName()->ToCString();
     if (reason == NULL) {
-      // We are currently in the context of inlined function thus we have
-      // to go to an outer FunctionState to get caller.
-      SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
-      SmartPointer<char> caller =
-          function_state()->outer()->compilation_info()->function()->
-              debug_name()->ToCString();
-      PrintF("Inlined %s called from %s.\n", *callee, *caller);
+      PrintF("Inlined %s called from %s.\n", *target_name, *caller_name);
     } else {
-      SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
-      SmartPointer<char> caller =
-          info()->function()->debug_name()->ToCString();
       PrintF("Did not inline %s called from %s (%s).\n",
-             *callee, *caller, reason);
+             *target_name, *caller_name, reason);
     }
   }
 }
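
With --trace_inlining the reworked TraceInline names both ends of the
edge from the JSFunction handles themselves, e.g. (function names here
are hypothetical): "Inlined inner called from outer." or "Did not inline
inner called from outer (target text too big)."
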
@@ -4013,20 +4143,28 @@
 bool HGraphBuilder::TryInline(Call* expr) {
   if (!FLAG_use_inlining) return false;
 
+  // The function call we are inlining is a method call if the call
+  // is a property call.
+  CallKind call_kind = (expr->expression()->AsProperty() == NULL)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+
   // Precondition: call is monomorphic and we have found a target with the
   // appropriate arity.
+  Handle<JSFunction> caller = info()->closure();
   Handle<JSFunction> target = expr->target();
+  Handle<SharedFunctionInfo> target_shared(target->shared());
 
   // Do a quick check on source code length to avoid parsing large
   // inlining candidates.
   if (FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize) {
-    TraceInline(target, "target text too big");
+    TraceInline(target, caller, "target text too big");
     return false;
   }
 
   // Target must be inlineable.
   if (!target->IsInlineable()) {
-    TraceInline(target, "target not inlineable");
+    TraceInline(target, caller, "target not inlineable");
     return false;
   }
 
@@ -4035,7 +4173,7 @@
   if (target->context() != outer_info->closure()->context() ||
       outer_info->scope()->contains_with() ||
       outer_info->scope()->num_heap_slots() > 0) {
-    TraceInline(target, "target requires context change");
+    TraceInline(target, caller, "target requires context change");
     return false;
   }
 
@@ -4044,7 +4182,7 @@
   int current_level = 1;
   while (env->outer() != NULL) {
     if (current_level == Compiler::kMaxInliningLevels) {
-      TraceInline(target, "inline depth limit reached");
+      TraceInline(target, caller, "inline depth limit reached");
       return false;
     }
     current_level++;
@@ -4052,14 +4190,14 @@
   }
 
   // Don't inline recursive functions.
-  if (target->shared() == outer_info->closure()->shared()) {
-    TraceInline(target, "target is recursive");
+  if (*target_shared == outer_info->closure()->shared()) {
+    TraceInline(target, caller, "target is recursive");
     return false;
   }
 
   // We don't want to add more than a certain number of nodes from inlining.
   if (FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) {
-    TraceInline(target, "cumulative AST node limit reached");
+    TraceInline(target, caller, "cumulative AST node limit reached");
     return false;
   }
 
@@ -4072,14 +4210,14 @@
     if (target_info.isolate()->has_pending_exception()) {
       // Parse or scope error, never optimize this function.
       SetStackOverflow();
-      target->shared()->set_optimization_disabled(true);
+      target_shared->DisableOptimization(*target);
     }
-    TraceInline(target, "parse failure");
+    TraceInline(target, caller, "parse failure");
     return false;
   }
 
   if (target_info.scope()->num_heap_slots() > 0) {
-    TraceInline(target, "target has context-allocated variables");
+    TraceInline(target, caller, "target has context-allocated variables");
     return false;
   }
   FunctionLiteral* function = target_info.function();
@@ -4087,32 +4225,31 @@
   // Count the number of AST nodes added by inlining this call.
   int nodes_added = AstNode::Count() - count_before;
   if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
-    TraceInline(target, "target AST is too large");
+    TraceInline(target, caller, "target AST is too large");
     return false;
   }
 
   // Check if we can handle all declarations in the inlined functions.
   VisitDeclarations(target_info.scope()->declarations());
   if (HasStackOverflow()) {
-    TraceInline(target, "target has non-trivial declaration");
+    TraceInline(target, caller, "target has non-trivial declaration");
     ClearStackOverflow();
     return false;
   }
 
   // Don't inline functions that uses the arguments object or that
   // have a mismatching number of parameters.
-  Handle<SharedFunctionInfo> target_shared(target->shared());
   int arity = expr->arguments()->length();
   if (function->scope()->arguments() != NULL ||
       arity != target_shared->formal_parameter_count()) {
-    TraceInline(target, "target requires special argument handling");
+    TraceInline(target, caller, "target requires special argument handling");
     return false;
   }
 
   // All statements in the body must be inlineable.
   for (int i = 0, count = function->body()->length(); i < count; ++i) {
     if (!function->body()->at(i)->IsInlineable()) {
-      TraceInline(target, "target contains unsupported syntax");
+      TraceInline(target, caller, "target contains unsupported syntax");
       return false;
     }
   }
@@ -4124,7 +4261,7 @@
     // generating the optimized inline code.
     target_info.EnableDeoptimizationSupport();
     if (!FullCodeGenerator::MakeCode(&target_info)) {
-      TraceInline(target, "could not generate deoptimization info");
+      TraceInline(target, caller, "could not generate deoptimization info");
       return false;
     }
     target_shared->EnableDeoptimizationSupport(*target_info.code());
@@ -4144,25 +4281,33 @@
 
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner_env =
-      environment()->CopyForInlining(target, function, true, undefined);
+      environment()->CopyForInlining(target,
+                                     function,
+                                     HEnvironment::HYDROGEN,
+                                     undefined,
+                                     call_kind);
   HBasicBlock* body_entry = CreateBasicBlock(inner_env);
   current_block()->Goto(body_entry);
 
   body_entry->SetJoinId(expr->ReturnId());
   set_current_block(body_entry);
-  AddInstruction(new(zone()) HEnterInlined(target, function));
+  AddInstruction(new(zone()) HEnterInlined(target,
+                                           function,
+                                           call_kind));
   VisitStatements(function->body());
   if (HasStackOverflow()) {
     // Bail out if the inline function did, as we cannot residualize a call
     // instead.
-    TraceInline(target, "inline graph construction failed");
-    return false;
+    TraceInline(target, caller, "inline graph construction failed");
+    target_shared->DisableOptimization(*target);
+    inline_bailout_ = true;
+    return true;
   }
 
   // Update inlined nodes count.
   inlined_count_ += nodes_added;
 
-  TraceInline(target, NULL);
+  TraceInline(target, caller, NULL);
 
   if (current_block() != NULL) {
     // Add a return of undefined if control can fall off the body.  In a
@@ -4195,26 +4340,32 @@
   if (inlined_test_context() != NULL) {
     HBasicBlock* if_true = inlined_test_context()->if_true();
     HBasicBlock* if_false = inlined_test_context()->if_false();
-    if_true->SetJoinId(expr->id());
-    if_false->SetJoinId(expr->id());
     ASSERT(ast_context() == inlined_test_context());
     // Pop the return test context from the expression context stack.
     ClearInlinedTestContext();
 
     // Forward to the real test context.
-    HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
-    HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
-    if_true->Goto(true_target, false);
-    if_false->Goto(false_target, false);
+    if (if_true->HasPredecessor()) {
+      if_true->SetJoinId(expr->id());
+      HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
+      if_true->Goto(true_target, false);
+    }
+    if (if_false->HasPredecessor()) {
+      if_false->SetJoinId(expr->id());
+      HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
+      if_false->Goto(false_target, false);
+    }
 
     // TODO(kmillikin): Come up with a better way to handle this. It is too
     // subtle. NULL here indicates that the enclosing context has no control
     // flow to handle.
     set_current_block(NULL);
 
-  } else {
+  } else if (function_return()->HasPredecessor()) {
     function_return()->SetJoinId(expr->id());
     set_current_block(function_return());
+  } else {
+    set_current_block(NULL);
   }
 
   return true;
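
TryInline's contract changes accordingly: when inline graph construction
fails it disables optimization of the target, records inline_bailout_,
leaves the stack-overflow flag set, and still returns true, so the call
sites further down collapse to a bare early return. Condensed sketch of
the assumed contract (not verbatim from the patch):

// TryInline(expr) == false -> emit the ordinary call instruction.
// TryInline(expr) == true  -> the call was inlined, or the entire
//     compilation is aborting with the stack-overflow flag set.
if (TryInline(expr)) return;
call = PreProcessCall(
    new(zone()) HCallConstantFunction(expr->target(), argument_count));
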
@@ -4351,14 +4502,16 @@
 
   // Found pattern f.apply(receiver, arguments).
   VisitForValue(prop->obj());
-  if (HasStackOverflow()) return false;
-  HValue* function = Pop();
+  if (HasStackOverflow() || current_block() == NULL) return true;
+  HValue* function = Top();
+  AddCheckConstantFunction(expr, function, function_map, true);
+  Drop(1);
+
   VisitForValue(args->at(0));
-  if (HasStackOverflow()) return false;
+  if (HasStackOverflow() || current_block() == NULL) return true;
   HValue* receiver = Pop();
   HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
   HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
-  AddCheckConstantFunction(expr, function, function_map, true);
   HInstruction* result =
       new(zone()) HApplyArguments(function, receiver, length, elements);
   result->set_position(expr->position());
@@ -4368,6 +4521,9 @@
 
 
 void HGraphBuilder::VisitCall(Call* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   Expression* callee = expr->expression();
   int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
   HInstruction* call = NULL;
@@ -4376,20 +4532,18 @@
   if (prop != NULL) {
     if (!prop->key()->IsPropertyName()) {
       // Keyed function call.
-      VISIT_FOR_VALUE(prop->obj());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
 
-      VISIT_FOR_VALUE(prop->key());
+      CHECK_ALIVE(VisitForValue(prop->key()));
       // Push receiver and key like the non-optimized code generator expects it.
       HValue* key = Pop();
       HValue* receiver = Pop();
       Push(key);
       Push(receiver);
 
-      VisitExpressions(expr->arguments());
-      CHECK_BAILOUT;
+      CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
-      HContext* context = new(zone()) HContext;
-      AddInstruction(context);
+      HValue* context = environment()->LookupContext();
       call = PreProcessCall(
           new(zone()) HCallKeyed(context, key, argument_count));
       call->set_position(expr->position());
@@ -4399,18 +4553,15 @@
     }
 
     // Named function call.
-    expr->RecordTypeFeedback(oracle());
+    expr->RecordTypeFeedback(oracle(), CALL_AS_METHOD);
 
     if (TryCallApply(expr)) return;
-    CHECK_BAILOUT;
 
-    VISIT_FOR_VALUE(prop->obj());
-    VisitExpressions(expr->arguments());
-    CHECK_BAILOUT;
+    CHECK_ALIVE(VisitForValue(prop->obj()));
+    CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
     Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
 
-    expr->RecordTypeFeedback(oracle());
     ZoneMapList* types = expr->GetReceiverTypes();
 
     HValue* receiver =
@@ -4430,23 +4581,16 @@
         // When the target has a custom call IC generator, use the IC,
         // because it is likely to generate better code.  Also use the IC
         // when a primitive receiver check is required.
-        HContext* context = new(zone()) HContext;
-        AddInstruction(context);
+        HValue* context = environment()->LookupContext();
         call = PreProcessCall(
             new(zone()) HCallNamed(context, name, argument_count));
       } else {
         AddCheckConstantFunction(expr, receiver, receiver_map, true);
 
-        if (TryInline(expr)) {
-          return;
-        } else {
-          // Check for bailout, as the TryInline call in the if condition above
-          // might return false due to bailout during hydrogen processing.
-          CHECK_BAILOUT;
-          call = PreProcessCall(
-              new(zone()) HCallConstantFunction(expr->target(),
-                                                argument_count));
-        }
+        if (TryInline(expr)) return;
+        call = PreProcessCall(
+            new(zone()) HCallConstantFunction(expr->target(),
+                                              argument_count));
       }
     } else if (types != NULL && types->length() > 1) {
       ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
@@ -4454,8 +4598,7 @@
       return;
 
     } else {
-      HContext* context = new(zone()) HContext;
-      AddInstruction(context);
+      HValue* context = environment()->LookupContext();
       call = PreProcessCall(
           new(zone()) HCallNamed(context, name, argument_count));
     }
@@ -4466,7 +4609,7 @@
 
     if (!global_call) {
       ++argument_count;
-      VISIT_FOR_VALUE(expr->expression());
+      CHECK_ALIVE(VisitForValue(expr->expression()));
     }
 
     if (global_call) {
@@ -4484,14 +4627,12 @@
       if (known_global_function) {
         // Push the global object instead of the global receiver because
         // code generated by the full code generator expects it.
-        HContext* context = new(zone()) HContext;
+        HValue* context = environment()->LookupContext();
         HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-        AddInstruction(context);
         PushAndAdd(global_object);
-        VisitExpressions(expr->arguments());
-        CHECK_BAILOUT;
+        CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
-        VISIT_FOR_VALUE(expr->expression());
+        CHECK_ALIVE(VisitForValue(expr->expression()));
         HValue* function = Pop();
         AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
 
@@ -4505,35 +4646,25 @@
                IsGlobalObject());
         environment()->SetExpressionStackAt(receiver_index, global_receiver);
 
-        if (TryInline(expr)) {
-          return;
-        }
-        // Check for bailout, as trying to inline might fail due to bailout
-        // during hydrogen processing.
-        CHECK_BAILOUT;
-
+        if (TryInline(expr)) return;
         call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
-                                                   argument_count));
+                                                           argument_count));
       } else {
-        HContext* context = new(zone()) HContext;
-        AddInstruction(context);
+        HValue* context = environment()->LookupContext();
         PushAndAdd(new(zone()) HGlobalObject(context));
-        VisitExpressions(expr->arguments());
-        CHECK_BAILOUT;
+        CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
         call = PreProcessCall(new(zone()) HCallGlobal(context,
-                                              var->name(),
-                                              argument_count));
+                                                      var->name(),
+                                                      argument_count));
       }
 
     } else {
-      HContext* context = new(zone()) HContext;
+      HValue* context = environment()->LookupContext();
       HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-      AddInstruction(context);
       AddInstruction(global_object);
       PushAndAdd(new(zone()) HGlobalReceiver(global_object));
-      VisitExpressions(expr->arguments());
-      CHECK_BAILOUT;
+      CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
       call = PreProcessCall(new(zone()) HCallFunction(context, argument_count));
     }
@@ -4545,14 +4676,15 @@
 
 
 void HGraphBuilder::VisitCallNew(CallNew* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   // The constructor function is also used as the receiver argument to the
   // JS construct call builtin.
-  VISIT_FOR_VALUE(expr->expression());
-  VisitExpressions(expr->arguments());
-  CHECK_BAILOUT;
+  CHECK_ALIVE(VisitForValue(expr->expression()));
+  CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  HValue* context = environment()->LookupContext();
 
   // The constructor is both an operand to the instruction and an argument
   // to the construct call.
@@ -4581,8 +4713,11 @@
 
 
 void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (expr->is_jsruntime()) {
-    BAILOUT("call to a JavaScript runtime function");
+    return Bailout("call to a JavaScript runtime function");
   }
 
   const Runtime::Function* function = expr->function();
@@ -4602,8 +4737,7 @@
     (this->*generator)(expr);
   } else {
     ASSERT(function->intrinsic_type == Runtime::RUNTIME);
-    VisitArgumentList(expr->arguments());
-    CHECK_BAILOUT;
+    CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
     Handle<String> name = expr->name();
     int argument_count = expr->arguments()->length();
@@ -4617,128 +4751,203 @@
 
 
 void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
-  Token::Value op = expr->op();
-  if (op == Token::VOID) {
-    VISIT_FOR_EFFECT(expr->expression());
-    ast_context()->ReturnValue(graph()->GetConstantUndefined());
-  } else if (op == Token::DELETE) {
-    Property* prop = expr->expression()->AsProperty();
-    Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-    if (prop == NULL && var == NULL) {
-      // Result of deleting non-property, non-variable reference is true.
-      // Evaluate the subexpression for side effects.
-      VISIT_FOR_EFFECT(expr->expression());
-      ast_context()->ReturnValue(graph()->GetConstantTrue());
-    } else if (var != NULL &&
-               !var->is_global() &&
-               var->AsSlot() != NULL &&
-               var->AsSlot()->type() != Slot::LOOKUP) {
-      // Result of deleting non-global, non-dynamic variables is false.
-      // The subexpression does not have side effects.
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  switch (expr->op()) {
+    case Token::DELETE: return VisitDelete(expr);
+    case Token::VOID: return VisitVoid(expr);
+    case Token::TYPEOF: return VisitTypeof(expr);
+    case Token::ADD: return VisitAdd(expr);
+    case Token::SUB: return VisitSub(expr);
+    case Token::BIT_NOT: return VisitBitNot(expr);
+    case Token::NOT: return VisitNot(expr);
+    default: UNREACHABLE();
+  }
+}
+
+void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
+  Property* prop = expr->expression()->AsProperty();
+  Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+  if (prop == NULL && var == NULL) {
+    // Result of deleting non-property, non-variable reference is true.
+    // Evaluate the subexpression for side effects.
+    CHECK_ALIVE(VisitForEffect(expr->expression()));
+    ast_context()->ReturnValue(graph()->GetConstantTrue());
+  } else if (var != NULL &&
+             !var->is_global() &&
+             var->AsSlot() != NULL &&
+             var->AsSlot()->type() != Slot::LOOKUP) {
+    // Result of deleting non-global, non-dynamic variables is false.
+    // The subexpression does not have side effects.
+    ast_context()->ReturnValue(graph()->GetConstantFalse());
+  } else if (prop != NULL) {
+    if (prop->is_synthetic()) {
+      // Result of deleting parameters is false, even when they rewrite
+      // to accesses on the arguments object.
       ast_context()->ReturnValue(graph()->GetConstantFalse());
-    } else if (prop != NULL) {
-      if (prop->is_synthetic()) {
-        // Result of deleting parameters is false, even when they rewrite
-        // to accesses on the arguments object.
-        ast_context()->ReturnValue(graph()->GetConstantFalse());
-      } else {
-        VISIT_FOR_VALUE(prop->obj());
-        VISIT_FOR_VALUE(prop->key());
-        HValue* key = Pop();
-        HValue* obj = Pop();
-        HDeleteProperty* instr = new(zone()) HDeleteProperty(obj, key);
-        ast_context()->ReturnInstruction(instr, expr->id());
-      }
-    } else if (var->is_global()) {
-      BAILOUT("delete with global variable");
     } else {
-      BAILOUT("delete with non-global variable");
+      CHECK_ALIVE(VisitForValue(prop->obj()));
+      CHECK_ALIVE(VisitForValue(prop->key()));
+      HValue* key = Pop();
+      HValue* obj = Pop();
+      HDeleteProperty* instr = new(zone()) HDeleteProperty(obj, key);
+      ast_context()->ReturnInstruction(instr, expr->id());
     }
-  } else if (op == Token::NOT) {
-    if (ast_context()->IsTest()) {
-      TestContext* context = TestContext::cast(ast_context());
-      VisitForControl(expr->expression(),
-                      context->if_false(),
-                      context->if_true());
-    } else if (ast_context()->IsValue()) {
-      HBasicBlock* materialize_false = graph()->CreateBasicBlock();
-      HBasicBlock* materialize_true = graph()->CreateBasicBlock();
-      VISIT_FOR_CONTROL(expr->expression(),
-                        materialize_false,
-                        materialize_true);
-      materialize_false->SetJoinId(expr->expression()->id());
-      materialize_true->SetJoinId(expr->expression()->id());
-
-      set_current_block(materialize_false);
-      Push(graph()->GetConstantFalse());
-      set_current_block(materialize_true);
-      Push(graph()->GetConstantTrue());
-
-      HBasicBlock* join =
-          CreateJoin(materialize_false, materialize_true, expr->id());
-      set_current_block(join);
-      ast_context()->ReturnValue(Pop());
-    } else {
-      ASSERT(ast_context()->IsEffect());
-      VisitForEffect(expr->expression());
-    }
-
-  } else if (op == Token::TYPEOF) {
-    VisitForTypeOf(expr->expression());
-    if (HasStackOverflow()) return;
-    HValue* value = Pop();
-    ast_context()->ReturnInstruction(new(zone()) HTypeof(value), expr->id());
-
+  } else if (var->is_global()) {
+    Bailout("delete with global variable");
   } else {
-    VISIT_FOR_VALUE(expr->expression());
-    HValue* value = Pop();
-    HInstruction* instr = NULL;
-    switch (op) {
-      case Token::BIT_NOT:
-        instr = new(zone()) HBitNot(value);
-        break;
-      case Token::SUB:
-        instr = new(zone()) HMul(value, graph_->GetConstantMinus1());
-        break;
-      case Token::ADD:
-        instr = new(zone()) HMul(value, graph_->GetConstant1());
-        break;
-      default:
-        BAILOUT("Value: unsupported unary operation");
-        break;
-    }
-    ast_context()->ReturnInstruction(instr, expr->id());
+    Bailout("delete with non-global variable");
   }
 }
 
 
-HInstruction* HGraphBuilder::BuildIncrement(HValue* value, bool increment) {
-  HConstant* delta = increment
+void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForEffect(expr->expression()));
+  ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
+void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForTypeOf(expr->expression()));
+  HValue* value = Pop();
+  ast_context()->ReturnInstruction(new(zone()) HTypeof(value), expr->id());
+}
+
+
+void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForValue(expr->expression()));
+  HValue* value = Pop();
+  HInstruction* instr = new(zone()) HMul(value, graph_->GetConstant1());
+  ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitSub(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForValue(expr->expression()));
+  HValue* value = Pop();
+  HInstruction* instr = new(zone()) HMul(value, graph_->GetConstantMinus1());
+  TypeInfo info = oracle()->UnaryType(expr);
+  Representation rep = ToRepresentation(info);
+  TraceRepresentation(expr->op(), info, instr, rep);
+  instr->AssumeRepresentation(rep);
+  ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForValue(expr->expression()));
+  HValue* value = Pop();
+  HInstruction* instr = new(zone()) HBitNot(value);
+  ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitNot(UnaryOperation* expr) {
+  // TODO(svenpanne) Perhaps a switch/virtual function is nicer here.
+  if (ast_context()->IsTest()) {
+    TestContext* context = TestContext::cast(ast_context());
+    VisitForControl(expr->expression(),
+                    context->if_false(),
+                    context->if_true());
+    return;
+  }
+
+  if (ast_context()->IsEffect()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
+
+  ASSERT(ast_context()->IsValue());
+  HBasicBlock* materialize_false = graph()->CreateBasicBlock();
+  HBasicBlock* materialize_true = graph()->CreateBasicBlock();
+  CHECK_BAILOUT(VisitForControl(expr->expression(),
+                                materialize_false,
+                                materialize_true));
+
+  if (materialize_false->HasPredecessor()) {
+    materialize_false->SetJoinId(expr->expression()->id());
+    set_current_block(materialize_false);
+    Push(graph()->GetConstantFalse());
+  } else {
+    materialize_false = NULL;
+  }
+
+  if (materialize_true->HasPredecessor()) {
+    materialize_true->SetJoinId(expr->expression()->id());
+    set_current_block(materialize_true);
+    Push(graph()->GetConstantTrue());
+  } else {
+    materialize_true = NULL;
+  }
+
+  HBasicBlock* join =
+    CreateJoin(materialize_false, materialize_true, expr->id());
+  set_current_block(join);
+  if (join != NULL) ast_context()->ReturnValue(Pop());
+}
+
+
+HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
+                                            CountOperation* expr) {
+  // The input to the count operation is on top of the expression stack.
+  TypeInfo info = oracle()->IncrementType(expr);
+  Representation rep = ToRepresentation(info);
+  if (rep.IsTagged()) {
+    rep = Representation::Integer32();
+  }
+
+  if (returns_original_input) {
+    // We need an explicit HValue representing ToNumber(input).  The
+    // actual HChange instruction we need is (sometimes) added in a later
+    // phase, so it is not available now to be used as an input to HAdd and
+    // as the return value.
+    HInstruction* number_input = new(zone()) HForceRepresentation(Pop(), rep);
+    AddInstruction(number_input);
+    Push(number_input);
+  }
+
+  // The addition has no side effects, so we do not need
+  // to simulate the expression stack after this instruction.
+  // Any later failures deopt to the load of the input or earlier.
+  HConstant* delta = (expr->op() == Token::INC)
       ? graph_->GetConstant1()
       : graph_->GetConstantMinus1();
-  HInstruction* instr = new(zone()) HAdd(value, delta);
-  AssumeRepresentation(instr,  Representation::Integer32());
+  HInstruction* instr = new(zone()) HAdd(Top(), delta);
+  TraceRepresentation(expr->op(), info, instr, rep);
+  instr->AssumeRepresentation(rep);
+  AddInstruction(instr);
   return instr;
 }
 
 
 void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   Expression* target = expr->expression();
   VariableProxy* proxy = target->AsVariableProxy();
   Variable* var = proxy->AsVariable();
   Property* prop = target->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
-  bool inc = expr->op() == Token::INC;
+  if (var == NULL && prop == NULL) {
+    return Bailout("invalid lhs in count operation");
+  }
+
+  // Match the full code generator stack by simulating an extra stack
+  // element for postfix operations in a non-effect context.  The return
+  // value is ToNumber(input).
+  bool returns_original_input =
+      expr->is_postfix() && !ast_context()->IsEffect();
+  HValue* input = NULL;  // ToNumber(original_input).
+  HValue* after = NULL;  // The result after incrementing or decrementing.
 
   if (var != NULL) {
-    VISIT_FOR_VALUE(target);
+    // Argument of the count operation is a variable, not a property.
+    ASSERT(prop == NULL);
+    CHECK_ALIVE(VisitForValue(target));
 
-    // Match the full code generator stack by simulating an extra stack
-    // element for postfix operations in a non-effect context.
-    bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
-    HValue* before = has_extra ? Top() : Pop();
-    HInstruction* after = BuildIncrement(before, inc);
-    AddInstruction(after);
+    after = BuildIncrement(returns_original_input, expr);
+    input = returns_original_input ? Top() : Pop();
     Push(after);
 
     if (var->is_global()) {
@@ -4756,23 +4965,19 @@
       AddInstruction(instr);
       if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
     } else {
-      BAILOUT("lookup variable in count operation");
+      return Bailout("lookup variable in count operation");
     }
-    Drop(has_extra ? 2 : 1);
-    ast_context()->ReturnValue(expr->is_postfix() ? before : after);
 
-  } else if (prop != NULL) {
+  } else {
+    // Argument of the count operation is a property.
+    ASSERT(prop != NULL);
     prop->RecordTypeFeedback(oracle());
 
     if (prop->key()->IsPropertyName()) {
       // Named property.
+      if (returns_original_input) Push(graph_->GetConstantUndefined());
 
-      // Match the full code generator stack by simulating an extra stack
-      // element for postfix operations in a non-effect context.
-      bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
-      if (has_extra) Push(graph_->GetConstantUndefined());
-
-      VISIT_FOR_VALUE(prop->obj());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* obj = Top();
 
       HInstruction* load = NULL;
@@ -4786,11 +4991,8 @@
       PushAndAdd(load);
       if (load->HasSideEffects()) AddSimulate(expr->CountId());
 
-      HValue* before = Pop();
-      // There is no deoptimization to after the increment, so we don't need
-      // to simulate the expression stack after this instruction.
-      HInstruction* after = BuildIncrement(before, inc);
-      AddInstruction(after);
+      after = BuildIncrement(returns_original_input, expr);
+      input = Pop();
 
       HInstruction* store = BuildStoreNamed(obj, after, prop);
       AddInstruction(store);
@@ -4799,22 +5001,15 @@
       // of the operation, and the placeholder with the original value if
       // necessary.
       environment()->SetExpressionStackAt(0, after);
-      if (has_extra) environment()->SetExpressionStackAt(1, before);
+      if (returns_original_input) environment()->SetExpressionStackAt(1, input);
       if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
-      Drop(has_extra ? 2 : 1);
-
-      ast_context()->ReturnValue(expr->is_postfix() ? before : after);
 
     } else {
       // Keyed property.
+      if (returns_original_input) Push(graph_->GetConstantUndefined());
 
-      // Match the full code generator stack by simulate an extra stack element
-      // for postfix operations in a non-effect context.
-      bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
-      if (has_extra) Push(graph_->GetConstantUndefined());
-
-      VISIT_FOR_VALUE(prop->obj());
-      VISIT_FOR_VALUE(prop->key());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
+      CHECK_ALIVE(VisitForValue(prop->key()));
       HValue* obj = environment()->ExpressionStackAt(1);
       HValue* key = environment()->ExpressionStackAt(0);
 
@@ -4822,11 +5017,8 @@
       PushAndAdd(load);
       if (load->HasSideEffects()) AddSimulate(expr->CountId());
 
-      HValue* before = Pop();
-      // There is no deoptimization to after the increment, so we don't need
-      // to simulate the expression stack after this instruction.
-      HInstruction* after = BuildIncrement(before, inc);
-      AddInstruction(after);
+      after = BuildIncrement(returns_original_input, expr);
+      input = Pop();
 
       expr->RecordTypeFeedback(oracle());
       HInstruction* store = BuildStoreKeyed(obj, key, after, expr);
@@ -4837,24 +5029,32 @@
       // original value if necessary.
       Drop(1);
       environment()->SetExpressionStackAt(0, after);
-      if (has_extra) environment()->SetExpressionStackAt(1, before);
+      if (returns_original_input) environment()->SetExpressionStackAt(1, input);
       if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
-      Drop(has_extra ? 2 : 1);
-
-      ast_context()->ReturnValue(expr->is_postfix() ? before : after);
     }
-
-  } else {
-    BAILOUT("invalid lhs in count operation");
   }
+
+  Drop(returns_original_input ? 2 : 1);
+  ast_context()->ReturnValue(expr->is_postfix() ? input : after);
+}
+
+
+HCompareSymbolEq* HGraphBuilder::BuildSymbolCompare(HValue* left,
+                                                    HValue* right,
+                                                    Token::Value op) {
+  ASSERT(op == Token::EQ || op == Token::EQ_STRICT);
+  AddInstruction(new(zone()) HCheckNonSmi(left));
+  AddInstruction(HCheckInstanceType::NewIsSymbol(left));
+  AddInstruction(new(zone()) HCheckNonSmi(right));
+  AddInstruction(HCheckInstanceType::NewIsSymbol(right));
+  return new(zone()) HCompareSymbolEq(left, right, op);
 }
 
 
 HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
                                                         HValue* index) {
   AddInstruction(new(zone()) HCheckNonSmi(string));
-  AddInstruction(new(zone()) HCheckInstanceType(
-      string, FIRST_STRING_TYPE, LAST_STRING_TYPE));
+  AddInstruction(HCheckInstanceType::NewIsString(string));
   HStringLength* length = new(zone()) HStringLength(string);
   AddInstruction(length);
   HInstruction* checked_index =
@@ -4866,45 +5066,8 @@
 HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
                                                   HValue* left,
                                                   HValue* right) {
-  HInstruction* instr = NULL;
-  switch (expr->op()) {
-    case Token::ADD:
-      instr = new(zone()) HAdd(left, right);
-      break;
-    case Token::SUB:
-      instr = new(zone()) HSub(left, right);
-      break;
-    case Token::MUL:
-      instr = new(zone()) HMul(left, right);
-      break;
-    case Token::MOD:
-      instr = new(zone()) HMod(left, right);
-      break;
-    case Token::DIV:
-      instr = new(zone()) HDiv(left, right);
-      break;
-    case Token::BIT_XOR:
-      instr = new(zone()) HBitXor(left, right);
-      break;
-    case Token::BIT_AND:
-      instr = new(zone()) HBitAnd(left, right);
-      break;
-    case Token::BIT_OR:
-      instr = new(zone()) HBitOr(left, right);
-      break;
-    case Token::SAR:
-      instr = new(zone()) HSar(left, right);
-      break;
-    case Token::SHR:
-      instr = new(zone()) HShr(left, right);
-      break;
-    case Token::SHL:
-      instr = new(zone()) HShl(left, right);
-      break;
-    default:
-      UNREACHABLE();
-  }
   TypeInfo info = oracle()->BinaryType(expr);
+  HInstruction* instr = BuildBinaryOperation(expr->op(), left, right, info);
   // If we hit an uninitialized binary op stub we will get type info
   // for a smi operation. If one of the operands is a constant string
   // do not generate code assuming it is a smi operation.
@@ -4913,19 +5076,47 @@
        (right->IsConstant() && HConstant::cast(right)->HasStringValue()))) {
     return instr;
   }
-  if (FLAG_trace_representation) {
-    PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic());
-  }
   Representation rep = ToRepresentation(info);
   // We only generate either int32 or generic tagged bitwise operations.
   if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) {
     rep = Representation::Integer32();
   }
-  AssumeRepresentation(instr, rep);
+  TraceRepresentation(expr->op(), info, instr, rep);
+  instr->AssumeRepresentation(rep);
   return instr;
 }
 
 
+HInstruction* HGraphBuilder::BuildBinaryOperation(
+    Token::Value op, HValue* left, HValue* right, TypeInfo info) {
+  switch (op) {
+    case Token::ADD:
+      if (info.IsString()) {
+        AddInstruction(new(zone()) HCheckNonSmi(left));
+        AddInstruction(HCheckInstanceType::NewIsString(left));
+        AddInstruction(new(zone()) HCheckNonSmi(right));
+        AddInstruction(HCheckInstanceType::NewIsString(right));
+        return new(zone()) HStringAdd(left, right);
+      } else {
+        return new(zone()) HAdd(left, right);
+      }
+    case Token::SUB: return new(zone()) HSub(left, right);
+    case Token::MUL: return new(zone()) HMul(left, right);
+    case Token::MOD: return new(zone()) HMod(left, right);
+    case Token::DIV: return new(zone()) HDiv(left, right);
+    case Token::BIT_XOR: return new(zone()) HBitXor(left, right);
+    case Token::BIT_AND: return new(zone()) HBitAnd(left, right);
+    case Token::BIT_OR: return new(zone()) HBitOr(left, right);
+    case Token::SAR: return new(zone()) HSar(left, right);
+    case Token::SHR: return new(zone()) HShr(left, right);
+    case Token::SHL: return new(zone()) HShl(left, right);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
 // Check for the form (%_ClassOf(foo) === 'BarClass').
 static bool IsClassOfTest(CompareOperation* expr) {
   if (expr->op() != Token::EQ_STRICT) return false;
@@ -4941,113 +5132,144 @@
 
 
 void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
-  if (expr->op() == Token::COMMA) {
-    VISIT_FOR_EFFECT(expr->left());
-    // Visit the right subexpression in the same AST context as the entire
-    // expression.
-    Visit(expr->right());
-
-  } else if (expr->op() == Token::AND || expr->op() == Token::OR) {
-    bool is_logical_and = (expr->op() == Token::AND);
-    if (ast_context()->IsTest()) {
-      TestContext* context = TestContext::cast(ast_context());
-      // Translate left subexpression.
-      HBasicBlock* eval_right = graph()->CreateBasicBlock();
-      if (is_logical_and) {
-        VISIT_FOR_CONTROL(expr->left(), eval_right, context->if_false());
-      } else {
-        VISIT_FOR_CONTROL(expr->left(), context->if_true(), eval_right);
-      }
-      eval_right->SetJoinId(expr->RightId());
-
-      // Translate right subexpression by visiting it in the same AST
-      // context as the entire expression.
-      set_current_block(eval_right);
-      Visit(expr->right());
-
-    } else if (ast_context()->IsValue()) {
-      VISIT_FOR_VALUE(expr->left());
-      ASSERT(current_block() != NULL);
-
-      // We need an extra block to maintain edge-split form.
-      HBasicBlock* empty_block = graph()->CreateBasicBlock();
-      HBasicBlock* eval_right = graph()->CreateBasicBlock();
-      HTest* test = is_logical_and
-          ? new(zone()) HTest(Top(), eval_right, empty_block)
-          : new(zone()) HTest(Top(), empty_block, eval_right);
-      current_block()->Finish(test);
-
-      set_current_block(eval_right);
-      Drop(1);  // Value of the left subexpression.
-      VISIT_FOR_VALUE(expr->right());
-
-      HBasicBlock* join_block =
-          CreateJoin(empty_block, current_block(), expr->id());
-      set_current_block(join_block);
-      ast_context()->ReturnValue(Pop());
-
-    } else {
-      ASSERT(ast_context()->IsEffect());
-      // In an effect context, we don't need the value of the left
-      // subexpression, only its control flow and side effects.  We need an
-      // extra block to maintain edge-split form.
-      HBasicBlock* empty_block = graph()->CreateBasicBlock();
-      HBasicBlock* right_block = graph()->CreateBasicBlock();
-      HBasicBlock* join_block = graph()->CreateBasicBlock();
-      if (is_logical_and) {
-        VISIT_FOR_CONTROL(expr->left(), right_block, empty_block);
-      } else {
-        VISIT_FOR_CONTROL(expr->left(), empty_block, right_block);
-      }
-      // TODO(kmillikin): Find a way to fix this.  It's ugly that there are
-      // actually two empty blocks (one here and one inserted by
-      // TestContext::BuildBranch, and that they both have an HSimulate
-      // though the second one is not a merge node, and that we really have
-      // no good AST ID to put on that first HSimulate.
-      empty_block->SetJoinId(expr->id());
-      right_block->SetJoinId(expr->RightId());
-      set_current_block(right_block);
-      VISIT_FOR_EFFECT(expr->right());
-
-      empty_block->Goto(join_block);
-      current_block()->Goto(join_block);
-      join_block->SetJoinId(expr->id());
-      set_current_block(join_block);
-      // We did not materialize any value in the predecessor environments,
-      // so there is no need to handle it here.
-    }
-
-  } else {
-    VISIT_FOR_VALUE(expr->left());
-    VISIT_FOR_VALUE(expr->right());
-
-    HValue* right = Pop();
-    HValue* left = Pop();
-    HInstruction* instr = BuildBinaryOperation(expr, left, right);
-    instr->set_position(expr->position());
-    ast_context()->ReturnInstruction(instr, expr->id());
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  switch (expr->op()) {
+    case Token::COMMA: return VisitComma(expr);
+    case Token::OR: return VisitAndOr(expr, false);
+    case Token::AND: return VisitAndOr(expr, true);
+    default: return VisitCommon(expr);
   }
 }
 
 
-void HGraphBuilder::AssumeRepresentation(HValue* value, Representation r) {
-  if (value->CheckFlag(HValue::kFlexibleRepresentation)) {
-    if (FLAG_trace_representation) {
-      PrintF("Assume representation for %s to be %s (%d)\n",
-             value->Mnemonic(),
-             r.Mnemonic(),
-             graph_->GetMaximumValueID());
+void HGraphBuilder::VisitComma(BinaryOperation* expr) {
+  CHECK_ALIVE(VisitForEffect(expr->left()));
+  // Visit the right subexpression in the same AST context as the entire
+  // expression.
+  Visit(expr->right());
+}
+
+
+void HGraphBuilder::VisitAndOr(BinaryOperation* expr, bool is_logical_and) {
+  if (ast_context()->IsTest()) {
+    TestContext* context = TestContext::cast(ast_context());
+    // Translate left subexpression.
+    HBasicBlock* eval_right = graph()->CreateBasicBlock();
+    if (is_logical_and) {
+      CHECK_BAILOUT(VisitForControl(expr->left(),
+                                    eval_right,
+                                    context->if_false()));
+    } else {
+      CHECK_BAILOUT(VisitForControl(expr->left(),
+                                    context->if_true(),
+                                    eval_right));
     }
-    value->ChangeRepresentation(r);
-    // The representation of the value is dictated by type feedback and
-    // will not be changed later.
-    value->ClearFlag(HValue::kFlexibleRepresentation);
-  } else if (FLAG_trace_representation) {
-    PrintF("No representation assumed\n");
+
+    // Translate right subexpression by visiting it in the same AST
+    // context as the entire expression.
+    if (eval_right->HasPredecessor()) {
+      eval_right->SetJoinId(expr->RightId());
+      set_current_block(eval_right);
+      Visit(expr->right());
+    }
+
+  } else if (ast_context()->IsValue()) {
+    CHECK_ALIVE(VisitForValue(expr->left()));
+    ASSERT(current_block() != NULL);
+
+    // We need an extra block to maintain edge-split form.
+    HBasicBlock* empty_block = graph()->CreateBasicBlock();
+    HBasicBlock* eval_right = graph()->CreateBasicBlock();
+    HTest* test = is_logical_and
+      ? new(zone()) HTest(Top(), eval_right, empty_block)
+      : new(zone()) HTest(Top(), empty_block, eval_right);
+    current_block()->Finish(test);
+
+    set_current_block(eval_right);
+    Drop(1);  // Value of the left subexpression.
+    CHECK_BAILOUT(VisitForValue(expr->right()));
+
+    HBasicBlock* join_block =
+      CreateJoin(empty_block, current_block(), expr->id());
+    set_current_block(join_block);
+    ast_context()->ReturnValue(Pop());
+
+  } else {
+    ASSERT(ast_context()->IsEffect());
+    // In an effect context, we don't need the value of the left subexpression,
+    // only its control flow and side effects.  We need an extra block to
+    // maintain edge-split form.
+    HBasicBlock* empty_block = graph()->CreateBasicBlock();
+    HBasicBlock* right_block = graph()->CreateBasicBlock();
+    if (is_logical_and) {
+      CHECK_BAILOUT(VisitForControl(expr->left(), right_block, empty_block));
+    } else {
+      CHECK_BAILOUT(VisitForControl(expr->left(), empty_block, right_block));
+    }
+
+    // TODO(kmillikin): Find a way to fix this.  It's ugly that there are
+    // actually two empty blocks (one here and one inserted by
+    // TestContext::BuildBranch), and that they both have an HSimulate though the
+    // second one is not a merge node, and that we really have no good AST ID to
+    // put on that first HSimulate.
+
+    if (empty_block->HasPredecessor()) {
+      empty_block->SetJoinId(expr->id());
+    } else {
+      empty_block = NULL;
+    }
+
+    if (right_block->HasPredecessor()) {
+      right_block->SetJoinId(expr->RightId());
+      set_current_block(right_block);
+      CHECK_BAILOUT(VisitForEffect(expr->right()));
+      right_block = current_block();
+    } else {
+      right_block = NULL;
+    }
+
+    HBasicBlock* join_block =
+      CreateJoin(empty_block, right_block, expr->id());
+    set_current_block(join_block);
+    // We did not materialize any value in the predecessor environments,
+    // so there is no need to handle it here.
   }
 }
 
 
+void HGraphBuilder::VisitCommon(BinaryOperation* expr) {
+  CHECK_ALIVE(VisitForValue(expr->left()));
+  CHECK_ALIVE(VisitForValue(expr->right()));
+  HValue* right = Pop();
+  HValue* left = Pop();
+  HInstruction* instr = BuildBinaryOperation(expr, left, right);
+  instr->set_position(expr->position());
+  ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::TraceRepresentation(Token::Value op,
+                                        TypeInfo info,
+                                        HValue* value,
+                                        Representation rep) {
+  if (!FLAG_trace_representation) return;
+  // TODO(svenpanne) Under which circumstances are we actually not flexible?
+  // At first glance, this looks a bit weird...
+  bool flexible = value->CheckFlag(HValue::kFlexibleRepresentation);
+  PrintF("Operation %s has type info %s, %schange representation assumption "
+         "for %s (ID %d) from %s to %s\n",
+         Token::Name(op),
+         info.ToString(),
+         flexible ? "" : " DO NOT ",
+         value->Mnemonic(),
+         graph_->GetMaximumValueID(),
+         value->representation().Mnemonic(),
+         rep.Mnemonic());
+}
+
+
 Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
   if (info.IsSmi()) return Representation::Integer32();
   if (info.IsInteger32()) return Representation::Integer32();
@@ -5058,9 +5280,12 @@
 
 
 void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (IsClassOfTest(expr)) {
     CallRuntime* call = expr->left()->AsCallRuntime();
-    VISIT_FOR_VALUE(call->arguments()->at(0));
+    CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
     HValue* value = Pop();
     Literal* literal = expr->right()->AsLiteral();
     Handle<String> rhs = Handle<String>::cast(literal->handle());
@@ -5076,8 +5301,7 @@
   if ((expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT) &&
       left_unary != NULL && left_unary->op() == Token::TYPEOF &&
       right_literal != NULL && right_literal->handle()->IsString()) {
-    VisitForTypeOf(left_unary->expression());
-    if (HasStackOverflow()) return;
+    CHECK_ALIVE(VisitForTypeOf(left_unary->expression()));
     HValue* left = Pop();
     HInstruction* instr = new(zone()) HTypeofIs(left,
         Handle<String>::cast(right_literal->handle()));
@@ -5086,8 +5310,8 @@
     return;
   }
 
-  VISIT_FOR_VALUE(expr->left());
-  VISIT_FOR_VALUE(expr->right());
+  CHECK_ALIVE(VisitForValue(expr->left()));
+  CHECK_ALIVE(VisitForValue(expr->right()));
 
   HValue* right = Pop();
   HValue* left = Pop();
@@ -5124,15 +5348,14 @@
     // If the target is not null we have found a known global function that is
     // assumed to stay the same for this instanceof.
     if (target.is_null()) {
-      HContext* context = new(zone()) HContext;
-      AddInstruction(context);
+      HValue* context = environment()->LookupContext();
       instr = new(zone()) HInstanceOf(context, left, right);
     } else {
       AddInstruction(new(zone()) HCheckFunction(right, target));
       instr = new(zone()) HInstanceOfKnownGlobal(left, target);
     }
   } else if (op == Token::IN) {
-    BAILOUT("Unsupported comparison: in");
+    instr = new(zone()) HIn(left, right);
   } else if (type_info.IsNonPrimitive()) {
     switch (op) {
       case Token::EQ:
@@ -5145,9 +5368,12 @@
         break;
       }
       default:
-        BAILOUT("Unsupported non-primitive compare");
+        return Bailout("Unsupported non-primitive compare");
         break;
     }
+  } else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
+             (op == Token::EQ || op == Token::EQ_STRICT)) {
+    instr = BuildSymbolCompare(left, right, op);
   } else {
     HCompare* compare = new(zone()) HCompare(left, right, op);
     Representation r = ToRepresentation(type_info);
@@ -5160,7 +5386,10 @@
 
 
 void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
-  VISIT_FOR_VALUE(expr->expression());
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  CHECK_ALIVE(VisitForValue(expr->expression()));
 
   HValue* value = Pop();
   HIsNull* compare = new(zone()) HIsNull(value, expr->is_strict());
@@ -5169,7 +5398,10 @@
 
 
 void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
-  BAILOUT("ThisFunction");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("ThisFunction");
 }
 
 
@@ -5184,7 +5416,7 @@
       (slot != NULL && slot->type() == Slot::LOOKUP) ||
       decl->mode() == Variable::CONST ||
       decl->fun() != NULL) {
-    BAILOUT("unsupported declaration");
+    return Bailout("unsupported declaration");
   }
 }
 
@@ -5193,7 +5425,7 @@
 // Support for types.
 void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HIsSmi* result = new(zone()) HIsSmi(value);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5202,7 +5434,7 @@
 
 void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceType* result =
       new(zone()) HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
@@ -5212,7 +5444,7 @@
 
 void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceType* result =
       new(zone()) HHasInstanceType(value, JS_FUNCTION_TYPE);
@@ -5222,7 +5454,7 @@
 
 void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasCachedArrayIndex* result = new(zone()) HHasCachedArrayIndex(value);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5231,7 +5463,7 @@
 
 void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceType* result = new(zone()) HHasInstanceType(value, JS_ARRAY_TYPE);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5240,7 +5472,7 @@
 
 void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceType* result =
       new(zone()) HHasInstanceType(value, JS_REGEXP_TYPE);
@@ -5250,7 +5482,7 @@
 
 void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HIsObject* test = new(zone()) HIsObject(value);
   ast_context()->ReturnInstruction(test, call->id());
@@ -5258,18 +5490,23 @@
 
 
 void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsNonNegativeSmi");
+  return Bailout("inlined runtime function: IsNonNegativeSmi");
 }
 
 
 void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsUndetectableObject");
+  ASSERT(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  ast_context()->ReturnInstruction(new(zone()) HIsUndetectable(value),
+                                   call->id());
 }
 
 
 void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
+  return Bailout(
+      "inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
 }
 
 
@@ -5306,7 +5543,7 @@
   // function is blacklisted by AstNode::IsInlineable.
   ASSERT(function_state()->outer() == NULL);
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* index = Pop();
   HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
   HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
@@ -5320,13 +5557,13 @@
 void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
   // The special form detected by IsClassOfTest is detected before we get here
   // and does not cause a bailout.
-  BAILOUT("inlined runtime function: ClassOf");
+  return Bailout("inlined runtime function: ClassOf");
 }
 
 
 void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HValueOf* result = new(zone()) HValueOf(value);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5334,15 +5571,15 @@
 
 
 void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
-  BAILOUT("inlined runtime function: SetValueOf");
+  return Bailout("inlined runtime function: SetValueOf");
 }
 
 
 // Fast support for charCodeAt(n).
 void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* index = Pop();
   HValue* string = Pop();
   HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
@@ -5353,7 +5590,7 @@
 // Fast support for string.charAt(n) and string[n].
 void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* char_code = Pop();
   HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5363,8 +5600,8 @@
 // Fast support for string.charAt(n) and string[n].
 void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* index = Pop();
   HValue* string = Pop();
   HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
@@ -5377,8 +5614,8 @@
 // Fast support for object equality testing.
 void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* right = Pop();
   HValue* left = Pop();
   HCompareJSObjectEq* result = new(zone()) HCompareJSObjectEq(left, right);
@@ -5394,17 +5631,15 @@
 
 // Fast support for Math.random().
 void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
-  BAILOUT("inlined runtime function: RandomHeapNumber");
+  return Bailout("inlined runtime function: RandomHeapNumber");
 }
 
 
 // Fast support for StringAdd.
 void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result = new(zone()) HCallStub(context, CodeStub::StringAdd, 2);
   Drop(2);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5414,10 +5649,8 @@
 // Fast support for SubString.
 void HGraphBuilder::GenerateSubString(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
   Drop(3);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5427,10 +5660,8 @@
 // Fast support for StringCompare.
 void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
   Drop(2);
@@ -5441,10 +5672,8 @@
 // Support for direct calls from JavaScript to native RegExp code.
 void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
   ASSERT_EQ(4, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
   Drop(4);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5454,10 +5683,8 @@
 // Construct a RegExp exec result with two in-object properties.
 void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
   Drop(3);
@@ -5467,17 +5694,15 @@
 
 // Support for fast native caches.
 void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
-  BAILOUT("inlined runtime function: GetFromCache");
+  return Bailout("inlined runtime function: GetFromCache");
 }
 
 
 // Fast support for number to string.
 void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
   Drop(1);
@@ -5489,21 +5714,34 @@
 // indices. This should only be used if the indices are known to be
 // non-negative and within bounds of the elements array at the call site.
 void HGraphBuilder::GenerateSwapElements(CallRuntime* call) {
-  BAILOUT("inlined runtime function: SwapElements");
+  return Bailout("inlined runtime function: SwapElements");
 }
 
 
 // Fast call for custom callbacks.
 void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
-  BAILOUT("inlined runtime function: CallFunction");
+  // 1 ~ The function to call is not itself an argument to the call.
+  int arg_count = call->arguments()->length() - 1;
+  ASSERT(arg_count >= 1);  // There's always at least a receiver.
+
+  for (int i = 0; i < arg_count; ++i) {
+    CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
+  }
+  CHECK_ALIVE(VisitForValue(call->arguments()->last()));
+  HValue* function = Pop();
+  HValue* context = environment()->LookupContext();
+  HInvokeFunction* result =
+      new(zone()) HInvokeFunction(context, function, arg_count);
+  Drop(arg_count);
+  ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Fast call to math functions.
 void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* right = Pop();
   HValue* left = Pop();
   HPower* result = new(zone()) HPower(left, right);
@@ -5513,10 +5751,8 @@
 
 void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::SIN);
@@ -5527,10 +5763,8 @@
 
 void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::COS);
@@ -5541,10 +5775,8 @@
 
 void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new(zone()) HContext;
-  AddInstruction(context);
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HValue* context = environment()->LookupContext();
   HCallStub* result =
       new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::LOG);
@@ -5554,19 +5786,19 @@
 
 
 void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
-  BAILOUT("inlined runtime function: MathSqrt");
+  return Bailout("inlined runtime function: MathSqrt");
 }
 
 
 // Check whether two RegExps are equivalent
 void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsRegExpEquivalent");
+  return Bailout("inlined runtime function: IsRegExpEquivalent");
 }
 
 
 void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5574,15 +5806,12 @@
 
 
 void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
-  BAILOUT("inlined runtime function: FastAsciiArrayJoin");
+  return Bailout("inlined runtime function: FastAsciiArrayJoin");
 }
 
 
-#undef BAILOUT
 #undef CHECK_BAILOUT
-#undef VISIT_FOR_EFFECT
-#undef VISIT_FOR_VALUE
-#undef ADD_TO_SUBGRAPH
+#undef CHECK_ALIVE
 
 
 HEnvironment::HEnvironment(HEnvironment* outer,
@@ -5592,6 +5821,7 @@
       values_(0),
       assigned_variables_(4),
       parameter_count_(0),
+      specials_count_(1),
       local_count_(0),
       outer_(outer),
       pop_count_(0),
@@ -5605,6 +5835,7 @@
     : values_(0),
       assigned_variables_(0),
       parameter_count_(0),
+      specials_count_(1),
       local_count_(0),
       outer_(NULL),
       pop_count_(0),
@@ -5621,7 +5852,7 @@
   local_count_ = local_count;
 
   // Avoid reallocating the temporaries' backing store on the first Push.
-  int total = parameter_count + local_count + stack_height;
+  int total = parameter_count + specials_count_ + local_count + stack_height;
   values_.Initialize(total + 4);
   for (int i = 0; i < total; ++i) values_.Add(NULL);
 }
@@ -5680,12 +5911,12 @@
 
 
 bool HEnvironment::HasExpressionAt(int index) const {
-  return index >= parameter_count_ + local_count_;
+  return index >= parameter_count_ + specials_count_ + local_count_;
 }
 
 
 bool HEnvironment::ExpressionStackIsEmpty() const {
-  int first_expression = parameter_count() + local_count();
+  int first_expression = parameter_count() + specials_count() + local_count();
   ASSERT(length() >= first_expression);
   return length() == first_expression;
 }
@@ -5738,10 +5969,12 @@
 }
 
 
-HEnvironment* HEnvironment::CopyForInlining(Handle<JSFunction> target,
-                                            FunctionLiteral* function,
-                                            bool is_speculative,
-                                            HConstant* undefined) const {
+HEnvironment* HEnvironment::CopyForInlining(
+    Handle<JSFunction> target,
+    FunctionLiteral* function,
+    CompilationPhase compilation_phase,
+    HConstant* undefined,
+    CallKind call_kind) const {
   // Outer environment is a copy of this one without the arguments.
   int arity = function->scope()->num_parameters();
   HEnvironment* outer = Copy();
@@ -5751,22 +5984,27 @@
   HEnvironment* inner =
       new(zone) HEnvironment(outer, function->scope(), target);
   // Get the argument values from the original environment.
-  if (is_speculative) {
+  if (compilation_phase == HYDROGEN) {
     for (int i = 0; i <= arity; ++i) {  // Include receiver.
       HValue* push = ExpressionStackAt(arity - i);
       inner->SetValueAt(i, push);
     }
   } else {
+    ASSERT(compilation_phase == LITHIUM);
     for (int i = 0; i <= arity; ++i) {  // Include receiver.
-      inner->SetValueAt(i, ExpressionStackAt(arity - i));
+      HValue* push = ExpressionStackAt(arity - i);
+      inner->SetValueAt(i, push);
     }
   }
-
-  // Initialize the stack-allocated locals to undefined.
-  int local_base = arity + 1;
-  int local_count = function->scope()->num_stack_slots();
-  for (int i = 0; i < local_count; ++i) {
-    inner->SetValueAt(local_base + i, undefined);
+  // If the function we are inlining is a strict mode function, pass
+  // undefined as the receiver for function calls (instead of the
+  // global receiver).
+  if (function->strict_mode() && call_kind == CALL_AS_FUNCTION) {
+    inner->SetValueAt(0, undefined);
+  }
+  inner->SetValueAt(arity + 1, outer->LookupContext());
+  for (int i = arity + 2; i < inner->length(); ++i) {
+    inner->SetValueAt(i, undefined);
   }
 
   inner->set_ast_id(AstNode::kFunctionEntryId);
@@ -5777,8 +6015,11 @@
 void HEnvironment::PrintTo(StringStream* stream) {
   for (int i = 0; i < length(); i++) {
     if (i == 0) stream->Add("parameters\n");
-    if (i == parameter_count()) stream->Add("locals\n");
-    if (i == parameter_count() + local_count()) stream->Add("expressions");
+    if (i == parameter_count()) stream->Add("specials\n");
+    if (i == parameter_count() + specials_count()) stream->Add("locals\n");
+    if (i == parameter_count() + specials_count() + local_count()) {
+      stream->Add("expressions");
+    }
     HValue* val = values_.at(i);
     stream->Add("%d: ", i);
     if (val != NULL) {
@@ -5873,10 +6114,11 @@
       Tag states_tag(this, "states");
       Tag locals_tag(this, "locals");
       int total = current->phis()->length();
-      trace_.Add("size %d\n", total);
-      trace_.Add("method \"None\"");
+      PrintIntProperty("size", current->phis()->length());
+      PrintStringProperty("method", "None");
       for (int j = 0; j < total; ++j) {
         HPhi* phi = current->phis()->at(j);
+        PrintIndent();
         trace_.Add("%d ", phi->merged_index());
         phi->PrintNameTo(&trace_);
         trace_.Add(" ");
@@ -5890,7 +6132,8 @@
       HInstruction* instruction = current->first();
       while (instruction != NULL) {
         int bci = 0;
-        int uses = instruction->uses()->length();
+        int uses = instruction->UseCount();
+        PrintIndent();
         trace_.Add("%d %d ", bci, uses);
         instruction->PrintNameTo(&trace_);
         trace_.Add(" ");
@@ -5910,6 +6153,7 @@
         for (int i = first_index; i <= last_index; ++i) {
           LInstruction* linstr = instructions->at(i);
           if (linstr != NULL) {
+            PrintIndent();
             trace_.Add("%d ",
                        LifetimePosition::FromInstructionIndex(i).Value());
             linstr->PrintTo(&trace_);
@@ -5945,6 +6189,7 @@
 
 void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
   if (range != NULL && !range->IsEmpty()) {
+    PrintIndent();
     trace_.Add("%d %s", range->id(), type);
     if (range->HasRegisterAssigned()) {
       LOperand* op = range->CreateAssignedOperand();
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 37671f4..4d8a153 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -30,16 +30,18 @@
 
 #include "v8.h"
 
+#include "allocation.h"
 #include "ast.h"
 #include "compiler.h"
-#include "data-flow.h"
 #include "hydrogen-instructions.h"
+#include "type-info.h"
 #include "zone.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
+class BitVector;
 class HEnvironment;
 class HGraph;
 class HLoopInformation;
@@ -124,8 +126,8 @@
   void AddSimulate(int id) { AddInstruction(CreateSimulate(id)); }
   void AssignCommonDominator(HBasicBlock* other);
 
-  void FinishExitWithDeoptimization() {
-    FinishExit(CreateDeoptimize());
+  void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
+    FinishExit(CreateDeoptimize(has_uses));
   }
 
   // Add the inlined function exit sequence, adding an HLeaveInlined
@@ -152,7 +154,7 @@
   void AddDominatedBlock(HBasicBlock* block);
 
   HSimulate* CreateSimulate(int id);
-  HDeoptimize* CreateDeoptimize();
+  HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
 
   int block_id_;
   HGraph* graph_;
@@ -226,6 +228,10 @@
 
   // Returns false if there are phi-uses of the arguments-object
   // which are not supported by the optimizing compiler.
+  bool CheckPhis();
+
+  // Returns false if there are phi-uses of hole values coming
+  // from uninitialized consts.
   bool CollectPhis();
 
   Handle<Code> Compile(CompilationInfo* info);
@@ -280,11 +286,10 @@
   void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
   void RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi);
   void InsertRepresentationChangeForUse(HValue* value,
-                                        HValue* use,
+                                        HValue* use_value,
+                                        int use_index,
                                         Representation to);
-  void InsertRepresentationChangesForValue(HValue* current,
-                                           ZoneList<HValue*>* value_list,
-                                           ZoneList<Representation>* rep_list);
+  void InsertRepresentationChangesForValue(HValue* value);
   void InferTypes(ZoneList<HValue*>* worklist);
   void InitializeInferredTypes(int from_inclusive, int to_inclusive);
   void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
@@ -312,6 +317,8 @@
 
 class HEnvironment: public ZoneObject {
  public:
+  enum CompilationPhase { HYDROGEN, LITHIUM };
+
   HEnvironment(HEnvironment* outer,
                Scope* scope,
                Handle<JSFunction> closure);
@@ -323,6 +330,7 @@
     return &assigned_variables_;
   }
   int parameter_count() const { return parameter_count_; }
+  int specials_count() const { return specials_count_; }
   int local_count() const { return local_count_; }
   HEnvironment* outer() const { return outer_; }
   int pop_count() const { return pop_count_; }
@@ -332,6 +340,9 @@
   void set_ast_id(int id) { ast_id_ = id; }
 
   int length() const { return values_.length(); }
+  bool is_special_index(int i) const {
+    return i >= parameter_count() && i < parameter_count() + specials_count();
+  }
 
   void Bind(Variable* variable, HValue* value) {
     Bind(IndexFor(variable), value);
@@ -339,6 +350,10 @@
 
   void Bind(int index, HValue* value);
 
+  void BindContext(HValue* value) {
+    Bind(parameter_count(), value);
+  }
+
   HValue* Lookup(Variable* variable) const {
     return Lookup(IndexFor(variable));
   }
@@ -349,6 +364,11 @@
     return result;
   }
 
+  HValue* LookupContext() const {
+    // Return first special.
+    return Lookup(parameter_count());
+  }
+
   void Push(HValue* value) {
     ASSERT(value != NULL);
     ++push_count_;
@@ -369,6 +389,8 @@
 
   HValue* Top() const { return ExpressionStackAt(0); }
 
+  bool ExpressionStackIsEmpty() const;
+
   HValue* ExpressionStackAt(int index_from_top) const {
     int index = length() - index_from_top - 1;
     ASSERT(HasExpressionAt(index));
@@ -388,8 +410,9 @@
   // instructions, otherwise they are the actual values.
   HEnvironment* CopyForInlining(Handle<JSFunction> target,
                                 FunctionLiteral* function,
-                                bool is_speculative,
-                                HConstant* undefined) const;
+                                CompilationPhase compilation_phase,
+                                HConstant* undefined,
+                                CallKind call_kind) const;
 
   void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
 
@@ -413,8 +436,6 @@
   // True if index is included in the expression stack part of the environment.
   bool HasExpressionAt(int index) const;
 
-  bool ExpressionStackIsEmpty() const;
-
   void Initialize(int parameter_count, int local_count, int stack_height);
   void Initialize(const HEnvironment* other);
 
@@ -424,15 +445,18 @@
   int IndexFor(Variable* variable) const {
     Slot* slot = variable->AsSlot();
     ASSERT(slot != NULL && slot->IsStackAllocated());
-    int shift = (slot->type() == Slot::PARAMETER) ? 1 : parameter_count_;
+    int shift = (slot->type() == Slot::PARAMETER)
+        ? 1
+        : parameter_count_ + specials_count_;
     return slot->index() + shift;
   }
 
   Handle<JSFunction> closure_;
-  // Value array [parameters] [locals] [temporaries].
+  // Value array [parameters] [specials] [locals] [temporaries].
   ZoneList<HValue*> values_;
   ZoneList<int> assigned_variables_;
   int parameter_count_;
+  int specials_count_;
   int local_count_;
   HEnvironment* outer_;
   int pop_count_;
@@ -443,6 +467,11 @@
 
 class HGraphBuilder;
 
+enum ArgumentsAllowedFlag {
+  ARGUMENTS_NOT_ALLOWED,
+  ARGUMENTS_ALLOWED
+};
+
 // This class is not BASE_EMBEDDED because our inlining implementation uses
 // new and delete.
 class AstContext {
@@ -501,13 +530,18 @@
 
 class ValueContext: public AstContext {
  public:
-  explicit ValueContext(HGraphBuilder* owner)
-      : AstContext(owner, Expression::kValue) {
+  explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
+      : AstContext(owner, Expression::kValue), flag_(flag) {
   }
   virtual ~ValueContext();
 
   virtual void ReturnValue(HValue* value);
   virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+
+  bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
+
+ private:
+  ArgumentsAllowedFlag flag_;
 };
 
 
@@ -634,20 +668,7 @@
     BreakAndContinueScope* next_;
   };
 
-  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle)
-      : function_state_(NULL),
-        initial_function_state_(this, info, oracle),
-        ast_context_(NULL),
-        break_scope_(NULL),
-        graph_(NULL),
-        current_block_(NULL),
-        inlined_count_(0),
-        zone_(info->isolate()->zone()) {
-    // This is not initialized in the initializer list because the
-    // constructor for the initial state relies on function_state_ == NULL
-    // to know it's the initial state.
-    function_state_= &initial_function_state_;
-  }
+  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
 
   HGraph* CreateGraph();
 
@@ -662,6 +683,8 @@
     return current_block()->last_environment();
   }
 
+  bool inline_bailout() { return inline_bailout_; }
+
   // Adding instructions.
   HInstruction* AddInstruction(HInstruction* instr);
   void AddSimulate(int id);
@@ -670,6 +693,8 @@
   void Push(HValue* value) { environment()->Push(value); }
   HValue* Pop() { return environment()->Pop(); }
 
+  void Bailout(const char* reason);
+
  private:
   // Type of a member function that generates inline code for a native function.
   typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -724,7 +749,17 @@
   INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
 #undef INLINE_FUNCTION_GENERATOR_DECLARATION
 
-  void Bailout(const char* reason);
+  void VisitDelete(UnaryOperation* expr);
+  void VisitVoid(UnaryOperation* expr);
+  void VisitTypeof(UnaryOperation* expr);
+  void VisitAdd(UnaryOperation* expr);
+  void VisitSub(UnaryOperation* expr);
+  void VisitBitNot(UnaryOperation* expr);
+  void VisitNot(UnaryOperation* expr);
+
+  void VisitComma(BinaryOperation* expr);
+  void VisitAndOr(BinaryOperation* expr, bool is_logical_and);
+  void VisitCommon(BinaryOperation* expr);
 
   void PreProcessOsrEntry(IterationStatement* statement);
   // True iff. we are compiling for OSR and the statement is the entry.
@@ -755,7 +790,11 @@
   void Drop(int n) { environment()->Drop(n); }
   void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
 
-  void VisitForValue(Expression* expr);
+  // The value of the arguments object is allowed in some but not most value
+  // contexts.  (It's allowed in all effect contexts and disallowed in all
+  // test contexts.)
+  void VisitForValue(Expression* expr,
+                     ArgumentsAllowedFlag flag = ARGUMENTS_NOT_ALLOWED);
   void VisitForTypeOf(Expression* expr);
   void VisitForEffect(Expression* expr);
   void VisitForControl(Expression* expr,
@@ -778,7 +817,11 @@
   // to push them as outgoing parameters.
   template <int V> HInstruction* PreProcessCall(HCall<V>* call);
 
-  void AssumeRepresentation(HValue* value, Representation r);
+  void TraceRepresentation(Token::Value op,
+                           TypeInfo info,
+                           HValue* value,
+                           Representation rep);
+  void AssumeRepresentation(HValue* value, Representation rep);
   static Representation ToRepresentation(TypeInfo info);
 
   void SetupScope(Scope* scope);
@@ -814,7 +857,9 @@
   // If --trace-inlining, print a line of the inlining trace.  Inlining
   // succeeded if the reason string is NULL and failed if there is a
   // non-NULL reason string.
-  void TraceInline(Handle<JSFunction> target, const char* failure_reason);
+  void TraceInline(Handle<JSFunction> target,
+                   Handle<JSFunction> caller,
+                   const char* failure_reason);
 
   void HandleGlobalVariableAssignment(Variable* var,
                                       HValue* value,
@@ -833,12 +878,20 @@
                                   ZoneMapList* types,
                                   Handle<String> name);
 
+  HCompareSymbolEq* BuildSymbolCompare(HValue* left,
+                                       HValue* right,
+                                       Token::Value op);
   HStringCharCodeAt* BuildStringCharCodeAt(HValue* string,
                                            HValue* index);
   HInstruction* BuildBinaryOperation(BinaryOperation* expr,
                                      HValue* left,
                                      HValue* right);
-  HInstruction* BuildIncrement(HValue* value, bool increment);
+  HInstruction* BuildBinaryOperation(Token::Value op,
+                                     HValue* left,
+                                     HValue* right,
+                                     TypeInfo info);
+  HInstruction* BuildIncrement(bool returns_original_input,
+                               CountOperation* expr);
   HLoadNamedField* BuildLoadNamedField(HValue* object,
                                        Property* expr,
                                        Handle<Map> type,
@@ -923,6 +976,8 @@
 
   Zone* zone_;
 
+  bool inline_bailout_;
+
   friend class FunctionState;  // Pushes and pops the state stack.
   friend class AstContext;  // Pushes and pops the AST context stack.
 
@@ -957,9 +1012,11 @@
   HValue* Lookup(HValue* value) const;
 
   HValueMap* Copy(Zone* zone) const {
-    return new(zone) HValueMap(this);
+    return new(zone) HValueMap(zone, this);
   }
 
+  bool IsEmpty() const { return count_ == 0; }
+
  private:
   // A linked list of HValue* values.  Stored in arrays.
   struct HValueMapListElement {
@@ -971,7 +1028,7 @@
   // Must be a power of 2.
   static const int kInitialSize = 16;
 
-  explicit HValueMap(const HValueMap* other);
+  HValueMap(Zone* zone, const HValueMap* other);
 
   void Resize(int new_size);
   void ResizeLists(int new_size);
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index a9247f4..0ca2d6b 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 // A light-weight IA32 Assembler.
 
@@ -311,8 +311,12 @@
 }
 
 
-void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
-  if (rmode != RelocInfo::NONE) RecordRelocInfo(rmode);
+void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, unsigned id) {
+  if (rmode == RelocInfo::CODE_TARGET && id != kNoASTId) {
+    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, static_cast<intptr_t>(id));
+  } else if (rmode != RelocInfo::NONE) {
+    RecordRelocInfo(rmode);
+  }
   emit(x);
 }
 
@@ -376,6 +380,18 @@
 }
 
 
+void Assembler::emit_near_disp(Label* L) {
+  byte disp = 0x00;
+  if (L->is_near_linked()) {
+    int offset = L->near_link_pos() - pc_offset();
+    ASSERT(is_int8(offset));
+    disp = static_cast<byte>(offset & 0xFF);
+  }
+  L->link_to(pc_offset(), Label::kNear);
+  *pc_++ = disp;
+}
+
+
 void Operand::set_modrm(int mod, Register rm) {
   ASSERT((mod & -4) == 0);
   buf_[0] = mod << 6 | rm.code();
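Note on the near-label mechanism introduced above: emit_near_disp() threads each unresolved 8-bit displacement into a chain that runs through the displacement bytes themselves (each byte temporarily stores the non-positive offset back to the previous fixup), and the new loop in bind_to() in assembler-ia32.cc below walks that chain and patches in the real forward distances. A minimal standalone sketch of the scheme, assuming nothing from V8 beyond what these hunks show (NearLabelSketch, EmitNearDisp and BindNear are illustrative names, not V8 code):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

// Byte buffer standing in for the assembler's code buffer.
static std::vector<uint8_t> buf;

struct NearLabelSketch {
  bool linked = false;   // is there at least one unresolved near use?
  int last_fixup = 0;    // position of the most recent unresolved disp byte
};

// Emit a placeholder displacement byte and link it into the label's chain,
// mirroring what Assembler::emit_near_disp() does above.
static void EmitNearDisp(NearLabelSketch* l) {
  int8_t disp = 0;
  if (l->linked) {
    int offset = l->last_fixup - static_cast<int>(buf.size());
    assert(offset >= -128 && offset <= 0);   // must fit in a signed byte
    disp = static_cast<int8_t>(offset);
  }
  l->last_fixup = static_cast<int>(buf.size());
  l->linked = true;
  buf.push_back(static_cast<uint8_t>(disp));
}

// Bind the label at the current position: walk the chain and patch each byte
// with the distance to the bind position, as the new loop in bind_to() does.
static void BindNear(NearLabelSketch* l) {
  int pos = static_cast<int>(buf.size());
  while (l->linked) {
    int fixup = l->last_fixup;
    int8_t to_prev = static_cast<int8_t>(buf[fixup]);
    int disp = pos - fixup - 1;              // relative to the following byte
    assert(disp >= 0 && disp <= 127);
    buf[fixup] = static_cast<uint8_t>(disp);
    if (to_prev < 0) {
      l->last_fixup = fixup + to_prev;       // follow the chain backwards
    } else {
      l->linked = false;                     // offset 0 marks the chain's end
    }
  }
}

int main() {
  NearLabelSketch label;
  buf.push_back(0xEB); EmitNearDisp(&label);  // jmp short label
  buf.push_back(0x90);                        // nop
  buf.push_back(0x74); EmitNearDisp(&label);  // je  short label
  BindNear(&label);                           // label bound here
  for (uint8_t b : buf) std::printf("%02x ", b);
  std::printf("\n");                          // prints: eb 03 90 74 00
  return 0;
}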
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 9273037..a7602e7 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license above has been modified
 // significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -341,7 +341,6 @@
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
 
-  last_pc_ = NULL;
 #ifdef GENERATED_CODE_COVERAGE
   InitCoverageLog();
 #endif
@@ -389,7 +388,6 @@
 void Assembler::cpuid() {
   ASSERT(CpuFeatures::IsEnabled(CPUID));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xA2);
 }
@@ -397,35 +395,30 @@
 
 void Assembler::pushad() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x60);
 }
 
 
 void Assembler::popad() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x61);
 }
 
 
 void Assembler::pushfd() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x9C);
 }
 
 
 void Assembler::popfd() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x9D);
 }
 
 
 void Assembler::push(const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (x.is_int8()) {
     EMIT(0x6a);
     EMIT(x.x_);
@@ -445,14 +438,12 @@
 
 void Assembler::push(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x50 | src.code());
 }
 
 
 void Assembler::push(const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFF);
   emit_operand(esi, src);
 }
@@ -460,125 +451,13 @@
 
 void Assembler::pop(Register dst) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
-  if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
-    // (last_pc_ != NULL) is rolled into the above check.
-    // If a last_pc_ is set, we need to make sure that there has not been any
-    // relocation information generated between the last instruction and this
-    // pop instruction.
-    byte instr = last_pc_[0];
-    if ((instr & ~0x7) == 0x50) {
-      int push_reg_code = instr & 0x7;
-      if (push_reg_code == dst.code()) {
-        pc_ = last_pc_;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
-        }
-      } else {
-        // Convert 'push src; pop dst' to 'mov dst, src'.
-        last_pc_[0] = 0x8b;
-        Register src = { push_reg_code };
-        EnsureSpace ensure_space(this);
-        emit_operand(dst, Operand(src));
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
-        }
-      }
-      last_pc_ = NULL;
-      return;
-    } else if (instr == 0xff) {  // push of an operand, convert to a move
-      byte op1 = last_pc_[1];
-      // Check if the operation is really a push.
-      if ((op1 & 0x38) == (6 << 3)) {
-        op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
-        last_pc_[0] = 0x8b;
-        last_pc_[1] = op1;
-        last_pc_ = NULL;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
-        }
-        return;
-      }
-    } else if ((instr == 0x89) &&
-               (last_pc_[1] == 0x04) &&
-               (last_pc_[2] == 0x24)) {
-      // 0x71283c   396  890424         mov [esp],eax
-      // 0x71283f   399  58             pop eax
-      if (dst.is(eax)) {
-        // change to
-        // 0x710fac   216  83c404         add esp,0x4
-        last_pc_[0] = 0x83;
-        last_pc_[1] = 0xc4;
-        last_pc_[2] = 0x04;
-        last_pc_ = NULL;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
-        }
-        return;
-      }
-    } else if (instr == 0x6a && dst.is(eax)) {  // push of immediate 8 bit
-      byte imm8 = last_pc_[1];
-      if (imm8 == 0) {
-        // 6a00         push 0x0
-        // 58           pop eax
-        last_pc_[0] = 0x31;
-        last_pc_[1] = 0xc0;
-        // change to
-        // 31c0         xor eax,eax
-        last_pc_ = NULL;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
-        }
-        return;
-      } else {
-        // 6a00         push 0xXX
-        // 58           pop eax
-        last_pc_[0] = 0xb8;
-        EnsureSpace ensure_space(this);
-        if ((imm8 & 0x80) != 0) {
-          EMIT(0xff);
-          EMIT(0xff);
-          EMIT(0xff);
-          // change to
-          // b8XXffffff   mov eax,0xffffffXX
-        } else {
-          EMIT(0x00);
-          EMIT(0x00);
-          EMIT(0x00);
-          // change to
-          // b8XX000000   mov eax,0x000000XX
-        }
-        last_pc_ = NULL;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
-        }
-        return;
-      }
-    } else if (instr == 0x68 && dst.is(eax)) {  // push of immediate 32 bit
-      // 68XXXXXXXX   push 0xXXXXXXXX
-      // 58           pop eax
-      last_pc_[0] = 0xb8;
-      last_pc_ = NULL;
-      // change to
-      // b8XXXXXXXX   mov eax,0xXXXXXXXX
-      if (FLAG_print_peephole_optimization) {
-        PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
-      }
-      return;
-    }
-
-    // Other potential patterns for peephole:
-    // 0x712716   102  890424         mov [esp], eax
-    // 0x712719   105  8b1424         mov edx, [esp]
-  }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x58 | dst.code());
 }
 
 
 void Assembler::pop(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x8F);
   emit_operand(eax, dst);
 }
@@ -586,7 +465,6 @@
 
 void Assembler::enter(const Immediate& size) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xC8);
   emit_w(size);
   EMIT(0);
@@ -595,7 +473,6 @@
 
 void Assembler::leave() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xC9);
 }
 
@@ -603,7 +480,6 @@
 void Assembler::mov_b(Register dst, const Operand& src) {
   ASSERT(dst.code() < 4);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x8A);
   emit_operand(dst, src);
 }
@@ -611,7 +487,6 @@
 
 void Assembler::mov_b(const Operand& dst, int8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xC6);
   emit_operand(eax, dst);
   EMIT(imm8);
@@ -621,7 +496,6 @@
 void Assembler::mov_b(const Operand& dst, Register src) {
   ASSERT(src.code() < 4);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x88);
   emit_operand(src, dst);
 }
@@ -629,7 +503,6 @@
 
 void Assembler::mov_w(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x8B);
   emit_operand(dst, src);
@@ -638,7 +511,6 @@
 
 void Assembler::mov_w(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x89);
   emit_operand(src, dst);
@@ -647,7 +519,6 @@
 
 void Assembler::mov(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xB8 | dst.code());
   emit(imm32);
 }
@@ -655,7 +526,6 @@
 
 void Assembler::mov(Register dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xB8 | dst.code());
   emit(x);
 }
@@ -663,7 +533,6 @@
 
 void Assembler::mov(Register dst, Handle<Object> handle) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xB8 | dst.code());
   emit(handle);
 }
@@ -671,7 +540,6 @@
 
 void Assembler::mov(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x8B);
   emit_operand(dst, src);
 }
@@ -679,7 +547,6 @@
 
 void Assembler::mov(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x89);
   EMIT(0xC0 | src.code() << 3 | dst.code());
 }
@@ -687,7 +554,6 @@
 
 void Assembler::mov(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xC7);
   emit_operand(eax, dst);
   emit(x);
@@ -696,7 +562,6 @@
 
 void Assembler::mov(const Operand& dst, Handle<Object> handle) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xC7);
   emit_operand(eax, dst);
   emit(handle);
@@ -705,7 +570,6 @@
 
 void Assembler::mov(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x89);
   emit_operand(src, dst);
 }
@@ -713,7 +577,6 @@
 
 void Assembler::movsx_b(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xBE);
   emit_operand(dst, src);
@@ -722,7 +585,6 @@
 
 void Assembler::movsx_w(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xBF);
   emit_operand(dst, src);
@@ -731,7 +593,6 @@
 
 void Assembler::movzx_b(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xB6);
   emit_operand(dst, src);
@@ -740,7 +601,6 @@
 
 void Assembler::movzx_w(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xB7);
   emit_operand(dst, src);
@@ -750,7 +610,6 @@
 void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   UNIMPLEMENTED();
   USE(cc);
   USE(dst);
@@ -761,7 +620,6 @@
 void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   UNIMPLEMENTED();
   USE(cc);
   USE(dst);
@@ -772,7 +630,6 @@
 void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: 0f 40 + cc /r.
   EMIT(0x0F);
   EMIT(0x40 + cc);
@@ -782,14 +639,12 @@
 
 void Assembler::cld() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFC);
 }
 
 
 void Assembler::rep_movs() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0xA5);
 }
@@ -797,7 +652,6 @@
 
 void Assembler::rep_stos() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0xAB);
 }
@@ -805,14 +659,12 @@
 
 void Assembler::stos() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xAB);
 }
 
 
 void Assembler::xchg(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.is(eax) || dst.is(eax)) {  // Single-byte encoding.
     EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
   } else {
@@ -824,14 +676,12 @@
 
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(2, Operand(dst), Immediate(imm32));
 }
 
 
 void Assembler::adc(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x13);
   emit_operand(dst, src);
 }
@@ -839,7 +689,6 @@
 
 void Assembler::add(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x03);
   emit_operand(dst, src);
 }
@@ -847,24 +696,7 @@
 
 void Assembler::add(const Operand& dst, const Immediate& x) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
-  if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
-    byte instr = last_pc_[0];
-    if ((instr & 0xf8) == 0x50) {
-      // Last instruction was a push. Check whether this is a pop without a
-      // result.
-      if ((dst.is_reg(esp)) &&
-          (x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) {
-        pc_ = last_pc_;
-        last_pc_ = NULL;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%d push/pop(noreg) eliminated\n", pc_offset());
-        }
-        return;
-      }
-    }
-  }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(0, dst, x);
 }
 
@@ -876,14 +708,12 @@
 
 void Assembler::and_(Register dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(4, Operand(dst), x);
 }
 
 
 void Assembler::and_(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x23);
   emit_operand(dst, src);
 }
@@ -891,14 +721,12 @@
 
 void Assembler::and_(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(4, dst, x);
 }
 
 
 void Assembler::and_(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x21);
   emit_operand(src, dst);
 }
@@ -906,7 +734,6 @@
 
 void Assembler::cmpb(const Operand& op, int8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x80);
   emit_operand(edi, op);  // edi == 7
   EMIT(imm8);
@@ -916,7 +743,6 @@
 void Assembler::cmpb(const Operand& dst, Register src) {
   ASSERT(src.is_byte_register());
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x38);
   emit_operand(src, dst);
 }
@@ -925,7 +751,6 @@
 void Assembler::cmpb(Register dst, const Operand& src) {
   ASSERT(dst.is_byte_register());
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x3A);
   emit_operand(dst, src);
 }
@@ -934,7 +759,6 @@
 void Assembler::cmpw(const Operand& op, Immediate imm16) {
   ASSERT(imm16.is_int16());
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x81);
   emit_operand(edi, op);
@@ -944,21 +768,18 @@
 
 void Assembler::cmp(Register reg, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(7, Operand(reg), Immediate(imm32));
 }
 
 
 void Assembler::cmp(Register reg, Handle<Object> handle) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(7, Operand(reg), Immediate(handle));
 }
 
 
 void Assembler::cmp(Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x3B);
   emit_operand(reg, op);
 }
@@ -966,21 +787,18 @@
 
 void Assembler::cmp(const Operand& op, const Immediate& imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(7, op, imm);
 }
 
 
 void Assembler::cmp(const Operand& op, Handle<Object> handle) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(7, op, Immediate(handle));
 }
 
 
 void Assembler::cmpb_al(const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x38);  // CMP r/m8, r8
   emit_operand(eax, op);  // eax has same code as register al.
 }
@@ -988,7 +806,6 @@
 
 void Assembler::cmpw_ax(const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x39);  // CMP r/m16, r16
   emit_operand(eax, op);  // eax has same code as register ax.
@@ -997,7 +814,6 @@
 
 void Assembler::dec_b(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFE);
   EMIT(0xC8 | dst.code());
 }
@@ -1005,7 +821,6 @@
 
 void Assembler::dec_b(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFE);
   emit_operand(ecx, dst);
 }
@@ -1013,14 +828,12 @@
 
 void Assembler::dec(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x48 | dst.code());
 }
 
 
 void Assembler::dec(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFF);
   emit_operand(ecx, dst);
 }
@@ -1028,14 +841,12 @@
 
 void Assembler::cdq() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x99);
 }
 
 
 void Assembler::idiv(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   EMIT(0xF8 | src.code());
 }
@@ -1043,7 +854,6 @@
 
 void Assembler::imul(Register reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   EMIT(0xE8 | reg.code());
 }
@@ -1051,7 +861,6 @@
 
 void Assembler::imul(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xAF);
   emit_operand(dst, src);
@@ -1060,7 +869,6 @@
 
 void Assembler::imul(Register dst, Register src, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (is_int8(imm32)) {
     EMIT(0x6B);
     EMIT(0xC0 | dst.code() << 3 | src.code());
@@ -1075,14 +883,12 @@
 
 void Assembler::inc(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x40 | dst.code());
 }
 
 
 void Assembler::inc(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFF);
   emit_operand(eax, dst);
 }
@@ -1090,7 +896,6 @@
 
 void Assembler::lea(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x8D);
   emit_operand(dst, src);
 }
@@ -1098,7 +903,6 @@
 
 void Assembler::mul(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   EMIT(0xE0 | src.code());
 }
@@ -1106,7 +910,6 @@
 
 void Assembler::neg(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   EMIT(0xD8 | dst.code());
 }
@@ -1114,7 +917,6 @@
 
 void Assembler::not_(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   EMIT(0xD0 | dst.code());
 }
@@ -1122,14 +924,12 @@
 
 void Assembler::or_(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(1, Operand(dst), Immediate(imm32));
 }
 
 
 void Assembler::or_(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0B);
   emit_operand(dst, src);
 }
@@ -1137,14 +937,12 @@
 
 void Assembler::or_(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(1, dst, x);
 }
 
 
 void Assembler::or_(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x09);
   emit_operand(src, dst);
 }
@@ -1152,7 +950,6 @@
 
 void Assembler::rcl(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
@@ -1167,7 +964,6 @@
 
 void Assembler::rcr(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
@@ -1182,7 +978,6 @@
 
 void Assembler::sar(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
@@ -1197,7 +992,6 @@
 
 void Assembler::sar_cl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD3);
   EMIT(0xF8 | dst.code());
 }
@@ -1205,7 +999,6 @@
 
 void Assembler::sbb(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x1B);
   emit_operand(dst, src);
 }
@@ -1213,7 +1006,6 @@
 
 void Assembler::shld(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xA5);
   emit_operand(dst, src);
@@ -1222,7 +1014,6 @@
 
 void Assembler::shl(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
@@ -1237,7 +1028,6 @@
 
 void Assembler::shl_cl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD3);
   EMIT(0xE0 | dst.code());
 }
@@ -1245,7 +1035,6 @@
 
 void Assembler::shrd(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xAD);
   emit_operand(dst, src);
@@ -1254,7 +1043,6 @@
 
 void Assembler::shr(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
@@ -1269,7 +1057,6 @@
 
 void Assembler::shr_cl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD3);
   EMIT(0xE8 | dst.code());
 }
@@ -1277,7 +1064,6 @@
 
 void Assembler::subb(const Operand& op, int8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (op.is_reg(eax)) {
     EMIT(0x2c);
   } else {
@@ -1290,14 +1076,12 @@
 
 void Assembler::sub(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(5, dst, x);
 }
 
 
 void Assembler::sub(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x2B);
   emit_operand(dst, src);
 }
@@ -1306,7 +1090,6 @@
 void Assembler::subb(Register dst, const Operand& src) {
   ASSERT(dst.code() < 4);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x2A);
   emit_operand(dst, src);
 }
@@ -1314,7 +1097,6 @@
 
 void Assembler::sub(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x29);
   emit_operand(src, dst);
 }
@@ -1322,7 +1104,6 @@
 
 void Assembler::test(Register reg, const Immediate& imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Only use test against byte for registers that have a byte
   // variant: eax, ebx, ecx, and edx.
   if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
@@ -1349,7 +1130,6 @@
 
 void Assembler::test(Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x85);
   emit_operand(reg, op);
 }
@@ -1357,7 +1137,6 @@
 
 void Assembler::test_b(Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x84);
   emit_operand(reg, op);
 }
@@ -1365,7 +1144,6 @@
 
 void Assembler::test(const Operand& op, const Immediate& imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF7);
   emit_operand(eax, op);
   emit(imm);
@@ -1374,7 +1152,6 @@
 
 void Assembler::test_b(const Operand& op, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF6);
   emit_operand(eax, op);
   EMIT(imm8);
@@ -1383,14 +1160,12 @@
 
 void Assembler::xor_(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(6, Operand(dst), Immediate(imm32));
 }
 
 
 void Assembler::xor_(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x33);
   emit_operand(dst, src);
 }
@@ -1398,7 +1173,6 @@
 
 void Assembler::xor_(const Operand& src, Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x31);
   emit_operand(dst, src);
 }
@@ -1406,14 +1180,12 @@
 
 void Assembler::xor_(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_arith(6, dst, x);
 }
 
 
 void Assembler::bt(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xA3);
   emit_operand(src, dst);
@@ -1422,7 +1194,6 @@
 
 void Assembler::bts(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0xAB);
   emit_operand(src, dst);
@@ -1431,21 +1202,18 @@
 
 void Assembler::hlt() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF4);
 }
 
 
 void Assembler::int3() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xCC);
 }
 
 
 void Assembler::nop() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x90);
 }
 
@@ -1453,7 +1221,6 @@
 void Assembler::rdtsc() {
   ASSERT(CpuFeatures::IsEnabled(RDTSC));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0x31);
 }
@@ -1461,7 +1228,6 @@
 
 void Assembler::ret(int imm16) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint16(imm16));
   if (imm16 == 0) {
     EMIT(0xC3);
@@ -1507,7 +1273,6 @@
 
 void Assembler::bind_to(Label* L, int pos) {
   EnsureSpace ensure_space(this);
-  last_pc_ = NULL;
   ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
   while (L->is_linked()) {
     Displacement disp = disp_at(L);
@@ -1525,36 +1290,35 @@
     }
     disp.next(L);
   }
+  while (L->is_near_linked()) {
+    int fixup_pos = L->near_link_pos();
+    int offset_to_next =
+        static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
+    ASSERT(offset_to_next <= 0);
+    // Relative address, relative to point after address.
+    int disp = pos - fixup_pos - sizeof(int8_t);
+    ASSERT(0 <= disp && disp <= 127);
+    set_byte_at(fixup_pos, disp);
+    if (offset_to_next < 0) {
+      L->link_to(fixup_pos + offset_to_next, Label::kNear);
+    } else {
+      L->UnuseNear();
+    }
+  }
   L->bind_to(pos);
 }
 
 
 void Assembler::bind(Label* L) {
   EnsureSpace ensure_space(this);
-  last_pc_ = NULL;
   ASSERT(!L->is_bound());  // label can only be bound once
   bind_to(L, pc_offset());
 }
 
 
-void Assembler::bind(NearLabel* L) {
-  ASSERT(!L->is_bound());
-  last_pc_ = NULL;
-  while (L->unresolved_branches_ > 0) {
-    int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
-    int disp = pc_offset() - branch_pos;
-    ASSERT(is_int8(disp));
-    set_byte_at(branch_pos - sizeof(int8_t), disp);
-    L->unresolved_branches_--;
-  }
-  L->bind_to(pc_offset());
-}
-
-
 void Assembler::call(Label* L) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (L->is_bound()) {
     const int long_size = 5;
     int offs = L->pos() - pc_offset();
@@ -1573,35 +1337,44 @@
 void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(!RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE8);
   emit(entry - (pc_ + sizeof(int32_t)), rmode);
 }
 
 
+int Assembler::CallSize(const Operand& adr) {
+  // Call size is 1 (opcode) + adr.len_ (operand).
+  return 1 + adr.len_;
+}
+
+
 void Assembler::call(const Operand& adr) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFF);
   emit_operand(edx, adr);
 }
 
 
-void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
-  positions_recorder()->WriteRecordedPositions();
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
-  EMIT(0xE8);
-  emit(reinterpret_cast<intptr_t>(code.location()), rmode);
+int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
+  return 1 /* EMIT */ + sizeof(uint32_t) /* emit */;
 }
 
 
-void Assembler::jmp(Label* L) {
+void Assembler::call(Handle<Code> code,
+                     RelocInfo::Mode rmode,
+                     unsigned ast_id) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  EMIT(0xE8);
+  emit(reinterpret_cast<intptr_t>(code.location()), rmode, ast_id);
+}
+
+
+void Assembler::jmp(Label* L, Label::Distance distance) {
+  EnsureSpace ensure_space(this);
   if (L->is_bound()) {
     const int short_size = 2;
     const int long_size  = 5;
@@ -1616,6 +1389,9 @@
       EMIT(0xE9);
       emit(offs - long_size);
     }
+  } else if (distance == Label::kNear) {
+    EMIT(0xEB);
+    emit_near_disp(L);
   } else {
     // 1110 1001 #32-bit disp.
     EMIT(0xE9);
@@ -1626,7 +1402,6 @@
 
 void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(!RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE9);
   emit(entry - (pc_ + sizeof(int32_t)), rmode);
@@ -1635,7 +1410,6 @@
 
 void Assembler::jmp(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xFF);
   emit_operand(esp, adr);
 }
@@ -1643,37 +1417,15 @@
 
 void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE9);
   emit(reinterpret_cast<intptr_t>(code.location()), rmode);
 }
 
 
-void Assembler::jmp(NearLabel* L) {
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  if (L->is_bound()) {
-    const int short_size = 2;
-    int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
-    ASSERT(is_int8(offs - short_size));
-    // 1110 1011 #8-bit disp.
-    EMIT(0xEB);
-    EMIT((offs - short_size) & 0xFF);
-  } else {
-    EMIT(0xEB);
-    EMIT(0x00);      // The displacement will be resolved later.
-    L->link_to(pc_offset());
-  }
-}
-
-
-void Assembler::j(Condition cc, Label* L, Hint hint) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(0 <= cc && cc < 16);
-  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
   if (L->is_bound()) {
     const int short_size = 2;
     const int long_size  = 6;
@@ -1689,6 +1441,9 @@
       EMIT(0x80 | cc);
       emit(offs - long_size);
     }
+  } else if (distance == Label::kNear) {
+    EMIT(0x70 | cc);
+    emit_near_disp(L);
   } else {
     // 0000 1111 1000 tttn #32-bit disp
     // Note: could eliminate cond. jumps to this jump if condition
@@ -1700,11 +1455,9 @@
 }
 
 
-void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint) {
+void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT((0 <= cc) && (cc < 16));
-  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
   // 0000 1111 1000 tttn #32-bit disp.
   EMIT(0x0F);
   EMIT(0x80 | cc);
@@ -1712,10 +1465,8 @@
 }
 
 
-void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
+void Assembler::j(Condition cc, Handle<Code> code) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
   // 0000 1111 1000 tttn #32-bit disp
   EMIT(0x0F);
   EMIT(0x80 | cc);
@@ -1723,46 +1474,22 @@
 }
 
 
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  ASSERT(0 <= cc && cc < 16);
-  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
-  if (L->is_bound()) {
-    const int short_size = 2;
-    int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
-    ASSERT(is_int8(offs - short_size));
-    // 0111 tttn #8-bit disp
-    EMIT(0x70 | cc);
-    EMIT((offs - short_size) & 0xFF);
-  } else {
-    EMIT(0x70 | cc);
-    EMIT(0x00);      // The displacement will be resolved later.
-    L->link_to(pc_offset());
-  }
-}
-
-
 // FPU instructions.
 
 void Assembler::fld(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xD9, 0xC0, i);
 }
 
 
 void Assembler::fstp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xD8, i);
 }
 
 
 void Assembler::fld1() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xE8);
 }
@@ -1770,7 +1497,6 @@
 
 void Assembler::fldpi() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xEB);
 }
@@ -1778,7 +1504,6 @@
 
 void Assembler::fldz() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xEE);
 }
@@ -1786,7 +1511,6 @@
 
 void Assembler::fldln2() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xED);
 }
@@ -1794,7 +1518,6 @@
 
 void Assembler::fld_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   emit_operand(eax, adr);
 }
@@ -1802,7 +1525,6 @@
 
 void Assembler::fld_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDD);
   emit_operand(eax, adr);
 }
@@ -1810,7 +1532,6 @@
 
 void Assembler::fstp_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   emit_operand(ebx, adr);
 }
@@ -1818,7 +1539,6 @@
 
 void Assembler::fstp_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDD);
   emit_operand(ebx, adr);
 }
@@ -1826,7 +1546,6 @@
 
 void Assembler::fst_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDD);
   emit_operand(edx, adr);
 }
@@ -1834,7 +1553,6 @@
 
 void Assembler::fild_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   emit_operand(eax, adr);
 }
@@ -1842,7 +1560,6 @@
 
 void Assembler::fild_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDF);
   emit_operand(ebp, adr);
 }
@@ -1850,7 +1567,6 @@
 
 void Assembler::fistp_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   emit_operand(ebx, adr);
 }
@@ -1859,7 +1575,6 @@
 void Assembler::fisttp_s(const Operand& adr) {
   ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   emit_operand(ecx, adr);
 }
@@ -1868,7 +1583,6 @@
 void Assembler::fisttp_d(const Operand& adr) {
   ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDD);
   emit_operand(ecx, adr);
 }
@@ -1876,7 +1590,6 @@
 
 void Assembler::fist_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   emit_operand(edx, adr);
 }
@@ -1884,7 +1597,6 @@
 
 void Assembler::fistp_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDF);
   emit_operand(edi, adr);
 }
@@ -1892,7 +1604,6 @@
 
 void Assembler::fabs() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xE1);
 }
@@ -1900,7 +1611,6 @@
 
 void Assembler::fchs() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xE0);
 }
@@ -1908,7 +1618,6 @@
 
 void Assembler::fcos() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xFF);
 }
@@ -1916,7 +1625,6 @@
 
 void Assembler::fsin() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xFE);
 }
@@ -1924,7 +1632,6 @@
 
 void Assembler::fyl2x() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xF1);
 }
@@ -1932,21 +1639,18 @@
 
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xC0, i);
 }
 
 
 void Assembler::fsub(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xE8, i);
 }
 
 
 void Assembler::fisub_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDA);
   emit_operand(esp, adr);
 }
@@ -1954,56 +1658,48 @@
 
 void Assembler::fmul(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xC8, i);
 }
 
 
 void Assembler::fdiv(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xF8, i);
 }
 
 
 void Assembler::faddp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xC0, i);
 }
 
 
 void Assembler::fsubp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xE8, i);
 }
 
 
 void Assembler::fsubrp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xE0, i);
 }
 
 
 void Assembler::fmulp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xC8, i);
 }
 
 
 void Assembler::fdivp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xF8, i);
 }
 
 
 void Assembler::fprem() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xF8);
 }
@@ -2011,7 +1707,6 @@
 
 void Assembler::fprem1() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xF5);
 }
@@ -2019,14 +1714,12 @@
 
 void Assembler::fxch(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xD9, 0xC8, i);
 }
 
 
 void Assembler::fincstp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xF7);
 }
@@ -2034,14 +1727,12 @@
 
 void Assembler::ffree(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xC0, i);
 }
 
 
 void Assembler::ftst() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xE4);
 }
@@ -2049,14 +1740,12 @@
 
 void Assembler::fucomp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xE8, i);
 }
 
 
 void Assembler::fucompp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDA);
   EMIT(0xE9);
 }
@@ -2064,7 +1753,6 @@
 
 void Assembler::fucomi(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   EMIT(0xE8 + i);
 }
@@ -2072,7 +1760,6 @@
 
 void Assembler::fucomip() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDF);
   EMIT(0xE9);
 }
@@ -2080,7 +1767,6 @@
 
 void Assembler::fcompp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDE);
   EMIT(0xD9);
 }
@@ -2088,7 +1774,6 @@
 
 void Assembler::fnstsw_ax() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDF);
   EMIT(0xE0);
 }
@@ -2096,14 +1781,12 @@
 
 void Assembler::fwait() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x9B);
 }
 
 
 void Assembler::frndint() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xD9);
   EMIT(0xFC);
 }
@@ -2111,7 +1794,6 @@
 
 void Assembler::fnclex() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xDB);
   EMIT(0xE2);
 }
@@ -2119,7 +1801,6 @@
 
 void Assembler::sahf() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x9E);
 }
 
@@ -2127,7 +1808,6 @@
 void Assembler::setcc(Condition cc, Register reg) {
   ASSERT(reg.is_byte_register());
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0x90 | cc);
   EMIT(0xC0 | reg.code());
@@ -2137,7 +1817,6 @@
 void Assembler::cvttss2si(Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0x0F);
   EMIT(0x2C);
@@ -2148,7 +1827,6 @@
 void Assembler::cvttsd2si(Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x2C);
@@ -2159,7 +1837,6 @@
 void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x2A);
@@ -2170,7 +1847,6 @@
 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0x0F);
   EMIT(0x5A);
@@ -2181,7 +1857,6 @@
 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x5A);
@@ -2192,7 +1867,6 @@
 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x58);
@@ -2203,7 +1877,6 @@
 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x59);
@@ -2214,7 +1887,6 @@
 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x5C);
@@ -2225,7 +1897,6 @@
 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x5E);
@@ -2236,7 +1907,6 @@
 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x57);
@@ -2244,9 +1914,16 @@
 }
 
 
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x57);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x51);
@@ -2256,7 +1933,6 @@
 
 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x54);
@@ -2267,7 +1943,6 @@
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x2E);
@@ -2278,7 +1953,6 @@
 void Assembler::movmskpd(Register dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x50);
@@ -2289,7 +1963,6 @@
 void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0xC2);
@@ -2301,7 +1974,6 @@
 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0x28);
   emit_sse_operand(dst, src);
@@ -2311,7 +1983,6 @@
 void Assembler::movdqa(const Operand& dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x7F);
@@ -2322,7 +1993,6 @@
 void Assembler::movdqa(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x6F);
@@ -2333,7 +2003,6 @@
 void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0x0F);
   EMIT(0x7F);
@@ -2344,7 +2013,6 @@
 void Assembler::movdqu(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0x0F);
   EMIT(0x6F);
@@ -2355,7 +2023,6 @@
 void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x38);
@@ -2367,7 +2034,6 @@
 void Assembler::movntdq(const Operand& dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xE7);
@@ -2378,7 +2044,6 @@
 void Assembler::prefetch(const Operand& src, int level) {
   ASSERT(is_uint2(level));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x0F);
   EMIT(0x18);
   XMMRegister code = { level };  // Emit hint number in Reg position of RegR/M.
@@ -2388,14 +2053,12 @@
 
 void Assembler::movdbl(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   movsd(dst, src);
 }
 
 
 void Assembler::movdbl(const Operand& dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   movsd(dst, src);
 }
 
@@ -2403,7 +2066,6 @@
 void Assembler::movsd(const Operand& dst, XMMRegister src ) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);  // double
   EMIT(0x0F);
   EMIT(0x11);  // store
@@ -2414,7 +2076,6 @@
 void Assembler::movsd(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);  // double
   EMIT(0x0F);
   EMIT(0x10);  // load
@@ -2425,7 +2086,6 @@
 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF2);
   EMIT(0x0F);
   EMIT(0x10);
@@ -2436,7 +2096,6 @@
 void Assembler::movss(const Operand& dst, XMMRegister src ) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);  // float
   EMIT(0x0F);
   EMIT(0x11);  // store
@@ -2447,7 +2106,6 @@
 void Assembler::movss(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);  // float
   EMIT(0x0F);
   EMIT(0x10);  // load
@@ -2458,7 +2116,6 @@
 void Assembler::movss(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0xF3);
   EMIT(0x0F);
   EMIT(0x10);
@@ -2469,7 +2126,6 @@
 void Assembler::movd(XMMRegister dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x6E);
@@ -2480,7 +2136,6 @@
 void Assembler::movd(const Operand& dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x7E);
@@ -2491,7 +2146,6 @@
 void Assembler::pand(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xDB);
@@ -2502,7 +2156,6 @@
 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xEF);
@@ -2513,7 +2166,6 @@
 void Assembler::por(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xEB);
@@ -2524,7 +2176,6 @@
 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x38);
@@ -2536,7 +2187,6 @@
 void Assembler::psllq(XMMRegister reg, int8_t shift) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x73);
@@ -2548,7 +2198,6 @@
 void Assembler::psllq(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xF3);
@@ -2559,7 +2208,6 @@
 void Assembler::psrlq(XMMRegister reg, int8_t shift) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x73);
@@ -2571,7 +2219,6 @@
 void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0xD3);
@@ -2582,7 +2229,6 @@
 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x70);
@@ -2594,7 +2240,6 @@
 void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x3A);
@@ -2607,7 +2252,6 @@
 void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   EMIT(0x66);
   EMIT(0x0F);
   EMIT(0x3A);
@@ -2706,9 +2350,6 @@
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
   pc_ += pc_delta;
-  if (last_pc_ != NULL) {
-    last_pc_ += pc_delta;
-  }
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
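The new three-argument emit() and the call(Handle<Code>, rmode, ast_id) overload above let a call site carry an AST id: when rmode is CODE_TARGET and the id is not kNoASTId, the relocation is recorded as CODE_TARGET_WITH_ID, while the kNoASTId default preserves the old plain CODE_TARGET recording. A hedged usage sketch (ic and expr are illustrative call-site placeholders, not taken from this patch):

  // Tag the call with the id of the AST node being compiled:
  //   __ call(ic, RelocInfo::CODE_TARGET, expr->id());   // CODE_TARGET_WITH_ID
  // Rely on the default argument to keep the old behavior:
  //   __ call(ic, RelocInfo::CODE_TARGET);                // plain CODE_TARGET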
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 46fda3b..e933102 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -249,23 +249,6 @@
 }
 
 
-enum Hint {
-  no_hint = 0,
-  not_taken = 0x2e,
-  taken = 0x3e
-};
-
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition.  That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
-  return (hint == no_hint)
-      ? no_hint
-      : ((hint == not_taken) ? taken : not_taken);
-}
-
-
 // -----------------------------------------------------------------------------
 // Machine instruction Immediates
 
@@ -843,30 +826,30 @@
   // but it may be bound only once.
 
   void bind(Label* L);  // binds an unbound label L to the current code position
-  void bind(NearLabel* L);
 
   // Calls
   void call(Label* L);
   void call(byte* entry, RelocInfo::Mode rmode);
+  int CallSize(const Operand& adr);
   void call(const Operand& adr);
-  void call(Handle<Code> code, RelocInfo::Mode rmode);
+  int CallSize(Handle<Code> code, RelocInfo::Mode mode);
+  void call(Handle<Code> code,
+            RelocInfo::Mode rmode,
+            unsigned ast_id = kNoASTId);
 
   // Jumps
-  void jmp(Label* L);  // unconditional jump to L
+  // unconditional jump to L
+  void jmp(Label* L, Label::Distance distance = Label::kFar);
   void jmp(byte* entry, RelocInfo::Mode rmode);
   void jmp(const Operand& adr);
   void jmp(Handle<Code> code, RelocInfo::Mode rmode);
 
-  // Short jump
-  void jmp(NearLabel* L);
-
   // Conditional jumps
-  void j(Condition cc, Label* L, Hint hint = no_hint);
-  void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
-  void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
-
-  // Conditional short jump
-  void j(Condition cc, NearLabel* L, Hint hint = no_hint);
+  void j(Condition cc,
+         Label* L,
+         Label::Distance distance = Label::kFar);
+  void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
+  void j(Condition cc, Handle<Code> code);
 
   // Floating-point operations
   void fld(int i);
@@ -951,6 +934,7 @@
   void mulsd(XMMRegister dst, XMMRegister src);
   void divsd(XMMRegister dst, XMMRegister src);
   void xorpd(XMMRegister dst, XMMRegister src);
+  void xorps(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void andpd(XMMRegister dst, XMMRegister src);
@@ -1071,7 +1055,9 @@
   void GrowBuffer();
   inline void emit(uint32_t x);
   inline void emit(Handle<Object> handle);
-  inline void emit(uint32_t x, RelocInfo::Mode rmode);
+  inline void emit(uint32_t x,
+                   RelocInfo::Mode rmode,
+                   unsigned ast_id = kNoASTId);
   inline void emit(const Immediate& x);
   inline void emit_w(const Immediate& x);
 
@@ -1099,6 +1085,7 @@
   inline Displacement disp_at(Label* L);
   inline void disp_at_put(Label* L, Displacement disp);
   inline void emit_disp(Label* L, Displacement::Type type);
+  inline void emit_near_disp(Label* L);
 
   // record reloc info for current pc_
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
@@ -1117,9 +1104,6 @@
   byte* pc_;  // the program counter; moves forward
   RelocInfoWriter reloc_info_writer;
 
-  // push-pop elimination
-  byte* last_pc_;
-
   PositionsRecorder positions_recorder_;
 
   bool emit_debug_code_;
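Hint and NearLabel are removed from the public interface above; short branches are now expressed through the single Label type plus a Label::Distance argument, and branch-hint prefixes are no longer emitted. The builtins-ia32.cc hunks below apply exactly this migration; a before/after sketch of the pattern:

  // Before (NearLabel plus a static branch hint):
  //   NearLabel done;
  //   __ j(not_equal, &done, taken);
  //   ...
  //   __ bind(&done);
  // After (this patch):
  //   Label done;
  //   __ j(not_equal, &done, Label::kNear);
  //   ...
  //   __ bind(&done);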
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 29c67b5..1212566 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -102,6 +102,7 @@
   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   Handle<Code> arguments_adaptor =
       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+  __ SetCallKind(ecx, CALL_AS_METHOD);
   __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
 }
 
@@ -339,11 +340,12 @@
     Handle<Code> code =
         masm->isolate()->builtins()->HandleApiCallConstruct();
     ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION);
+    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
   } else {
     ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Restore context from the frame.
@@ -356,12 +358,12 @@
 
   // If the result is a smi, it is *not* an object in the ECMA sense.
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &use_receiver, not_taken);
+  __ j(zero, &use_receiver);
 
   // If the type of the result (stored in its map) is less than
   // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
   __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-  __ j(above_equal, &exit, not_taken);
+  __ j(above_equal, &exit);
 
   // Throw away the result of the constructor invocation and use the
   // on-stack receiver as the result.
@@ -442,7 +444,8 @@
             RelocInfo::CODE_TARGET);
   } else {
     ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Exit the JS frame. Notice that this also removes the empty
@@ -467,19 +470,25 @@
   // Enter an internal frame.
   __ EnterInternalFrame();
 
-  // Push a copy of the function onto the stack.
+  // Push a copy of the function.
   __ push(edi);
+  // Push call kind information.
+  __ push(ecx);
 
   __ push(edi);  // Function is also the parameter to the runtime call.
   __ CallRuntime(Runtime::kLazyCompile, 1);
+
+  // Restore call kind information.
+  __ pop(ecx);
+  // Restore function.
   __ pop(edi);
 
   // Tear down temporary frame.
   __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
-  __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(ecx));
+  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(Operand(eax));
 }
 
 
@@ -489,17 +498,23 @@
 
   // Push a copy of the function onto the stack.
   __ push(edi);
+  // Push call kind information.
+  __ push(ecx);
 
   __ push(edi);  // Function is also the parameter to the runtime call.
   __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-  // Restore function and tear down temporary frame.
+  // Restore call kind information.
+  __ pop(ecx);
+  // Restore function.
   __ pop(edi);
+
+  // Tear down temporary frame.
   __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
-  __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(ecx));
+  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(Operand(eax));
 }
 
 
@@ -520,15 +535,15 @@
   __ SmiUntag(ecx);
 
   // Switch on the state.
-  NearLabel not_no_registers, not_tos_eax;
+  Label not_no_registers, not_tos_eax;
   __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
-  __ j(not_equal, &not_no_registers);
+  __ j(not_equal, &not_no_registers, Label::kNear);
   __ ret(1 * kPointerSize);  // Remove state.
 
   __ bind(&not_no_registers);
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   __ cmp(ecx, FullCodeGenerator::TOS_REG);
-  __ j(not_equal, &not_tos_eax);
+  __ j(not_equal, &not_tos_eax, Label::kNear);
   __ ret(2 * kPointerSize);  // Remove state, eax.
 
   __ bind(&not_tos_eax);
@@ -568,7 +583,7 @@
   // 1. Make sure we have at least one argument.
   { Label done;
     __ test(eax, Operand(eax));
-    __ j(not_zero, &done, taken);
+    __ j(not_zero, &done);
     __ pop(ebx);
     __ push(Immediate(factory->undefined_value()));
     __ push(ebx);
@@ -582,9 +597,9 @@
   // 1 ~ return address.
   __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
   __ test(edi, Immediate(kSmiTagMask));
-  __ j(zero, &non_function, not_taken);
+  __ j(zero, &non_function);
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &non_function, not_taken);
+  __ j(not_equal, &non_function);
 
 
   // 3a. Patch the first argument if necessary when calling a function.
@@ -599,22 +614,26 @@
               1 << SharedFunctionInfo::kStrictModeBitWithinByte);
     __ j(not_equal, &shift_arguments);
 
+    // Do not transform the receiver for natives (shared already in ebx).
+    __ test_b(FieldOperand(ebx, SharedFunctionInfo::kES5NativeByteOffset),
+              1 << SharedFunctionInfo::kES5NativeBitWithinByte);
+    __ j(not_equal, &shift_arguments);
+
     // Compute the receiver in non-strict mode.
     __ mov(ebx, Operand(esp, eax, times_4, 0));  // First argument.
+
+    // Call ToObject on the receiver if it is not an object, or use the
+    // global object if it is null or undefined.
     __ test(ebx, Immediate(kSmiTagMask));
     __ j(zero, &convert_to_object);
-
     __ cmp(ebx, factory->null_value());
     __ j(equal, &use_global_receiver);
     __ cmp(ebx, factory->undefined_value());
     __ j(equal, &use_global_receiver);
-
-    // We don't use IsObjectJSObjectType here because we jump on success.
-    __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
-    __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-    __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
-    __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
-    __ j(below_equal, &shift_arguments);
+    STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
+    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    __ CmpObjectType(ebx, FIRST_JS_OBJECT_TYPE, ecx);
+    __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
     __ EnterInternalFrame();  // In order to preserve argument count.
@@ -675,9 +694,10 @@
   // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
   { Label function;
     __ test(edi, Operand(edi));
-    __ j(not_zero, &function, taken);
+    __ j(not_zero, &function);
     __ Set(ebx, Immediate(0));
     __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+    __ SetCallKind(ecx, CALL_AS_METHOD);
     __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
     __ bind(&function);
@@ -691,12 +711,14 @@
          FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
   __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
   __ SmiUntag(ebx);
+  __ SetCallKind(ecx, CALL_AS_METHOD);
   __ cmp(eax, Operand(ebx));
   __ j(not_equal,
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
 
   ParameterCount expected(0);
-  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
+  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
+                NullCallWrapper(), CALL_AS_METHOD);
 }
 
 
@@ -724,7 +746,7 @@
   __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
   // Check if the arguments will overflow the stack.
   __ cmp(ecx, Operand(edx));
-  __ j(greater, &okay, taken);  // Signed comparison.
+  __ j(greater, &okay);  // Signed comparison.
 
   // Out of stack space.
   __ push(Operand(ebp, 4 * kPointerSize));  // push this
@@ -755,25 +777,27 @@
             1 << SharedFunctionInfo::kStrictModeBitWithinByte);
   __ j(not_equal, &push_receiver);
 
+  Factory* factory = masm->isolate()->factory();
+
+  // Do not transform the receiver for natives (shared already in ecx).
+  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kES5NativeByteOffset),
+            1 << SharedFunctionInfo::kES5NativeBitWithinByte);
+  __ j(not_equal, &push_receiver);
+
   // Compute the receiver in non-strict mode.
+  // Call ToObject on the receiver if it is not an object, or use the
+  // global object if it is null or undefined.
   __ test(ebx, Immediate(kSmiTagMask));
   __ j(zero, &call_to_object);
-  Factory* factory = masm->isolate()->factory();
   __ cmp(ebx, factory->null_value());
   __ j(equal, &use_global_receiver);
   __ cmp(ebx, factory->undefined_value());
   __ j(equal, &use_global_receiver);
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  __ CmpObjectType(ebx, FIRST_JS_OBJECT_TYPE, ecx);
+  __ j(above_equal, &push_receiver);
 
-  // If given receiver is already a JavaScript object then there's no
-  // reason for converting it.
-  // We don't use IsObjectJSObjectType here because we jump on success.
-  __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ sub(Operand(ecx), Immediate(FIRST_JS_OBJECT_TYPE));
-  __ cmp(ecx, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
-  __ j(below_equal, &push_receiver);
-
-  // Convert the receiver to an object.
   __ bind(&call_to_object);
   __ push(ebx);
   __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@@ -824,7 +848,8 @@
   ParameterCount actual(eax);
   __ SmiUntag(eax);
   __ mov(edi, Operand(ebp, 4 * kPointerSize));
-  __ InvokeFunction(edi, actual, CALL_FUNCTION);
+  __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
   __ LeaveInternalFrame();
   __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
@@ -1418,12 +1443,12 @@
   // Push the function on the stack.
   __ push(edi);
 
-  // Preserve the number of arguments on the stack. Must preserve both
-  // eax and ebx because these registers are used when copying the
+  // Preserve the number of arguments on the stack. Must preserve eax,
+  // ebx and ecx because these registers are used when copying the
   // arguments and the receiver.
   ASSERT(kSmiTagSize == 1);
-  __ lea(ecx, Operand(eax, eax, times_1, kSmiTag));
-  __ push(ecx);
+  __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
+  __ push(edi);
 }
 
 
@@ -1446,6 +1471,7 @@
   // ----------- S t a t e -------------
   //  -- eax : actual number of arguments
   //  -- ebx : expected number of arguments
+  //  -- ecx : call kind information
   //  -- edx : code entry to call
   // -----------------------------------
 
@@ -1465,14 +1491,14 @@
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(eax, Operand(ebp, eax, times_4, offset));
-    __ mov(ecx, -1);  // account for receiver
+    __ mov(edi, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
-    __ inc(ecx);
+    __ inc(edi);
     __ push(Operand(eax, 0));
     __ sub(Operand(eax), Immediate(kPointerSize));
-    __ cmp(ecx, Operand(ebx));
+    __ cmp(edi, Operand(ebx));
     __ j(less, &copy);
     __ jmp(&invoke);
   }
@@ -1484,30 +1510,33 @@
     // Copy receiver and all actual arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(edi, Operand(ebp, eax, times_4, offset));
-    __ mov(ecx, -1);  // account for receiver
+    // ebx = expected - actual.
+    __ sub(ebx, Operand(eax));
+    // eax = -actual - 1
+    __ neg(eax);
+    __ sub(Operand(eax), Immediate(1));
 
     Label copy;
     __ bind(&copy);
-    __ inc(ecx);
+    __ inc(eax);
     __ push(Operand(edi, 0));
     __ sub(Operand(edi), Immediate(kPointerSize));
-    __ cmp(ecx, Operand(eax));
-    __ j(less, &copy);
+    __ test(eax, Operand(eax));
+    __ j(not_zero, &copy);
 
     // Fill remaining expected arguments with undefined values.
     Label fill;
     __ bind(&fill);
-    __ inc(ecx);
+    __ inc(eax);
     __ push(Immediate(masm->isolate()->factory()->undefined_value()));
-    __ cmp(ecx, Operand(ebx));
+    __ cmp(eax, Operand(ebx));
     __ j(less, &fill);
-
-    // Restore function pointer.
-    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   }
 
   // Call the entry point.
   __ bind(&invoke);
+  // Restore function pointer.
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ call(Operand(edx));
 
   // Leave frame and return.
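
Illustrative aside, not V8 code: the copy and fill loops above are what argument adaptation boils down to. A minimal sketch of the observable effect, using a hypothetical AdaptArguments helper and strings in place of tagged values:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

// Copy the actual arguments, then pad with "undefined" until the expected
// count is reached, so the callee always sees a full parameter list.
static std::vector<std::string> AdaptArguments(
    const std::vector<std::string>& actual, size_t expected) {
  std::vector<std::string> adapted(
      actual.begin(),
      actual.begin() + std::min(actual.size(), expected));
  while (adapted.size() < expected) adapted.push_back("undefined");
  return adapted;
}

int main() {
  for (const std::string& arg : AdaptArguments({"a", "b"}, 4))
    std::printf("%s ", arg.c_str());
  std::printf("\n");  // prints: a b undefined undefined
}
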
@@ -1558,19 +1587,19 @@
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
-  NearLabel skip;
+  Label skip;
   __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
-  __ j(not_equal, &skip);
+  __ j(not_equal, &skip, Label::kNear);
   __ ret(0);
 
   // If we decide not to perform on-stack replacement we perform a
   // stack guard check to enable interrupts.
   __ bind(&stack_check);
-  NearLabel ok;
+  Label ok;
   ExternalReference stack_limit =
       ExternalReference::address_of_stack_limit(masm->isolate());
   __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above_equal, &ok, taken);
+  __ j(above_equal, &ok, Label::kNear);
   StackCheckStub stub;
   __ TailCallStub(&stub);
   __ Abort("Unreachable code: returned from tail call.");
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 5d32095..8bf2dd4 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -29,10 +29,10 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "code-stubs.h"
 #include "bootstrapper.h"
-#include "jsregexp.h"
+#include "code-stubs.h"
 #include "isolate.h"
+#include "jsregexp.h"
 #include "regexp-macro-assembler.h"
 
 namespace v8 {
@@ -42,16 +42,16 @@
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in eax.
-  NearLabel check_heap_number, call_builtin;
+  Label check_heap_number, call_builtin;
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &check_heap_number);
+  __ j(not_zero, &check_heap_number, Label::kNear);
   __ ret(0);
 
   __ bind(&check_heap_number);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   Factory* factory = masm->isolate()->factory();
   __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
-  __ j(not_equal, &call_builtin);
+  __ j(not_equal, &call_builtin, Label::kNear);
   __ ret(0);
 
   __ bind(&call_builtin);
@@ -242,13 +242,29 @@
 
 // NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  NearLabel false_result, true_result, not_string;
+  Label false_result, true_result, not_string;
   __ mov(eax, Operand(esp, 1 * kPointerSize));
+  Factory* factory = masm->isolate()->factory();
+
+  // undefined -> false
+  __ cmp(eax, factory->undefined_value());
+  __ j(equal, &false_result);
+
+  // Boolean -> its value
+  __ cmp(eax, factory->true_value());
+  __ j(equal, &true_result);
+  __ cmp(eax, factory->false_value());
+  __ j(equal, &false_result);
+
+  // Smis: 0 -> false, all other -> true
+  __ test(eax, Operand(eax));
+  __ j(zero, &false_result);
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &true_result);
 
   // 'null' => false.
-  Factory* factory = masm->isolate()->factory();
   __ cmp(eax, factory->null_value());
-  __ j(equal, &false_result);
+  __ j(equal, &false_result, Label::kNear);
 
   // Get the map and type of the heap object.
   __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -257,28 +273,28 @@
   // Undetectable => false.
   __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
             1 << Map::kIsUndetectable);
-  __ j(not_zero, &false_result);
+  __ j(not_zero, &false_result, Label::kNear);
 
   // JavaScript object => true.
   __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
-  __ j(above_equal, &true_result);
+  __ j(above_equal, &true_result, Label::kNear);
 
   // String value => false iff empty.
   __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
-  __ j(above_equal, &not_string);
+  __ j(above_equal, &not_string, Label::kNear);
   STATIC_ASSERT(kSmiTag == 0);
   __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
-  __ j(zero, &false_result);
-  __ jmp(&true_result);
+  __ j(zero, &false_result, Label::kNear);
+  __ jmp(&true_result, Label::kNear);
 
   __ bind(&not_string);
   // HeapNumber => false iff +0, -0, or NaN.
   __ cmp(edx, factory->heap_number_map());
-  __ j(not_equal, &true_result);
+  __ j(not_equal, &true_result, Label::kNear);
   __ fldz();
   __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
   __ FCmp();
-  __ j(zero, &false_result);
+  __ j(zero, &false_result, Label::kNear);
   // Fall through to |true_result|.
 
   // Return 1/0 for true/false in eax.
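
Illustrative aside, not V8 code: the heap-number branch above encodes the last ToBoolean rule. A hypothetical NumberToBoolean helper showing the same rule on plain doubles:

#include <cmath>
#include <cstdio>

// A number converts to false exactly when it is +0, -0 or NaN; every other
// double, including infinities, converts to true.
static bool NumberToBoolean(double value) {
  return value != 0.0 && !std::isnan(value);  // -0.0 == 0.0, so -0 is false.
}

int main() {
  std::printf("%d %d %d %d\n",
              NumberToBoolean(0.0), NumberToBoolean(-0.0),
              NumberToBoolean(std::nan("")), NumberToBoolean(2.5));
  // prints: 0 0 0 1
}
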
@@ -331,14 +347,6 @@
 
   // Takes the operands in edx and eax and loads them as integers in eax
   // and ecx.
-  static void LoadAsIntegers(MacroAssembler* masm,
-                             TypeInfo type_info,
-                             bool use_sse3,
-                             Label* operand_conversion_failure);
-  static void LoadNumbersAsIntegers(MacroAssembler* masm,
-                                    TypeInfo type_info,
-                                    bool use_sse3,
-                                    Label* operand_conversion_failure);
   static void LoadUnknownsAsIntegers(MacroAssembler* masm,
                                      bool use_sse3,
                                      Label* operand_conversion_failure);
@@ -374,15 +382,486 @@
 };
 
 
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-    TRBinaryOpIC::TypeInfo type_info,
-    TRBinaryOpIC::TypeInfo result_type_info) {
-  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes edi and ebx.  Dest is ecx.  Source cannot be ecx or one of the
+// trashed registers.
+static void IntegerConvert(MacroAssembler* masm,
+                           Register source,
+                           bool use_sse3,
+                           Label* conversion_failure) {
+  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
+  Label done, right_exponent, normal_exponent;
+  Register scratch = ebx;
+  Register scratch2 = edi;
+  // Get exponent word.
+  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+  // Get exponent alone in scratch2.
+  __ mov(scratch2, scratch);
+  __ and_(scratch2, HeapNumber::kExponentMask);
+  if (use_sse3) {
+    CpuFeatures::Scope scope(SSE3);
+    // Check whether the exponent is too big for a 64 bit signed integer.
+    static const uint32_t kTooBigExponent =
+        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+    __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+    __ j(greater_equal, conversion_failure);
+    // Load x87 register with heap number.
+    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+    // Reserve space for 64 bit answer.
+    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+    // Do conversion, which cannot fail because we checked the exponent.
+    __ fisttp_d(Operand(esp, 0));
+    __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
+    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+  } else {
+    // Load ecx with zero.  We use this either for the final shift or
+    // for the answer.
+    __ xor_(ecx, Operand(ecx));
+    // Check whether the exponent matches a 32 bit signed int that cannot be
+    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
+    // exponent is 30 (biased).  This is the exponent that we are fastest at and
+    // also the highest exponent we can handle here.
+    const uint32_t non_smi_exponent =
+        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+    // If we have a match of the int32-but-not-Smi exponent then skip some
+    // logic.
+    __ j(equal, &right_exponent);
+    // If the exponent is higher than that then go to slow case.  This catches
+    // numbers that don't fit in a signed int32, infinities and NaNs.
+    __ j(less, &normal_exponent);
+
+    {
+      // Handle a big exponent.  The only reason we have this code is that the
+      // >>> operator has a tendency to generate numbers with an exponent of 31.
+      const uint32_t big_non_smi_exponent =
+          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+      __ j(not_equal, conversion_failure);
+      // We have the big exponent, typically from >>>.  This means the number is
+      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
+      __ mov(scratch2, scratch);
+      __ and_(scratch2, HeapNumber::kMantissaMask);
+      // Put back the implicit 1.
+      __ or_(scratch2, 1 << HeapNumber::kExponentShift);
+      // Shift up the mantissa bits to take up the space the exponent used to
+      // take. We just orred in the implicit bit so that took care of one and
+      // we want to use the full unsigned range so we subtract 1 bit from the
+      // shift distance.
+      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+      __ shl(scratch2, big_shift_distance);
+      // Get the second half of the double.
+      __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
+      // Shift down 21 bits to get the most significant 11 bits or the low
+      // mantissa word.
+      __ shr(ecx, 32 - big_shift_distance);
+      __ or_(ecx, Operand(scratch2));
+      // We have the answer in ecx, but we may need to negate it.
+      __ test(scratch, Operand(scratch));
+      __ j(positive, &done);
+      __ neg(ecx);
+      __ jmp(&done);
+    }
+
+    __ bind(&normal_exponent);
+    // Exponent word in scratch, exponent part of exponent word in scratch2.
+    // Zero in ecx.
+    // We know the exponent is smaller than 30 (biased).  If it is less than
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+    // it rounds to zero.
+    const uint32_t zero_exponent =
+        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+    __ sub(Operand(scratch2), Immediate(zero_exponent));
+    // ecx already has a Smi zero.
+    __ j(less, &done);
+
+    // We have a shifted exponent between 0 and 30 in scratch2.
+    __ shr(scratch2, HeapNumber::kExponentShift);
+    __ mov(ecx, Immediate(30));
+    __ sub(ecx, Operand(scratch2));
+
+    __ bind(&right_exponent);
+    // Here ecx is the shift, scratch is the exponent word.
+    // Get the top bits of the mantissa.
+    __ and_(scratch, HeapNumber::kMantissaMask);
+    // Put back the implicit 1.
+    __ or_(scratch, 1 << HeapNumber::kExponentShift);
+    // Shift up the mantissa bits to take up the space the exponent used to
+    // take. We have kExponentShift + 1 significant bits in the low end of the
+    // word.  Shift them to the top bits.
+    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+    __ shl(scratch, shift_distance);
+    // Get the second half of the double. For some exponents we don't
+    // actually need this because the bits get shifted out again, but
+    // it's probably slower to test than just to do it.
+    __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+    // Shift down 22 bits to get the most significant 10 bits or the low
+    // mantissa word.
+    __ shr(scratch2, 32 - shift_distance);
+    __ or_(scratch2, Operand(scratch));
+    // Move down according to the exponent.
+    __ shr_cl(scratch2);
+    // Now the unsigned answer is in scratch2.  We need to move it to ecx and
+    // we may need to fix the sign.
+    Label negative;
+    __ xor_(ecx, Operand(ecx));
+    __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
+    __ j(greater, &negative, Label::kNear);
+    __ mov(ecx, scratch2);
+    __ jmp(&done, Label::kNear);
+    __ bind(&negative);
+    __ sub(ecx, Operand(scratch2));
+    __ bind(&done);
+  }
+}
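
Illustrative aside, not V8 code: the bit twiddling above recovers the integer part of a double straight from its IEEE-754 bits. A hypothetical, portable sketch of the same idea for the exponents the fast path can handle (biased exponent at most 30, i.e. results that fit a signed 32-bit value):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Mask out the mantissa, restore the implicit 1, and shift by the unbiased
// exponent to get the truncated integer value.
static int32_t IntegerPartFromBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) return 0;    // |value| < 1 truncates to 0.
  if (exponent > 30) return 0;   // Out of scope for this sketch.
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  int64_t magnitude = static_cast<int64_t>(mantissa >> (52 - exponent));
  return static_cast<int32_t>((bits >> 63) ? -magnitude : magnitude);
}

int main() {
  std::printf("%d %d %d\n",
              IntegerPartFromBits(12345.678),    // 12345
              IntegerPartFromBits(-0.75),        // 0
              IntegerPartFromBits(-1000000.5));  // -1000000
}
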
+
+
+Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
+  UnaryOpStub stub(key, type_info);
   return stub.GetCode();
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+const char* UnaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+      kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name = NULL;  // Make g++ happy.
+  switch (mode_) {
+    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "UnaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               UnaryOpIC::GetName(operand_type_));
+  return name_;
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operand_type_) {
+    case UnaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case UnaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case UnaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case UnaryOpIC::GENERIC:
+      GenerateGenericStub(masm);
+      break;
+  }
+}
+
+
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ pop(ecx);  // Save return address.
+  __ push(eax);
+  // the argument is now on top.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(operand_type_)));
+
+  __ push(ecx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
+                        masm->isolate()), 4, 1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateSmiStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateSmiStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+  Label non_smi, undo, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
+                     Label::kNear, Label::kNear, Label::kNear);
+  __ bind(&undo);
+  GenerateSmiCodeUndo(masm);
+  __ bind(&non_smi);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+  Label non_smi;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+                                     Label* non_smi,
+                                     Label* undo,
+                                     Label* slow,
+                                     Label::Distance non_smi_near,
+                                     Label::Distance undo_near,
+                                     Label::Distance slow_near) {
+  // Check whether the value is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, non_smi, non_smi_near);
+
+  // We can't handle -0 with smis, so use a type transition for that case.
+  __ test(eax, Operand(eax));
+  __ j(zero, slow, slow_near);
+
+  // Try optimistic subtraction '0 - value', saving operand in eax for undo.
+  __ mov(edx, Operand(eax));
+  __ Set(eax, Immediate(0));
+  __ sub(eax, Operand(edx));
+  __ j(overflow, undo, undo_near);
+  __ ret(0);
+}
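
Illustrative aside, not V8 code: the two bail-outs above exist because negating the smi 0 would have to produce -0 (only representable as a heap number) and negating the most negative smi overflows the tagged 32-bit word, which is what the overflow/undo branch catches. A small sketch of the overflow case:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t tagged = INT32_MIN;  // smi value -2^30, tag bit already 0.
  int64_t negated = 0 - static_cast<int64_t>(tagged);
  std::printf("negation overflows int32: %d\n", negated > INT32_MAX);
  // prints: negation overflows int32: 1
}
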
+
+
+void UnaryOpStub::GenerateSmiCodeBitNot(
+    MacroAssembler* masm,
+    Label* non_smi,
+    Label::Distance non_smi_near) {
+  // Check whether the value is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, non_smi, non_smi_near);
+
+  // Flip bits and revert inverted smi-tag.
+  __ not_(eax);
+  __ and_(eax, ~kSmiTagMask);
+  __ ret(0);
+}
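
Illustrative aside, not V8 code: the "flip bits and revert inverted smi-tag" trick works because for a tagged smi value << 1 we have ~(value << 1) & ~1 == (~value) << 1, so no untag/retag round trip is needed. A hypothetical SmiBitNot helper demonstrating it:

#include <cstdint>
#include <cstdio>

static int32_t SmiBitNot(int32_t tagged_smi) {
  return ~tagged_smi & ~1;  // == (~value) << 1
}

int main() {
  int32_t value = 41;
  int32_t tagged = value << 1;                  // ia32 smi tagging: value * 2.
  std::printf("%d\n", SmiBitNot(tagged) >> 1);  // prints -42 (== ~41)
}
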
+
+
+void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
+  __ mov(eax, Operand(edx));
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateHeapNumberStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateHeapNumberStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+  Label non_smi, undo, slow, call_builtin;
+  GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&undo);
+  GenerateSmiCodeUndo(masm);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+  __ bind(&call_builtin);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(
+    MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+                                            Label* slow) {
+  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, slow);
+
+  if (mode_ == UNARY_OVERWRITE) {
+    __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
+            Immediate(HeapNumber::kSignMask));  // Flip sign.
+  } else {
+    __ mov(edx, Operand(eax));
+    // edx: operand
+
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(edx);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ pop(edx);
+    __ LeaveInternalFrame();
+
+    __ bind(&heapnumber_allocated);
+    // eax: allocated 'empty' number
+    __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+    __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
+    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+    __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+    __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+  }
+  __ ret(0);
+}
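
Illustrative aside, not V8 code: the UNARY_OVERWRITE path above negates a heap number by xoring its exponent word with kSignMask, i.e. by toggling the IEEE-754 sign bit. A hypothetical standalone equivalent:

#include <cstdint>
#include <cstdio>
#include <cstring>

static double NegateByBitFlip(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= uint64_t{1} << 63;  // Flip the sign bit.
  std::memcpy(&value, &bits, sizeof(bits));
  return value;
}

int main() {
  std::printf("%g %g\n", NegateByBitFlip(2.5), NegateByBitFlip(-0.0));
  // prints: -2.5 0
}
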
+
+
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
+                                               Label* slow) {
+  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, slow);
+
+  // Convert the heap number in eax to an untagged integer in ecx.
+  IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
+
+  // Do the bitwise operation and check if the result fits in a smi.
+  Label try_float;
+  __ not_(ecx);
+  __ cmp(ecx, 0xc0000000);
+  __ j(sign, &try_float, Label::kNear);
+
+  // Tag the result as a smi and we're done.
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ lea(eax, Operand(ecx, times_2, kSmiTag));
+  __ ret(0);
+
+  // Try to store the result in a heap number.
+  __ bind(&try_float);
+  if (mode_ == UNARY_NO_OVERWRITE) {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ mov(ebx, eax);
+    __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    // Push the original HeapNumber on the stack. The integer value can't
+    // be stored since it's untagged and not in the smi range (so we can't
+    // smi-tag it). We'll recalculate the value after the GC instead.
+    __ push(ebx);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    // New HeapNumber is in eax.
+    __ pop(edx);
+    __ LeaveInternalFrame();
+    // IntegerConvert uses ebx and edi as scratch registers.
+    // This conversion won't go slow-case.
+    IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
+    __ not_(ecx);
+
+    __ bind(&heapnumber_allocated);
+  }
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ cvtsi2sd(xmm0, Operand(ecx));
+    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+  } else {
+    __ push(ecx);
+    __ fild_s(Operand(esp, 0));
+    __ pop(ecx);
+    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+  }
+  __ ret(0);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateGenericStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateGenericStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+  Label non_smi, undo, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&undo);
+  GenerateSmiCodeUndo(masm);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
+  // Handle the slow case by jumping to the corresponding JavaScript builtin.
+  __ pop(ecx);  // pop return address.
+  __ push(eax);
+  __ push(ecx);  // push return address
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+Handle<Code> GetBinaryOpStub(int key,
+    BinaryOpIC::TypeInfo type_info,
+    BinaryOpIC::TypeInfo result_type_info) {
+  BinaryOpStub stub(key, type_info, result_type_info);
+  return stub.GetCode();
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   __ pop(ecx);  // Save return address.
   __ push(edx);
   __ push(eax);
@@ -398,7 +877,7 @@
   // Patch the caller to an appropriate specialized stub and return the
   // operation result to the caller of the stub.
   __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
       5,
       1);
@@ -407,8 +886,7 @@
 
 // Prepare for a type transition runtime call when the args are already on
 // the stack, under the return address.
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
-    MacroAssembler* masm) {
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
   __ pop(ecx);  // Save return address.
   // Left and right arguments are already on top of the stack.
   // Push this stub's key. Although the operation and the type info are
@@ -422,34 +900,37 @@
   // Patch the caller to an appropriate specialized stub and return the
   // operation result to the caller of the stub.
   __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
       5,
       1);
 }
 
 
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
   switch (operands_type_) {
-    case TRBinaryOpIC::UNINITIALIZED:
+    case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
       break;
-    case TRBinaryOpIC::SMI:
+    case BinaryOpIC::SMI:
       GenerateSmiStub(masm);
       break;
-    case TRBinaryOpIC::INT32:
+    case BinaryOpIC::INT32:
       GenerateInt32Stub(masm);
       break;
-    case TRBinaryOpIC::HEAP_NUMBER:
+    case BinaryOpIC::HEAP_NUMBER:
       GenerateHeapNumberStub(masm);
       break;
-    case TRBinaryOpIC::ODDBALL:
+    case BinaryOpIC::ODDBALL:
       GenerateOddballStub(masm);
       break;
-    case TRBinaryOpIC::STRING:
+    case BinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
+    case BinaryOpIC::STRING:
       GenerateStringStub(masm);
       break;
-    case TRBinaryOpIC::GENERIC:
+    case BinaryOpIC::GENERIC:
       GenerateGeneric(masm);
       break;
     default:
@@ -458,7 +939,7 @@
 }
 
 
-const char* TypeRecordingBinaryOpStub::GetName() {
+const char* BinaryOpStub::GetName() {
   if (name_ != NULL) return name_;
   const int kMaxNameLength = 100;
   name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
@@ -474,15 +955,16 @@
   }
 
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "TypeRecordingBinaryOpStub_%s_%s_%s",
+               "BinaryOpStub_%s_%s_%s",
                op_name,
                overwrite_name,
-               TRBinaryOpIC::GetName(operands_type_));
+               BinaryOpIC::GetName(operands_type_));
   return name_;
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+    MacroAssembler* masm,
     Label* slow,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
   // 1. Move arguments into edx, eax except for DIV and MOD, which need the
@@ -542,7 +1024,7 @@
   // 3. Perform the smi check of the operands.
   STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
   __ test(combined, Immediate(kSmiTagMask));
-  __ j(not_zero, &not_smis, not_taken);
+  __ j(not_zero, &not_smis);
 
   // 4. Operands are both smis, perform the operation leaving the result in
   // eax and check the result if necessary.
@@ -571,7 +1053,7 @@
       __ shl_cl(left);
       // Check that the *signed* result fits in a smi.
       __ cmp(left, 0xc0000000);
-      __ j(sign, &use_fp_on_smis, not_taken);
+      __ j(sign, &use_fp_on_smis);
       // Tag the result and store it in register eax.
       __ SmiTag(left);
       __ mov(eax, left);
@@ -601,7 +1083,7 @@
       // Smi tagging these two cases can only happen with shifts
       // by 0 or 1 when handed a valid smi.
       __ test(left, Immediate(0xc0000000));
-      __ j(not_zero, slow, not_taken);
+      __ j(not_zero, &use_fp_on_smis);
       // Tag the result and store it in register eax.
       __ SmiTag(left);
       __ mov(eax, left);
@@ -610,12 +1092,12 @@
     case Token::ADD:
       ASSERT(right.is(eax));
       __ add(right, Operand(left));  // Addition is commutative.
-      __ j(overflow, &use_fp_on_smis, not_taken);
+      __ j(overflow, &use_fp_on_smis);
       break;
 
     case Token::SUB:
       __ sub(left, Operand(right));
-      __ j(overflow, &use_fp_on_smis, not_taken);
+      __ j(overflow, &use_fp_on_smis);
       __ mov(eax, left);
       break;
 
@@ -629,7 +1111,7 @@
       __ SmiUntag(right);
       // Do multiplication.
       __ imul(right, Operand(left));  // Multiplication is commutative.
-      __ j(overflow, &use_fp_on_smis, not_taken);
+      __ j(overflow, &use_fp_on_smis);
       // Check for negative zero result.  Use combined = left | right.
       __ NegativeZeroTest(right, combined, &use_fp_on_smis);
       break;
@@ -640,7 +1122,7 @@
       __ mov(edi, left);
       // Check for 0 divisor.
       __ test(right, Operand(right));
-      __ j(zero, &use_fp_on_smis, not_taken);
+      __ j(zero, &use_fp_on_smis);
       // Sign extend left into edx:eax.
       ASSERT(left.is(eax));
       __ cdq();
@@ -664,7 +1146,7 @@
     case Token::MOD:
       // Check for 0 divisor.
       __ test(right, Operand(right));
-      __ j(zero, &not_smis, not_taken);
+      __ j(zero, &not_smis);
 
       // Sign extend left into edx:eax.
       ASSERT(left.is(eax));
@@ -737,26 +1219,35 @@
   } else {
     ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
     switch (op_) {
-      case Token::SHL: {
+      case Token::SHL:
+      case Token::SHR: {
         Comment perform_float(masm, "-- Perform float operation on smis");
         __ bind(&use_fp_on_smis);
         // Result we want is in left == edx, so we can put the allocated heap
         // number in eax.
         __ AllocateHeapNumber(eax, ecx, ebx, slow);
         // Store the result in the HeapNumber and return.
-        if (CpuFeatures::IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(left));
-          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        } else {
-          // It's OK to overwrite the right argument on the stack because we
-          // are about to return.
+        // It's OK to overwrite the arguments on the stack because we
+        // are about to return.
+        if (op_ == Token::SHR) {
           __ mov(Operand(esp, 1 * kPointerSize), left);
-          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
+          __ fild_d(Operand(esp, 1 * kPointerSize));
           __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        } else {
+          ASSERT_EQ(Token::SHL, op_);
+          if (CpuFeatures::IsSupported(SSE2)) {
+            CpuFeatures::Scope use_sse2(SSE2);
+            __ cvtsi2sd(xmm0, Operand(left));
+            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+          } else {
+            __ mov(Operand(esp, 1 * kPointerSize), left);
+            __ fild_s(Operand(esp, 1 * kPointerSize));
+            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          }
         }
-      __ ret(2 * kPointerSize);
-      break;
+        __ ret(2 * kPointerSize);
+        break;
       }
 
       case Token::ADD:
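
Illustrative aside, not V8 code: the new SHR case above is needed because JavaScript's >>> yields an unsigned 32-bit result, which can exceed the smi range and must then be boxed as a heap number (hence loading a zero-extended 64-bit value with fild_d). A one-line demonstration of the range problem:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t result = static_cast<uint32_t>(-1);  // what (-1 >>> 0) yields in JS
  std::printf("%u fits in a smi: %d\n", result, result <= 0x3FFFFFFFu);
  // prints: 4294967295 fits in a smi: 0
}
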
@@ -848,7 +1339,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label call_runtime;
 
   switch (op_) {
@@ -870,8 +1361,8 @@
       UNREACHABLE();
   }
 
-  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
-      result_type_ == TRBinaryOpIC::SMI) {
+  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+      result_type_ == BinaryOpIC::SMI) {
     GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
   } else {
     GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@@ -899,19 +1390,51 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
   // Try to add arguments as strings, otherwise, transition to the generic
-  // TRBinaryOpIC type.
+  // BinaryOpIC type.
   GenerateAddStrings(masm);
   GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
   Label call_runtime;
-  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+
+  // Test if left operand is a string.
+  __ test(left, Immediate(kSmiTagMask));
+  __ j(zero, &call_runtime);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &call_runtime);
+
+  // Test if right operand is a string.
+  __ test(right, Immediate(kSmiTagMask));
+  __ j(zero, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &call_runtime);
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == BinaryOpIC::INT32);
 
   // Floating point case.
   switch (op_) {
@@ -933,7 +1456,7 @@
           default: UNREACHABLE();
         }
         // Check result type if it is currently Int32.
-        if (result_type_ <= TRBinaryOpIC::INT32) {
+        if (result_type_ <= BinaryOpIC::INT32) {
           __ cvttsd2si(ecx, Operand(xmm0));
           __ cvtsi2sd(xmm2, Operand(ecx));
           __ ucomisd(xmm0, xmm2);
@@ -1024,7 +1547,7 @@
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
         __ mov(ebx, Operand(eax));  // ebx: result
-        NearLabel skip_allocation;
+        Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
           case OVERWRITE_RIGHT:
@@ -1033,7 +1556,7 @@
             __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                 1 * kPointerSize : 2 * kPointerSize));
             __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
+            __ j(not_zero, &skip_allocation, Label::kNear);
             // Fall through!
           case NO_OVERWRITE:
             __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1111,32 +1634,32 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
-  Label call_runtime;
-
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   if (op_ == Token::ADD) {
     // Handle string addition here, because it is the only operation
     // that does not do a ToNumber conversion on the operands.
     GenerateAddStrings(masm);
   }
 
+  Factory* factory = masm->isolate()->factory();
+
   // Convert odd ball arguments to numbers.
-  NearLabel check, done;
-  __ cmp(edx, FACTORY->undefined_value());
-  __ j(not_equal, &check);
+  Label check, done;
+  __ cmp(edx, factory->undefined_value());
+  __ j(not_equal, &check, Label::kNear);
   if (Token::IsBitOp(op_)) {
     __ xor_(edx, Operand(edx));
   } else {
-    __ mov(edx, Immediate(FACTORY->nan_value()));
+    __ mov(edx, Immediate(factory->nan_value()));
   }
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&check);
-  __ cmp(eax, FACTORY->undefined_value());
-  __ j(not_equal, &done);
+  __ cmp(eax, factory->undefined_value());
+  __ j(not_equal, &done, Label::kNear);
   if (Token::IsBitOp(op_)) {
     __ xor_(eax, Operand(eax));
   } else {
-    __ mov(eax, Immediate(FACTORY->nan_value()));
+    __ mov(eax, Immediate(factory->nan_value()));
   }
   __ bind(&done);
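
Illustrative aside, not V8 code: the oddball conversion above turns undefined into NaN for arithmetic operators but into 0 for bitwise operators, matching JavaScript's ToNumber/ToInt32 semantics. Sketch:

#include <cmath>
#include <cstdio>

int main() {
  double arithmetic = std::nan("") + 1.0;  // undefined + 1 -> NaN
  int bitwise = 0 | 1;                     // undefined | 1 -> 1 (ToInt32(NaN) == 0)
  std::printf("%f %d\n", arithmetic, bitwise);  // prints: nan 1
}
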
 
@@ -1144,7 +1667,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   Label call_runtime;
 
   // Floating point case.
@@ -1239,7 +1762,7 @@
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
         __ mov(ebx, Operand(eax));  // ebx: result
-        NearLabel skip_allocation;
+        Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
           case OVERWRITE_RIGHT:
@@ -1248,7 +1771,7 @@
             __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                 1 * kPointerSize : 2 * kPointerSize));
             __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
+            __ j(not_zero, &skip_allocation, Label::kNear);
             // Fall through!
           case NO_OVERWRITE:
             __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1325,7 +1848,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   Label call_runtime;
 
   Counters* counters = masm->isolate()->counters();
@@ -1439,7 +1962,7 @@
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
         __ mov(ebx, Operand(eax));  // ebx: result
-        NearLabel skip_allocation;
+        Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
           case OVERWRITE_RIGHT:
@@ -1448,7 +1971,7 @@
             __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                 1 * kPointerSize : 2 * kPointerSize));
             __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
+            __ j(not_zero, &skip_allocation, Label::kNear);
             // Fall through!
           case NO_OVERWRITE:
             __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1522,9 +2045,9 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD);
-  NearLabel left_not_string, call_runtime;
+  Label left_not_string, call_runtime;
 
   // Registers containing left and right operands respectively.
   Register left = edx;
@@ -1532,9 +2055,9 @@
 
   // Test if left operand is a string.
   __ test(left, Immediate(kSmiTagMask));
-  __ j(zero, &left_not_string);
+  __ j(zero, &left_not_string, Label::kNear);
   __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
-  __ j(above_equal, &left_not_string);
+  __ j(above_equal, &left_not_string, Label::kNear);
 
   StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
   GenerateRegisterArgsPush(masm);
@@ -1543,9 +2066,9 @@
   // Left operand is not a string, test right.
   __ bind(&left_not_string);
   __ test(right, Immediate(kSmiTagMask));
-  __ j(zero, &call_runtime);
+  __ j(zero, &call_runtime, Label::kNear);
   __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
-  __ j(above_equal, &call_runtime);
+  __ j(above_equal, &call_runtime, Label::kNear);
 
   StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
   GenerateRegisterArgsPush(masm);
@@ -1556,7 +2079,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+void BinaryOpStub::GenerateHeapResultAllocation(
     MacroAssembler* masm,
     Label* alloc_failure) {
   Label skip_allocation;
@@ -1566,7 +2089,7 @@
       // If the argument in edx is already an object, we skip the
       // allocation of a heap number.
       __ test(edx, Immediate(kSmiTagMask));
-      __ j(not_zero, &skip_allocation, not_taken);
+      __ j(not_zero, &skip_allocation);
       // Allocate a heap number for the result. Keep eax and edx intact
       // for the possible runtime call.
       __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
@@ -1582,7 +2105,7 @@
       // If the argument in eax is already an object, we skip the
       // allocation of a heap number.
       __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, &skip_allocation, not_taken);
+      __ j(not_zero, &skip_allocation);
       // Fall through!
     case NO_OVERWRITE:
       // Allocate a heap number for the result. Keep eax and edx intact
@@ -1598,7 +2121,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
   __ pop(ecx);
   __ push(edx);
   __ push(eax);
@@ -1626,11 +2149,11 @@
   const bool tagged = (argument_type_ == TAGGED);
   if (tagged) {
     // Test that eax is a number.
-    NearLabel input_not_smi;
-    NearLabel loaded;
+    Label input_not_smi;
+    Label loaded;
     __ mov(eax, Operand(esp, kPointerSize));
     __ test(eax, Immediate(kSmiTagMask));
-    __ j(not_zero, &input_not_smi);
+    __ j(not_zero, &input_not_smi, Label::kNear);
     // Input is a smi. Untag and load it onto the FPU stack.
     // Then load the low and high words of the double into ebx, edx.
     STATIC_ASSERT(kSmiTagSize == 1);
@@ -1641,7 +2164,7 @@
     __ fst_d(Operand(esp, 0));
     __ pop(edx);
     __ pop(ebx);
-    __ jmp(&loaded);
+    __ jmp(&loaded, Label::kNear);
     __ bind(&input_not_smi);
     // Check if input is a HeapNumber.
     __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -1715,11 +2238,11 @@
   __ lea(ecx, Operand(ecx, ecx, times_2, 0));
   __ lea(ecx, Operand(eax, ecx, times_4, 0));
   // Check if cache matches: Double value is stored in uint32_t[2] array.
-  NearLabel cache_miss;
+  Label cache_miss;
   __ cmp(ebx, Operand(ecx, 0));
-  __ j(not_equal, &cache_miss);
+  __ j(not_equal, &cache_miss, Label::kNear);
   __ cmp(edx, Operand(ecx, kIntSize));
-  __ j(not_equal, &cache_miss);
+  __ j(not_equal, &cache_miss, Label::kNear);
   // Cache hit!
   __ mov(eax, Operand(ecx, 2 * kIntSize));
   if (tagged) {
@@ -1817,7 +2340,7 @@
     // Both fsin and fcos require arguments in the range +/-2^63 and
     // return NaN for infinities and NaN. They can share all code except
     // the actual fsin/fcos operation.
-    NearLabel in_range, done;
+    Label in_range, done;
     // If argument is outside the range -2^63..2^63, fsin/cos doesn't
     // work. We must reduce it to the appropriate range.
     __ mov(edi, edx);
@@ -1825,11 +2348,11 @@
     int supported_exponent_limit =
         (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
     __ cmp(Operand(edi), Immediate(supported_exponent_limit));
-    __ j(below, &in_range, taken);
+    __ j(below, &in_range, Label::kNear);
     // Check for infinity and NaN. Both return NaN for sin.
     __ cmp(Operand(edi), Immediate(0x7ff00000));
-    NearLabel non_nan_result;
-    __ j(not_equal, &non_nan_result, taken);
+    Label non_nan_result;
+    __ j(not_equal, &non_nan_result, Label::kNear);
     // Input is +/-Infinity or NaN. Result is NaN.
     __ fstp(0);
     // NaN is represented by 0x7ff8000000000000.
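
Illustrative aside, not V8 code: the exponent-limit comparison a few lines up works because fsin/fcos only accept |x| < 2^63 and because infinities and NaN carry the maximum exponent, so both conditions can be tested on the high 32 bits of the double alone. A hypothetical FsinFcosCanHandle helper:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

// True iff x is finite, not NaN, and |x| < 2^63, checked purely on the high
// word of the IEEE-754 representation.
static bool FsinFcosCanHandle(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  uint32_t high = static_cast<uint32_t>(bits >> 32) & 0x7FFFFFFFu;  // drop sign
  const uint32_t limit = (63u + 1023u) << 20;  // biased exponent of 2^63
  return high < limit;
}

int main() {
  std::printf("%d %d %d\n",
              FsinFcosCanHandle(1.0),
              FsinFcosCanHandle(1e30),
              FsinFcosCanHandle(std::numeric_limits<double>::infinity()));
  // prints: 1 0 0
}
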
@@ -1837,7 +2360,7 @@
     __ push(Immediate(0));
     __ fld_d(Operand(esp, 0));
     __ add(Operand(esp), Immediate(2 * kPointerSize));
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     __ bind(&non_nan_result);
 
@@ -1848,19 +2371,19 @@
     __ fld(1);
     // FPU Stack: input, 2*pi, input.
     {
-      NearLabel no_exceptions;
+      Label no_exceptions;
       __ fwait();
       __ fnstsw_ax();
       // Clear if Illegal Operand or Zero Division exceptions are set.
       __ test(Operand(eax), Immediate(5));
-      __ j(zero, &no_exceptions);
+      __ j(zero, &no_exceptions, Label::kNear);
       __ fnclex();
       __ bind(&no_exceptions);
     }
 
     // Compute st(0) % st(1)
     {
-      NearLabel partial_remainder_loop;
+      Label partial_remainder_loop;
       __ bind(&partial_remainder_loop);
       __ fprem1();
       __ fwait();
@@ -1897,203 +2420,6 @@
 }
 
 
-// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx.  Dest is ecx.  Source cannot be ecx or one of the
-// trashed registers.
-void IntegerConvert(MacroAssembler* masm,
-                    Register source,
-                    TypeInfo type_info,
-                    bool use_sse3,
-                    Label* conversion_failure) {
-  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
-  Label done, right_exponent, normal_exponent;
-  Register scratch = ebx;
-  Register scratch2 = edi;
-  if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope scope(SSE2);
-    __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
-    return;
-  }
-  if (!type_info.IsInteger32() || !use_sse3) {
-    // Get exponent word.
-    __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
-    // Get exponent alone in scratch2.
-    __ mov(scratch2, scratch);
-    __ and_(scratch2, HeapNumber::kExponentMask);
-  }
-  if (use_sse3) {
-    CpuFeatures::Scope scope(SSE3);
-    if (!type_info.IsInteger32()) {
-      // Check whether the exponent is too big for a 64 bit signed integer.
-      static const uint32_t kTooBigExponent =
-          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
-      __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
-      __ j(greater_equal, conversion_failure);
-    }
-    // Load x87 register with heap number.
-    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
-    // Reserve space for 64 bit answer.
-    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
-    // Do conversion, which cannot fail because we checked the exponent.
-    __ fisttp_d(Operand(esp, 0));
-    __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
-    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
-  } else {
-    // Load ecx with zero.  We use this either for the final shift or
-    // for the answer.
-    __ xor_(ecx, Operand(ecx));
-    // Check whether the exponent matches a 32 bit signed int that cannot be
-    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
-    // exponent is 30 (biased).  This is the exponent that we are fastest at and
-    // also the highest exponent we can handle here.
-    const uint32_t non_smi_exponent =
-        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
-    // If we have a match of the int32-but-not-Smi exponent then skip some
-    // logic.
-    __ j(equal, &right_exponent);
-    // If the exponent is higher than that then go to slow case.  This catches
-    // numbers that don't fit in a signed int32, infinities and NaNs.
-    __ j(less, &normal_exponent);
-
-    {
-      // Handle a big exponent.  The only reason we have this code is that the
-      // >>> operator has a tendency to generate numbers with an exponent of 31.
-      const uint32_t big_non_smi_exponent =
-          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
-      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
-      __ j(not_equal, conversion_failure);
-      // We have the big exponent, typically from >>>.  This means the number is
-      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
-      __ mov(scratch2, scratch);
-      __ and_(scratch2, HeapNumber::kMantissaMask);
-      // Put back the implicit 1.
-      __ or_(scratch2, 1 << HeapNumber::kExponentShift);
-      // Shift up the mantissa bits to take up the space the exponent used to
-      // take. We just orred in the implicit bit so that took care of one and
-      // we want to use the full unsigned range so we subtract 1 bit from the
-      // shift distance.
-      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
-      __ shl(scratch2, big_shift_distance);
-      // Get the second half of the double.
-      __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
-      // Shift down 21 bits to get the most significant 11 bits of the low
-      // mantissa word.
-      __ shr(ecx, 32 - big_shift_distance);
-      __ or_(ecx, Operand(scratch2));
-      // We have the answer in ecx, but we may need to negate it.
-      __ test(scratch, Operand(scratch));
-      __ j(positive, &done);
-      __ neg(ecx);
-      __ jmp(&done);
-    }
-
-    __ bind(&normal_exponent);
-    // Exponent word in scratch, exponent part of exponent word in scratch2.
-    // Zero in ecx.
-    // We know the exponent is smaller than 30 (biased).  If it is less than
-    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
-    // it rounds to zero.
-    const uint32_t zero_exponent =
-        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
-    __ sub(Operand(scratch2), Immediate(zero_exponent));
-    // ecx already has a Smi zero.
-    __ j(less, &done);
-
-    // We have a shifted exponent between 0 and 30 in scratch2.
-    __ shr(scratch2, HeapNumber::kExponentShift);
-    __ mov(ecx, Immediate(30));
-    __ sub(ecx, Operand(scratch2));
-
-    __ bind(&right_exponent);
-    // Here ecx is the shift, scratch is the exponent word.
-    // Get the top bits of the mantissa.
-    __ and_(scratch, HeapNumber::kMantissaMask);
-    // Put back the implicit 1.
-    __ or_(scratch, 1 << HeapNumber::kExponentShift);
-    // Shift up the mantissa bits to take up the space the exponent used to
-    // take. We have kExponentShift + 1 significant bits in the low end of the
-    // word.  Shift them to the top bits.
-    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-    __ shl(scratch, shift_distance);
-    // Get the second half of the double. For some exponents we don't
-    // actually need this because the bits get shifted out again, but
-    // it's probably slower to test than just to do it.
-    __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
-    // Shift down 22 bits to get the most significant 10 bits of the low
-    // mantissa word.
-    __ shr(scratch2, 32 - shift_distance);
-    __ or_(scratch2, Operand(scratch));
-    // Move down according to the exponent.
-    __ shr_cl(scratch2);
-    // Now the unsigned answer is in scratch2.  We need to move it to ecx and
-    // we may need to fix the sign.
-    NearLabel negative;
-    __ xor_(ecx, Operand(ecx));
-    __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
-    __ j(greater, &negative);
-    __ mov(ecx, scratch2);
-    __ jmp(&done);
-    __ bind(&negative);
-    __ sub(ecx, Operand(scratch2));
-    __ bind(&done);
-  }
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
-                                                TypeInfo type_info,
-                                                bool use_sse3,
-                                                Label* conversion_failure) {
-  // Check float operands.
-  Label arg1_is_object, check_undefined_arg1;
-  Label arg2_is_object, check_undefined_arg2;
-  Label load_arg2, done;
-
-  if (!type_info.IsDouble()) {
-    if (!type_info.IsSmi()) {
-      __ test(edx, Immediate(kSmiTagMask));
-      __ j(not_zero, &arg1_is_object);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(edx);
-    }
-    __ SmiUntag(edx);
-    __ jmp(&load_arg2);
-  }
-
-  __ bind(&arg1_is_object);
-
-  // Get the untagged integer version of the edx heap number in ecx.
-  IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
-  __ mov(edx, ecx);
-
-  // Here edx has the untagged integer, eax has a Smi or a heap number.
-  __ bind(&load_arg2);
-  if (!type_info.IsDouble()) {
-    // Test if arg2 is a Smi.
-    if (!type_info.IsSmi()) {
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, &arg2_is_object);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(eax);
-    }
-    __ SmiUntag(eax);
-    __ mov(ecx, eax);
-    __ jmp(&done);
-  }
-
-  __ bind(&arg2_is_object);
-
-  // Get the untagged integer version of the eax heap number in ecx.
-  IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
-  __ bind(&done);
-  __ mov(eax, edx);
-}
-
-
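The two routines removed above truncated a heap number's IEEE-754 payload to a 32-bit integer by hand, reading the exponent, shifting the mantissa into place, and reapplying the sign. A rough, illustrative C++ sketch of that idea (names and the bail-out bound are chosen here for clarity, not taken from the patch):

    #include <cstdint>
    #include <cstring>

    // Truncate a double to an int32 using only integer operations, in the
    // spirit of the removed IntegerConvert.
    int32_t TruncateToInt32(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) return 0;    // |value| < 1.0 truncates to zero
      if (exponent > 62) return 0;   // the stub bails to its slow path here
      uint64_t mantissa = (bits & 0xFFFFFFFFFFFFFULL) | (1ULL << 52);  // implicit 1
      int64_t magnitude = exponent <= 52
          ? static_cast<int64_t>(mantissa >> (52 - exponent))
          : static_cast<int64_t>(mantissa << (exponent - 52));
      int64_t result = (bits >> 63) ? -magnitude : magnitude;  // reapply the sign
      return static_cast<int32_t>(result);                     // keep the low 32 bits
    }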
 // Input: edx, eax are the left and right objects of a bit op.
 // Output: eax, ecx are left and right integers for a bit op.
 void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
@@ -2125,11 +2451,7 @@
   __ j(not_equal, &check_undefined_arg1);
 
   // Get the untagged integer version of the edx heap number in ecx.
-  IntegerConvert(masm,
-                 edx,
-                 TypeInfo::Unknown(),
-                 use_sse3,
-                 conversion_failure);
+  IntegerConvert(masm, edx, use_sse3, conversion_failure);
   __ mov(edx, ecx);
 
   // Here edx has the untagged integer, eax has a Smi or a heap number.
@@ -2156,28 +2478,12 @@
   __ j(not_equal, &check_undefined_arg2);
 
   // Get the untagged integer version of the eax heap number in ecx.
-  IntegerConvert(masm,
-                 eax,
-                 TypeInfo::Unknown(),
-                 use_sse3,
-                 conversion_failure);
+  IntegerConvert(masm, eax, use_sse3, conversion_failure);
   __ bind(&done);
   __ mov(eax, edx);
 }
 
 
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
-                                         TypeInfo type_info,
-                                         bool use_sse3,
-                                         Label* conversion_failure) {
-  if (type_info.IsNumber()) {
-    LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
-  } else {
-    LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
-  }
-}
-
-
 void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
                                                        bool use_sse3,
                                                        Label* not_int32) {
@@ -2187,12 +2493,12 @@
 
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
-  NearLabel load_smi, done;
+  Label load_smi, done;
 
   __ test(number, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi, not_taken);
+  __ j(zero, &load_smi, Label::kNear);
   __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&load_smi);
   __ SmiUntag(number);
@@ -2205,18 +2511,20 @@
 
 
 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
-  NearLabel load_smi_edx, load_eax, load_smi_eax, done;
+  Label load_smi_edx, load_eax, load_smi_eax, done;
   // Load operand in edx into xmm0.
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
+  // Argument in edx is a smi.
+  __ j(zero, &load_smi_edx, Label::kNear);
   __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
 
   __ bind(&load_eax);
   // Load operand in eax into xmm1.
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
+  // Argument in eax is a smi.
+  __ j(zero, &load_smi_eax, Label::kNear);
   __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
@@ -2235,10 +2543,11 @@
 
 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
                                            Label* not_numbers) {
-  NearLabel load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
   // Load operand in edx into xmm0, or branch to not_numbers.
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
+  // Argument in edx is a smi.
+  __ j(zero, &load_smi_edx, Label::kNear);
   Factory* factory = masm->isolate()->factory();
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
   __ j(not_equal, not_numbers);  // Argument in edx is not a number.
@@ -2246,9 +2555,10 @@
   __ bind(&load_eax);
   // Load operand in eax into xmm1, or branch to not_numbers.
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
+  // Argument in eax is a smi.
+  __ j(zero, &load_smi_eax, Label::kNear);
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
-  __ j(equal, &load_float_eax);
+  __ j(equal, &load_float_eax, Label::kNear);
   __ jmp(not_numbers);  // Argument in eax is not a number.
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
@@ -2259,7 +2569,7 @@
   __ SmiUntag(eax);  // Untag smi before converting to float.
   __ cvtsi2sd(xmm1, Operand(eax));
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
   __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
   __ bind(&done);
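The SmiUntag/SmiTag pairs in these loaders rely on V8's ia32 tagging scheme: a smi carries a 31-bit integer shifted up by one with a zero low bit, so untagging is an arithmetic shift and the tag bit doubles as the type check performed by the test(..., kSmiTagMask) instructions. A minimal sketch of that encoding (illustrative only, assuming a 32-bit word and a value that fits in 31 bits):

    #include <cstdint>

    // ia32 smi encoding: value << 1, low bit 0.  Heap object pointers have
    // the low bit set, so (word & 1) distinguishes the two cases.
    int32_t SmiTag(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }
    int32_t SmiUntag(int32_t word) { return word >> 1; }      // arithmetic shift
    bool    IsSmi(int32_t word)    { return (word & 1) == 0; }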
@@ -2300,14 +2610,14 @@
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             Register scratch,
                                             ArgLocation arg_location) {
-  NearLabel load_smi_1, load_smi_2, done_load_1, done;
+  Label load_smi_1, load_smi_2, done_load_1, done;
   if (arg_location == ARGS_IN_REGISTERS) {
     __ mov(scratch, edx);
   } else {
     __ mov(scratch, Operand(esp, 2 * kPointerSize));
   }
   __ test(scratch, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_1, not_taken);
+  __ j(zero, &load_smi_1, Label::kNear);
   __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
   __ bind(&done_load_1);
 
@@ -2317,9 +2627,9 @@
     __ mov(scratch, Operand(esp, 1 * kPointerSize));
   }
   __ test(scratch, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi_2, not_taken);
+  __ j(zero, &load_smi_2, Label::kNear);
   __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&load_smi_1);
   __ SmiUntag(scratch);
@@ -2359,11 +2669,11 @@
 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                              Label* non_float,
                                              Register scratch) {
-  NearLabel test_other, done;
+  Label test_other, done;
   // Test if both operands are floats or smi -> scratch=k_is_float;
   // Otherwise scratch = k_not_float.
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &test_other, not_taken);  // argument in edx is OK
+  __ j(zero, &test_other, Label::kNear);  // argument in edx is OK
   __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
   Factory* factory = masm->isolate()->factory();
   __ cmp(scratch, factory->heap_number_map());
@@ -2371,7 +2681,7 @@
 
   __ bind(&test_other);
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);  // argument in eax is OK
+  __ j(zero, &done, Label::kNear);  // argument in eax is OK
   __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
   __ cmp(scratch, factory->heap_number_map());
   __ j(not_equal, non_float);  // argument in eax is not a number -> NaN
@@ -2387,140 +2697,6 @@
 }
 
 
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  Label slow, done, undo;
-
-  if (op_ == Token::SUB) {
-    if (include_smi_code_) {
-      // Check whether the value is a smi.
-      NearLabel try_float;
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, &try_float, not_taken);
-
-      if (negative_zero_ == kStrictNegativeZero) {
-        // Go slow case if the value of the expression is zero
-        // to make sure that we switch between 0 and -0.
-        __ test(eax, Operand(eax));
-        __ j(zero, &slow, not_taken);
-      }
-
-      // The value of the expression is a smi that is not zero.  Try
-      // optimistic subtraction '0 - value'.
-      __ mov(edx, Operand(eax));
-      __ Set(eax, Immediate(0));
-      __ sub(eax, Operand(edx));
-      __ j(overflow, &undo, not_taken);
-      __ StubReturn(1);
-
-      // Try floating point case.
-      __ bind(&try_float);
-    } else if (FLAG_debug_code) {
-      __ AbortIfSmi(eax);
-    }
-
-    __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
-    __ cmp(edx, masm->isolate()->factory()->heap_number_map());
-    __ j(not_equal, &slow);
-    if (overwrite_ == UNARY_OVERWRITE) {
-      __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
-      __ xor_(edx, HeapNumber::kSignMask);  // Flip sign.
-      __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
-    } else {
-      __ mov(edx, Operand(eax));
-      // edx: operand
-      __ AllocateHeapNumber(eax, ebx, ecx, &undo);
-      // eax: allocated 'empty' number
-      __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
-      __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
-      __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
-      __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
-      __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
-    }
-  } else if (op_ == Token::BIT_NOT) {
-    if (include_smi_code_) {
-      Label non_smi;
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, &non_smi);
-      __ not_(eax);
-      __ and_(eax, ~kSmiTagMask);  // Remove inverted smi-tag.
-      __ ret(0);
-      __ bind(&non_smi);
-    } else if (FLAG_debug_code) {
-      __ AbortIfSmi(eax);
-    }
-
-    // Check if the operand is a heap number.
-    __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
-    __ cmp(edx, masm->isolate()->factory()->heap_number_map());
-    __ j(not_equal, &slow, not_taken);
-
-    // Convert the heap number in eax to an untagged integer in ecx.
-    IntegerConvert(masm,
-                   eax,
-                   TypeInfo::Unknown(),
-                   CpuFeatures::IsSupported(SSE3),
-                   &slow);
-
-    // Do the bitwise operation and check if the result fits in a smi.
-    NearLabel try_float;
-    __ not_(ecx);
-    __ cmp(ecx, 0xc0000000);
-    __ j(sign, &try_float, not_taken);
-
-    // Tag the result as a smi and we're done.
-    STATIC_ASSERT(kSmiTagSize == 1);
-    __ lea(eax, Operand(ecx, times_2, kSmiTag));
-    __ jmp(&done);
-
-    // Try to store the result in a heap number.
-    __ bind(&try_float);
-    if (overwrite_ == UNARY_NO_OVERWRITE) {
-      // Allocate a fresh heap number, but don't overwrite eax until
-      // we're sure we can do it without going through the slow case
-      // that needs the value in eax.
-      __ AllocateHeapNumber(ebx, edx, edi, &slow);
-      __ mov(eax, Operand(ebx));
-    }
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope use_sse2(SSE2);
-      __ cvtsi2sd(xmm0, Operand(ecx));
-      __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-    } else {
-      __ push(ecx);
-      __ fild_s(Operand(esp, 0));
-      __ pop(ecx);
-      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-    }
-  } else {
-    UNIMPLEMENTED();
-  }
-
-  // Return from the stub.
-  __ bind(&done);
-  __ StubReturn(1);
-
-  // Restore eax and go slow case.
-  __ bind(&undo);
-  __ mov(eax, Operand(edx));
-
-  // Handle the slow case by jumping to the JavaScript builtin.
-  __ bind(&slow);
-  __ pop(ecx);  // pop return address.
-  __ push(eax);
-  __ push(ecx);  // push return address
-  switch (op_) {
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-      break;
-    case Token::BIT_NOT:
-      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
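The removed unary-minus path negates a heap number without any floating-point load: it xors HeapNumber::kSignMask into the word holding the sign and exponent. A hedged C++ sketch of the same bit trick on a raw double:

    #include <cstdint>
    #include <cstring>

    // Flip the sign of a double by toggling its IEEE-754 sign bit, mirroring
    // the kSignMask xor in the removed stub (which edits the heap number's
    // upper word in place).
    double NegateByBitFlip(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= 1ULL << 63;                     // the sign bit
      std::memcpy(&value, &bits, sizeof(value));
      return value;
    }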
 void MathPowStub::Generate(MacroAssembler* masm) {
   // Registers are used as follows:
   // edx = base
@@ -2570,20 +2746,20 @@
   __ mov(edx, eax);
 
   // Get absolute value of exponent.
-  NearLabel no_neg;
+  Label no_neg;
   __ cmp(eax, 0);
-  __ j(greater_equal, &no_neg);
+  __ j(greater_equal, &no_neg, Label::kNear);
   __ neg(eax);
   __ bind(&no_neg);
 
   // Load xmm1 with 1.
   __ movsd(xmm1, xmm3);
-  NearLabel while_true;
-  NearLabel no_multiply;
+  Label while_true;
+  Label no_multiply;
 
   __ bind(&while_true);
   __ shr(eax, 1);
-  __ j(not_carry, &no_multiply);
+  __ j(not_carry, &no_multiply, Label::kNear);
   __ mulsd(xmm1, xmm0);
   __ bind(&no_multiply);
   __ mulsd(xmm0, xmm0);
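The while_true loop in MathPowStub is binary exponentiation: each shr shifts one exponent bit into the carry flag, the operand is multiplied in when that bit is set, and the base is squared every round. An equivalent hedged scalar sketch:

    #include <cstdint>

    // pow(base, exponent) for a non-negative integer exponent via
    // square-and-multiply, matching the shr/mulsd structure above.
    double PowBySquaring(double base, uint32_t exponent) {
      double result = 1.0;                 // xmm1 starts out as 1
      while (exponent != 0) {
        if (exponent & 1) result *= base;  // bit shifted out by shr was set
        base *= base;                      // mulsd xmm0, xmm0
        exponent >>= 1;
      }
      return result;
    }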
@@ -2614,13 +2790,13 @@
   __ ucomisd(xmm1, xmm1);
   __ j(parity_even, &call_runtime);
 
-  NearLabel base_not_smi;
-  NearLabel handle_special_cases;
+  Label base_not_smi;
+  Label handle_special_cases;
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(not_zero, &base_not_smi);
+  __ j(not_zero, &base_not_smi, Label::kNear);
   __ SmiUntag(edx);
   __ cvtsi2sd(xmm0, Operand(edx));
-  __ jmp(&handle_special_cases);
+  __ jmp(&handle_special_cases, Label::kNear);
 
   __ bind(&base_not_smi);
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
@@ -2635,7 +2811,7 @@
 
   // base is in xmm0 and exponent is in xmm1.
   __ bind(&handle_special_cases);
-  NearLabel not_minus_half;
+  Label not_minus_half;
   // Test for -0.5.
   // Load xmm2 with -0.5.
   __ mov(ecx, Immediate(0xBF000000));
@@ -2643,11 +2819,11 @@
   __ cvtss2sd(xmm2, xmm2);
   // xmm2 now has -0.5.
   __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half);
+  __ j(not_equal, &not_minus_half, Label::kNear);
 
   // Calculates reciprocal of square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
+  __ xorps(xmm1, xmm1);
   __ addsd(xmm1, xmm0);
   __ sqrtsd(xmm1, xmm1);
   __ divsd(xmm3, xmm1);
@@ -2664,7 +2840,7 @@
   __ j(not_equal, &call_runtime);
   // Calculates square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
+  __ xorps(xmm1, xmm1);
   __ addsd(xmm1, xmm0);
   __ sqrtsd(xmm1, xmm1);
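Both sqrtsd sites first clear a register and add the operand to it because sqrtsd(-0) yields -0 while the spec requires +0 here; adding to +0 canonicalizes the zero first (the xorpd-to-xorps change presumably just picks the shorter encoding, since both clear the register). The same canonicalization in plain C++:

    #include <cmath>

    // std::sqrt(-0.0) is -0.0, but the +0.0 addition turns -0.0 into +0.0
    // before the square root, matching the xorps/addsd/sqrtsd sequence.
    double SqrtWithPlusZero(double x) {
      return std::sqrt(0.0 + x);
    }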
 
@@ -2690,20 +2866,20 @@
   // Check that the key is a smi.
   Label slow;
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow, not_taken);
+  __ j(not_zero, &slow);
 
   // Check if the calling frame is an arguments adaptor frame.
-  NearLabel adaptor;
+  Label adaptor;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
   __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adaptor);
+  __ j(equal, &adaptor, Label::kNear);
 
   // Check index against formal parameters count limit passed in
   // through register eax. Use unsigned comparison to get negative
   // check for free.
   __ cmp(edx, Operand(eax));
-  __ j(above_equal, &slow, not_taken);
+  __ j(above_equal, &slow);
 
   // Read the argument from the stack and return it.
   STATIC_ASSERT(kSmiTagSize == 1);
@@ -2719,7 +2895,7 @@
   __ bind(&adaptor);
   __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ cmp(edx, Operand(ecx));
-  __ j(above_equal, &slow, not_taken);
+  __ j(above_equal, &slow);
 
   // Read the argument from the stack and return it.
   STATIC_ASSERT(kSmiTagSize == 1);
@@ -2770,10 +2946,10 @@
 
   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
-  NearLabel add_arguments_object;
+  Label add_arguments_object;
   __ bind(&try_allocate);
   __ test(ecx, Operand(ecx));
-  __ j(zero, &add_arguments_object);
+  __ j(zero, &add_arguments_object, Label::kNear);
   __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
   __ add(Operand(ecx), Immediate(GetArgumentsObjectSize()));
@@ -2829,7 +3005,7 @@
   __ SmiUntag(ecx);
 
   // Copy the fixed array slots.
-  NearLabel loop;
+  Label loop;
   __ bind(&loop);
   __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
   __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
@@ -2882,7 +3058,7 @@
       ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
   __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
   __ test(ebx, Operand(ebx));
-  __ j(zero, &runtime, not_taken);
+  __ j(zero, &runtime);
 
   // Check that the first argument is a JSRegExp object.
   __ mov(eax, Operand(esp, kJSRegExpOffset));
@@ -3023,9 +3199,8 @@
   __ bind(&check_code);
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
-  // the hole.
-  __ CmpObjectType(edx, CODE_TYPE, ebx);
-  __ j(not_equal, &runtime);
+  // a smi (code flushing support).
+  __ JumpIfSmi(edx, &runtime);
 
   // eax: subject string
   // edx: code
@@ -3066,16 +3241,16 @@
 
   // Argument 4: End of string data
   // Argument 3: Start of string data
-  NearLabel setup_two_byte, setup_rest;
+  Label setup_two_byte, setup_rest;
   __ test(edi, Operand(edi));
   __ mov(edi, FieldOperand(eax, String::kLengthOffset));
-  __ j(zero, &setup_two_byte);
+  __ j(zero, &setup_two_byte, Label::kNear);
   __ SmiUntag(edi);
   __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
   __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
   __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
   __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
-  __ jmp(&setup_rest);
+  __ jmp(&setup_rest, Label::kNear);
 
   __ bind(&setup_two_byte);
   STATIC_ASSERT(kSmiTag == 0);
@@ -3103,10 +3278,10 @@
   // Check the result.
   Label success;
   __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
-  __ j(equal, &success, taken);
+  __ j(equal, &success);
   Label failure;
   __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
-  __ j(equal, &failure, taken);
+  __ j(equal, &failure);
   __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
   // If not exception it can only be retry. Handle that in the runtime system.
   __ j(not_equal, &runtime);
@@ -3183,12 +3358,12 @@
   // ebx: last_match_info backing store (FixedArray)
   // ecx: offsets vector
   // edx: number of capture registers
-  NearLabel next_capture, done;
+  Label next_capture, done;
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
   __ sub(Operand(edx), Immediate(1));
-  __ j(negative, &done);
+  __ j(negative, &done, Label::kNear);
   // Read the value from the static offsets vector buffer.
   __ mov(edi, Operand(ecx, edx, times_int_size, 0));
   __ SmiTag(edi);
@@ -3215,7 +3390,7 @@
 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
   const int kMaxInlineLength = 100;
   Label slowcase;
-  NearLabel done;
+  Label done;
   __ mov(ebx, Operand(esp, kPointerSize * 3));
   __ test(ebx, Immediate(kSmiTagMask));
   __ j(not_zero, &slowcase);
@@ -3281,7 +3456,7 @@
   Label loop;
   __ test(ecx, Operand(ecx));
   __ bind(&loop);
-  __ j(less_equal, &done);  // Jump if ecx is negative or zero.
+  __ j(less_equal, &done, Label::kNear);  // Jump if ecx is negative or zero.
   __ sub(Operand(ecx), Immediate(1));
   __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
   __ jmp(&loop);
@@ -3322,19 +3497,19 @@
   // number string cache for smis is just the smi value, and the hash for
   // doubles is the xor of the upper and lower words. See
   // Heap::GetNumberStringCache.
-  NearLabel smi_hash_calculated;
-  NearLabel load_result_from_cache;
+  Label smi_hash_calculated;
+  Label load_result_from_cache;
   if (object_is_smi) {
     __ mov(scratch, object);
     __ SmiUntag(scratch);
   } else {
-    NearLabel not_smi, hash_calculated;
+    Label not_smi;
     STATIC_ASSERT(kSmiTag == 0);
     __ test(object, Immediate(kSmiTagMask));
-    __ j(not_zero, &not_smi);
+    __ j(not_zero, &not_smi, Label::kNear);
     __ mov(scratch, object);
     __ SmiUntag(scratch);
-    __ jmp(&smi_hash_calculated);
+    __ jmp(&smi_hash_calculated, Label::kNear);
     __ bind(&not_smi);
     __ cmp(FieldOperand(object, HeapObject::kMapOffset),
            masm->isolate()->factory()->heap_number_map());
@@ -3365,7 +3540,7 @@
     }
     __ j(parity_even, not_found);  // Bail out if NaN is involved.
     __ j(not_equal, not_found);  // The cache did not contain this value.
-    __ jmp(&load_result_from_cache);
+    __ jmp(&load_result_from_cache, Label::kNear);
   }
 
   __ bind(&smi_hash_calculated);
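The cache probe above hashes a smi as its own value and a double as the xor of its two 32-bit halves, then masks by the cache size, per the comment referencing Heap::GetNumberStringCache. A hedged sketch of the double case (the mask argument stands in for the cache-length masking done elsewhere in the stub):

    #include <cstdint>
    #include <cstring>

    // Hash a double for the number-string cache: xor of the upper and lower
    // 32-bit words of its IEEE-754 representation, masked to the table size.
    uint32_t DoubleCacheHash(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t low = static_cast<uint32_t>(bits);
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      return (low ^ high) & mask;
    }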
@@ -3425,7 +3600,7 @@
     __ mov(ecx, Operand(edx));
     __ or_(ecx, Operand(eax));
     __ test(ecx, Immediate(kSmiTagMask));
-    __ j(not_zero, &non_smi, not_taken);
+    __ j(not_zero, &non_smi);
     __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
     __ j(no_overflow, &smi_done);
     __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
@@ -3453,9 +3628,9 @@
     if (cc_ != equal) {
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
-      NearLabel check_for_nan;
+      Label check_for_nan;
       __ cmp(edx, masm->isolate()->factory()->undefined_value());
-      __ j(not_equal, &check_for_nan);
+      __ j(not_equal, &check_for_nan, Label::kNear);
       __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
       __ ret(0);
       __ bind(&check_for_nan);
@@ -3468,10 +3643,10 @@
       __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
       __ ret(0);
     } else {
-      NearLabel heap_number;
+      Label heap_number;
       __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
              Immediate(masm->isolate()->factory()->heap_number_map()));
-      __ j(equal, &heap_number);
+      __ j(equal, &heap_number, Label::kNear);
       if (cc_ != equal) {
         // Call runtime on identical JSObjects.  Otherwise return equal.
         __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
@@ -3503,8 +3678,8 @@
         __ setcc(above_equal, eax);
         __ ret(0);
       } else {
-        NearLabel nan;
-        __ j(above_equal, &nan);
+        Label nan;
+        __ j(above_equal, &nan, Label::kNear);
         __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
         __ ret(0);
         __ bind(&nan);
@@ -3520,7 +3695,7 @@
   // Non-strict object equality is slower, so it is handled later in the stub.
   if (cc_ == equal && strict_) {
     Label slow;  // Fallthrough label.
-    NearLabel not_smis;
+    Label not_smis;
     // If we're doing a strict equality comparison, we don't have to do
     // type conversion, so we generate code to do fast comparison for objects
     // and oddballs. Non-smi numbers and strings still go through the usual
@@ -3532,7 +3707,7 @@
     __ mov(ecx, Immediate(kSmiTagMask));
     __ and_(ecx, Operand(eax));
     __ test(ecx, Operand(edx));
-    __ j(not_zero, &not_smis);
+    __ j(not_zero, &not_smis, Label::kNear);
     // One operand is a smi.
 
     // Check whether the non-smi is a heap number.
@@ -3561,13 +3736,13 @@
 
     // Get the type of the first operand.
     // If the first object is a JS object, we have done pointer comparison.
-    NearLabel first_non_object;
+    Label first_non_object;
     STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
     __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-    __ j(below, &first_non_object);
+    __ j(below, &first_non_object, Label::kNear);
 
     // Return non-zero (eax is not zero)
-    NearLabel return_not_equal;
+    Label return_not_equal;
     STATIC_ASSERT(kHeapObjectTag != 0);
     __ bind(&return_not_equal);
     __ ret(0);
@@ -3600,7 +3775,7 @@
       __ ucomisd(xmm0, xmm1);
 
       // Don't base result on EFLAGS when a NaN is involved.
-      __ j(parity_even, &unordered, not_taken);
+      __ j(parity_even, &unordered);
       // Return a result of -1, 0, or 1, based on EFLAGS.
       __ mov(eax, 0);  // equal
       __ mov(ecx, Immediate(Smi::FromInt(1)));
@@ -3616,12 +3791,12 @@
       __ FCmp();
 
       // Don't base result on EFLAGS when a NaN is involved.
-      __ j(parity_even, &unordered, not_taken);
+      __ j(parity_even, &unordered);
 
-      NearLabel below_label, above_label;
+      Label below_label, above_label;
       // Return a result of -1, 0, or 1, based on EFLAGS.
-      __ j(below, &below_label, not_taken);
-      __ j(above, &above_label, not_taken);
+      __ j(below, &below_label);
+      __ j(above, &above_label);
 
       __ Set(eax, Immediate(0));
       __ ret(0);
@@ -3668,12 +3843,20 @@
                                          &check_unequal_objects);
 
   // Inline comparison of ascii strings.
-  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+  if (cc_ == equal) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      edx,
                                                      eax,
                                                      ecx,
-                                                     ebx,
-                                                     edi);
+                                                     ebx);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                       edx,
+                                                       eax,
+                                                       ecx,
+                                                       ebx,
+                                                       edi);
+  }
 #ifdef DEBUG
   __ Abort("Unexpected fall-through from string comparison");
 #endif
@@ -3683,8 +3866,8 @@
     // Non-strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    NearLabel not_both_objects;
-    NearLabel return_unequal;
+    Label not_both_objects;
+    Label return_unequal;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -3692,20 +3875,20 @@
     STATIC_ASSERT(kSmiTagMask == 1);
     __ lea(ecx, Operand(eax, edx, times_1, 0));
     __ test(ecx, Immediate(kSmiTagMask));
-    __ j(not_zero, &not_both_objects);
+    __ j(not_zero, &not_both_objects, Label::kNear);
     __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-    __ j(below, &not_both_objects);
+    __ j(below, &not_both_objects, Label::kNear);
     __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
-    __ j(below, &not_both_objects);
+    __ j(below, &not_both_objects, Label::kNear);
     // We do not bail out after this point.  Both are JSObjects, and
     // they are equal if and only if both are undetectable.
     // The and of the undetectable flags is 1 if and only if they are equal.
     __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
-    __ j(zero, &return_unequal);
+    __ j(zero, &return_unequal, Label::kNear);
     __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
-    __ j(zero, &return_unequal);
+    __ j(zero, &return_unequal, Label::kNear);
     // The objects are both undetectable, so they both compare as the value
     // undefined, and are equal.
     __ Set(eax, Immediate(EQUAL));
@@ -3761,31 +3944,22 @@
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   Label slow;
 
-  // If the receiver might be a value (string, number or boolean) check for this
-  // and box it if it is.
-  if (ReceiverMightBeValue()) {
+  // The receiver might implicitly be the global object. This is
+  // indicated by passing the hole as the receiver to the call
+  // function stub.
+  if (ReceiverMightBeImplicit()) {
+    Label call;
     // Get the receiver from the stack.
     // +1 ~ return address
-    Label receiver_is_value, receiver_is_js_object;
     __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
-
-    // Check if receiver is a smi (which is a number value).
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &receiver_is_value, not_taken);
-
-    // Check if the receiver is a valid JS object.
-    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
-    __ j(above_equal, &receiver_is_js_object);
-
-    // Call the runtime to box the value.
-    __ bind(&receiver_is_value);
-    __ EnterInternalFrame();
-    __ push(eax);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ LeaveInternalFrame();
-    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
-
-    __ bind(&receiver_is_js_object);
+    // Call as function is indicated with the hole.
+    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+    __ j(not_equal, &call, Label::kNear);
+    // Patch the receiver on the stack with the global receiver object.
+    __ mov(ebx, GlobalObjectOperand());
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
+    __ bind(&call);
   }
 
   // Get the function to call from the stack.
@@ -3794,14 +3968,30 @@
 
   // Check that the function really is a JavaScript function.
   __ test(edi, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  __ j(zero, &slow);
   // Goto slow case if we do not have a function.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &slow, not_taken);
+  __ j(not_equal, &slow);
 
   // Fast-case: Just invoke the function.
   ParameterCount actual(argc_);
-  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+  if (ReceiverMightBeImplicit()) {
+    Label call_as_function;
+    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+    __ j(equal, &call_as_function);
+    __ InvokeFunction(edi,
+                      actual,
+                      JUMP_FUNCTION,
+                      NullCallWrapper(),
+                      CALL_AS_METHOD);
+    __ bind(&call_as_function);
+  }
+  __ InvokeFunction(edi,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    CALL_AS_FUNCTION);
 
   // Slow-case: Non-function called.
   __ bind(&slow);
@@ -3878,9 +4068,9 @@
   // Make sure we're not trying to return 'the hole' from the runtime
   // call as this may lead to crashes in the IC code later.
   if (FLAG_debug_code) {
-    NearLabel okay;
+    Label okay;
     __ cmp(eax, masm->isolate()->factory()->the_hole_value());
-    __ j(not_equal, &okay);
+    __ j(not_equal, &okay, Label::kNear);
     __ int3();
     __ bind(&okay);
   }
@@ -3891,7 +4081,7 @@
   __ lea(ecx, Operand(eax, 1));
   // Lower 2 bits of ecx are 0 iff eax has failure tag.
   __ test(ecx, Immediate(kFailureTagMask));
-  __ j(zero, &failure_returned, not_taken);
+  __ j(zero, &failure_returned);
 
   ExternalReference pending_exception_address(
       Isolate::k_pending_exception_address, masm->isolate());
@@ -3902,10 +4092,10 @@
     __ push(edx);
     __ mov(edx, Operand::StaticVariable(
         ExternalReference::the_hole_value_location(masm->isolate())));
-    NearLabel okay;
+    Label okay;
     __ cmp(edx, Operand::StaticVariable(pending_exception_address));
     // Cannot use check here as it attempts to generate call into runtime.
-    __ j(equal, &okay);
+    __ j(equal, &okay, Label::kNear);
     __ int3();
     __ bind(&okay);
     __ pop(edx);
@@ -3922,7 +4112,7 @@
   // If the returned exception is RETRY_AFTER_GC continue at retry label
   STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
-  __ j(zero, &retry, taken);
+  __ j(zero, &retry);
 
   // Special handling of out of memory exceptions.
   __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
@@ -4178,22 +4368,22 @@
 
   // Check that the left hand is a JS object.
   __ test(object, Immediate(kSmiTagMask));
-  __ j(zero, &not_js_object, not_taken);
+  __ j(zero, &not_js_object);
   __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
 
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     // Look up the function and the map in the instanceof cache.
-    NearLabel miss;
+    Label miss;
     __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
     __ cmp(function,
            Operand::StaticArray(scratch, times_pointer_size, roots_address));
-    __ j(not_equal, &miss);
+    __ j(not_equal, &miss, Label::kNear);
     __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
     __ cmp(map, Operand::StaticArray(
         scratch, times_pointer_size, roots_address));
-    __ j(not_equal, &miss);
+    __ j(not_equal, &miss, Label::kNear);
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(eax, Operand::StaticArray(
         scratch, times_pointer_size, roots_address));
@@ -4206,7 +4396,7 @@
 
   // Check that the function prototype is a JS object.
   __ test(prototype, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  __ j(zero, &slow);
   __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
 
   // Update the global instanceof or call site inlined cache with the current
@@ -4236,13 +4426,13 @@
   // Loop through the prototype chain of the object looking for the function
   // prototype.
   __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
-  NearLabel loop, is_instance, is_not_instance;
+  Label loop, is_instance, is_not_instance;
   __ bind(&loop);
   __ cmp(scratch, Operand(prototype));
-  __ j(equal, &is_instance);
+  __ j(equal, &is_instance, Label::kNear);
   Factory* factory = masm->isolate()->factory();
   __ cmp(Operand(scratch), Immediate(factory->null_value()));
-  __ j(equal, &is_not_instance);
+  __ j(equal, &is_not_instance, Label::kNear);
   __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
   __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);
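The loop above is a plain walk up the prototype chain: succeed when the function's prototype object is reached, fail when the chain ends at null. A hedged, V8-agnostic sketch of the same walk (the struct below is an illustration, not a V8 type):

    // Minimal stand-in for a heap object with a prototype link; nullptr plays
    // the role of the null value that terminates the chain.
    struct ProtoObject {
      const ProtoObject* prototype;
    };

    // Mirrors the loop/is_instance/is_not_instance structure in the stub.
    bool IsInPrototypeChain(const ProtoObject* object, const ProtoObject* target) {
      for (const ProtoObject* p = object->prototype; p != nullptr; p = p->prototype) {
        if (p == target) return true;   // &is_instance
      }
      return false;                     // &is_not_instance
    }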
@@ -4296,9 +4486,9 @@
   // Before null, smi and string value checks, check that the rhs is a function
   // as for a non-function rhs an exception needs to be thrown.
   __ test(function, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  __ j(zero, &slow);
   __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
-  __ j(not_equal, &slow, not_taken);
+  __ j(not_equal, &slow);
 
   // Null is not instance of anything.
   __ cmp(object, factory->null_value());
@@ -4309,7 +4499,7 @@
   __ bind(&object_not_null);
   // Smi values is not instance of anything.
   __ test(object, Immediate(kSmiTagMask));
-  __ j(not_zero, &object_not_null_or_smi, not_taken);
+  __ j(not_zero, &object_not_null_or_smi);
   __ Set(eax, Immediate(Smi::FromInt(1)));
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
@@ -4339,11 +4529,11 @@
     __ push(function);
     __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
     __ LeaveInternalFrame();
-    NearLabel true_value, done;
+    Label true_value, done;
     __ test(eax, Operand(eax));
-    __ j(zero, &true_value);
+    __ j(zero, &true_value, Label::kNear);
     __ mov(eax, factory->false_value());
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
     __ bind(&true_value);
     __ mov(eax, factory->true_value());
     __ bind(&done);
@@ -4522,7 +4712,7 @@
   __ CheckMap(index_,
               masm->isolate()->factory()->heap_number_map(),
               index_not_number_,
-              true);
+              DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ push(object_);
   __ push(index_);
@@ -4581,7 +4771,7 @@
   __ test(code_,
           Immediate(kSmiTagMask |
                     ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
-  __ j(not_zero, &slow_case_, not_taken);
+  __ j(not_zero, &slow_case_);
 
   Factory* factory = masm->isolate()->factory();
   __ Set(result_, Immediate(factory->single_character_string_cache()));
@@ -4593,7 +4783,7 @@
                                code_, times_half_pointer_size,
                                FixedArray::kHeaderSize));
   __ cmp(result_, factory->undefined_value());
-  __ j(equal, &slow_case_, not_taken);
+  __ j(equal, &slow_case_);
   __ bind(&exit_);
 }
 
@@ -4672,11 +4862,11 @@
   // eax: first string
   // edx: second string
   // Check if either of the strings are empty. In that case return the other.
-  NearLabel second_not_zero_length, both_not_zero_length;
+  Label second_not_zero_length, both_not_zero_length;
   __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
   __ test(ecx, Operand(ecx));
-  __ j(not_zero, &second_not_zero_length);
+  __ j(not_zero, &second_not_zero_length, Label::kNear);
   // Second string is empty, result is first string which is already in eax.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_add_native(), 1);
@@ -4685,7 +4875,7 @@
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
   __ test(ebx, Operand(ebx));
-  __ j(not_zero, &both_not_zero_length);
+  __ j(not_zero, &both_not_zero_length, Label::kNear);
   // First string is empty, result is second string which is in edx.
   __ mov(eax, edx);
   __ IncrementCounter(counters->string_add_native(), 1);
@@ -4959,7 +5149,7 @@
                                           Register count,
                                           Register scratch,
                                           bool ascii) {
-  NearLabel loop;
+  Label loop;
   __ bind(&loop);
   // This loop just copies one character at a time, as it is only used for very
   // short strings.
@@ -5006,9 +5196,9 @@
   }
 
   // Don't enter the rep movs if there are less than 4 bytes to copy.
-  NearLabel last_bytes;
+  Label last_bytes;
   __ test(count, Immediate(~3));
-  __ j(zero, &last_bytes);
+  __ j(zero, &last_bytes, Label::kNear);
 
   // Copy from edi to esi using rep movs instruction.
   __ mov(scratch, count);
@@ -5026,7 +5216,7 @@
   __ j(zero, &done);
 
   // Copy remaining characters.
-  NearLabel loop;
+  Label loop;
   __ bind(&loop);
   __ mov_b(scratch, Operand(src, 0));
   __ mov_b(Operand(dest, 0), scratch);
@@ -5052,11 +5242,11 @@
 
   // Make sure that both characters are not digits, as such strings have a
   // different hash algorithm. Don't try to look for these in the symbol table.
-  NearLabel not_array_index;
+  Label not_array_index;
   __ mov(scratch, c1);
   __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
   __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
-  __ j(above, &not_array_index);
+  __ j(above, &not_array_index, Label::kNear);
   __ mov(scratch, c2);
   __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
   __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
@@ -5214,9 +5404,9 @@
   __ add(hash, Operand(scratch));
 
   // if (hash == 0) hash = 27;
-  NearLabel hash_not_zero;
+  Label hash_not_zero;
   __ test(hash, Operand(hash));
-  __ j(not_zero, &hash_not_zero);
+  __ j(not_zero, &hash_not_zero, Label::kNear);
   __ mov(hash, Immediate(27));
   __ bind(&hash_not_zero);
 }
@@ -5371,28 +5561,60 @@
 }
 
 
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                                      Register left,
+                                                      Register right,
+                                                      Register scratch1,
+                                                      Register scratch2) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label strings_not_equal, check_zero_length;
+  __ mov(length, FieldOperand(left, String::kLengthOffset));
+  __ cmp(length, FieldOperand(right, String::kLengthOffset));
+  __ j(equal, &check_zero_length, Label::kNear);
+  __ bind(&strings_not_equal);
+  __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
+  __ ret(0);
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ test(length, Operand(length));
+  __ j(not_zero, &compare_chars, Label::kNear);
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+  __ ret(0);
+
+  // Compare characters.
+  __ bind(&compare_chars);
+  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
+                                &strings_not_equal, Label::kNear);
+
+  // Characters are equal.
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+  __ ret(0);
+}
+
+
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                         Register left,
                                                         Register right,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3) {
-  Label result_not_equal;
-  Label result_greater;
-  Label compare_lengths;
-
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_compare_native(), 1);
 
   // Find minimum length.
-  NearLabel left_shorter;
+  Label left_shorter;
   __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
   __ mov(scratch3, scratch1);
   __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
 
   Register length_delta = scratch3;
 
-  __ j(less_equal, &left_shorter);
+  __ j(less_equal, &left_shorter, Label::kNear);
   // Right string is shorter. Change scratch1 to be length of right string.
   __ sub(scratch1, Operand(length_delta));
   __ bind(&left_shorter);
@@ -5400,41 +5622,19 @@
   Register min_length = scratch1;
 
   // If either length is zero, just compare lengths.
+  Label compare_lengths;
   __ test(min_length, Operand(min_length));
-  __ j(zero, &compare_lengths);
+  __ j(zero, &compare_lengths, Label::kNear);
 
-  // Change index to run from -min_length to -1 by adding min_length
-  // to string start. This means that loop ends when index reaches zero,
-  // which doesn't need an additional compare.
-  __ SmiUntag(min_length);
-  __ lea(left,
-         FieldOperand(left,
-                      min_length, times_1,
-                      SeqAsciiString::kHeaderSize));
-  __ lea(right,
-         FieldOperand(right,
-                      min_length, times_1,
-                      SeqAsciiString::kHeaderSize));
-  __ neg(min_length);
-
-  Register index = min_length;  // index = -min_length;
-
-  {
-    // Compare loop.
-    NearLabel loop;
-    __ bind(&loop);
-    // Compare characters.
-    __ mov_b(scratch2, Operand(left, index, times_1, 0));
-    __ cmpb(scratch2, Operand(right, index, times_1, 0));
-    __ j(not_equal, &result_not_equal);
-    __ add(Operand(index), Immediate(1));
-    __ j(not_zero, &loop);
-  }
+  // Compare characters.
+  Label result_not_equal;
+  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                &result_not_equal, Label::kNear);
 
   // Compare lengths -  strings up to min-length are equal.
   __ bind(&compare_lengths);
   __ test(length_delta, Operand(length_delta));
-  __ j(not_zero, &result_not_equal);
+  __ j(not_zero, &result_not_equal, Label::kNear);
 
   // Result is EQUAL.
   STATIC_ASSERT(EQUAL == 0);
@@ -5442,8 +5642,9 @@
   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
   __ ret(0);
 
+  Label result_greater;
   __ bind(&result_not_equal);
-  __ j(greater, &result_greater);
+  __ j(greater, &result_greater, Label::kNear);
 
   // Result is LESS.
   __ Set(eax, Immediate(Smi::FromInt(LESS)));
@@ -5456,6 +5657,36 @@
 }
 
 
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+    MacroAssembler* masm,
+    Register left,
+    Register right,
+    Register length,
+    Register scratch,
+    Label* chars_not_equal,
+    Label::Distance chars_not_equal_near) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiUntag(length);
+  __ lea(left,
+         FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(right,
+         FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+  __ neg(length);
+  Register index = length;  // index = -length;
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ mov_b(scratch, Operand(left, index, times_1, 0));
+  __ cmpb(scratch, Operand(right, index, times_1, 0));
+  __ j(not_equal, chars_not_equal, chars_not_equal_near);
+  __ add(Operand(index), Immediate(1));
+  __ j(not_zero, &loop);
+}
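The new helper biases both string pointers by the length and runs the index from -length toward zero, so the add that advances the index also supplies the loop's termination flag and no separate bounds compare is needed. A hedged C++ rendering of that indexing scheme:

    #include <cstddef>

    // Compare 'length' ASCII characters with a negative index counting up to
    // zero, as GenerateAsciiCharsCompareLoop does after the lea/neg setup.
    bool AsciiCharsEqual(const char* left, const char* right, std::size_t length) {
      left += length;                                       // point past the data
      right += length;
      for (std::ptrdiff_t i = -static_cast<std::ptrdiff_t>(length); i != 0; ++i) {
        if (left[i] != right[i]) return false;              // chars_not_equal
      }
      return true;                                          // all characters match
    }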
+
+
 void StringCompareStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -5467,9 +5698,9 @@
   __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
   __ mov(eax, Operand(esp, 1 * kPointerSize));  // right
 
-  NearLabel not_same;
+  Label not_same;
   __ cmp(edx, Operand(eax));
-  __ j(not_equal, &not_same);
+  __ j(not_equal, &not_same, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -5497,19 +5728,19 @@
 
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
-  NearLabel miss;
+  Label miss;
   __ mov(ecx, Operand(edx));
   __ or_(ecx, Operand(eax));
   __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &miss, not_taken);
+  __ j(not_zero, &miss, Label::kNear);
 
   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
     __ sub(eax, Operand(edx));
   } else {
-    NearLabel done;
+    Label done;
     __ sub(edx, Operand(eax));
-    __ j(no_overflow, &done);
+    __ j(no_overflow, &done, Label::kNear);
     // Correct sign of result in case of overflow.
     __ not_(edx);
     __ bind(&done);
@@ -5525,18 +5756,18 @@
 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
-  NearLabel generic_stub;
-  NearLabel unordered;
-  NearLabel miss;
+  Label generic_stub;
+  Label unordered;
+  Label miss;
   __ mov(ecx, Operand(edx));
   __ and_(ecx, Operand(eax));
   __ test(ecx, Immediate(kSmiTagMask));
-  __ j(zero, &generic_stub, not_taken);
+  __ j(zero, &generic_stub, Label::kNear);
 
   __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or SSE2 or CMOV is unsupported.
@@ -5552,7 +5783,7 @@
     __ ucomisd(xmm0, xmm1);
 
     // Don't base result on EFLAGS when a NaN is involved.
-    __ j(parity_even, &unordered, not_taken);
+    __ j(parity_even, &unordered, Label::kNear);
 
     // Return a result of -1, 0, or 1, based on EFLAGS.
     // Performing mov, because xor would destroy the flag register.
@@ -5575,18 +5806,141 @@
 }
 
 
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SYMBOLS);
+  ASSERT(GetCondition() == equal);
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+  Register tmp1 = ecx;
+  Register tmp2 = ebx;
+
+  // Check that both operands are heap objects.
+  Label miss;
+  __ mov(tmp1, Operand(left));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ and_(tmp1, Operand(right));
+  __ test(tmp1, Immediate(kSmiTagMask));
+  __ j(zero, &miss, Label::kNear);
+
+  // Check that both operands are symbols.
+  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, Operand(tmp2));
+  __ test(tmp1, Immediate(kIsSymbolMask));
+  __ j(zero, &miss, Label::kNear);
+
+  // Symbols are compared by identity.
+  Label done;
+  __ cmp(left, Operand(right));
+  // Make sure eax is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(eax));
+  __ j(not_equal, &done, Label::kNear);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+  __ bind(&done);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::STRINGS);
+  ASSERT(GetCondition() == equal);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+  Register tmp1 = ecx;
+  Register tmp2 = ebx;
+  Register tmp3 = edi;
+
+  // Check that both operands are heap objects.
+  __ mov(tmp1, Operand(left));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ and_(tmp1, Operand(right));
+  __ test(tmp1, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+  __ mov(tmp3, tmp1);
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ or_(tmp3, Operand(tmp2));
+  __ test(tmp3, Immediate(kIsNotStringMask));
+  __ j(not_zero, &miss);
+
+  // Fast check for identical strings.
+  Label not_same;
+  __ cmp(left, Operand(right));
+  __ j(not_equal, &not_same, Label::kNear);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+  __ ret(0);
+
+  // Handle not identical strings.
+  __ bind(&not_same);
+
+  // Check that both strings are symbols. If they are, we're done
+  // because we already know they are not identical.
+  Label do_compare;
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, Operand(tmp2));
+  __ test(tmp1, Immediate(kIsSymbolMask));
+  __ j(zero, &do_compare, Label::kNear);
+  // Make sure eax is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(eax));
+  __ ret(0);
+
+  // Check that both strings are sequential ASCII.
+  Label runtime;
+  __ bind(&do_compare);
+  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+
+  // Compare flat ASCII strings. Returns when done.
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2);
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ pop(tmp1);  // Return address.
+  __ push(left);
+  __ push(right);
+  __ push(tmp1);
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::OBJECTS);
-  NearLabel miss;
+  Label miss;
   __ mov(ecx, Operand(edx));
   __ and_(ecx, Operand(eax));
   __ test(ecx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ j(zero, &miss, Label::kNear);
 
   __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
   __ sub(eax, Operand(edx));
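GenerateSymbols (and the symbol fast path in GenerateStrings) can answer equality with a single pointer compare because symbols are interned: two equal symbol strings are always the same heap object. A toy interning sketch showing why identity suffices (illustrative, not V8's symbol table):

    #include <string>
    #include <unordered_set>

    // Toy intern table: equal strings map to one canonical object, so equality
    // of interned strings reduces to pointer identity.
    const std::string* Intern(const std::string& s) {
      static std::unordered_set<std::string> table;
      return &*table.insert(s).first;
    }

    bool InternedEqual(const std::string* a, const std::string* b) {
      return a == b;  // same object <=> same contents, for interned strings
    }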
@@ -5628,6 +5982,218 @@
 }
 
 
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register properties,
+    String* name,
+    Register r0) {
+  ASSERT(name->IsSymbol());
+
+  // If the names of the slots probed for the hash value (probes 1 to
+  // kProbes - 1) are not equal to the name, and the kProbes-th slot is
+  // unused (its name is the undefined value), the hash table is
+  // guaranteed not to contain the property. This holds even if some
+  // slots hold deleted properties (their names are the null value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = r0;
+    // The capacity is a smi and always a power of two.
+    __ mov(index, FieldOperand(properties, kCapacityOffset));
+    __ dec(index);
+    __ and_(Operand(index),
+           Immediate(Smi::FromInt(name->Hash() +
+                                   StringDictionary::GetProbeOffset(i))));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
+    Register entity_name = r0;
+    // Undefined at this slot means the name is not in the dictionary.
+    ASSERT_EQ(kSmiTagSize, 1);
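+    // The index is still a smi (value << 1), so scaling it by half a
+    // pointer size yields a full pointer-sized offset per element.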
+    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
+                                kElementsStartOffset - kHeapObjectTag));
+    __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
+    __ j(equal, done);
+
+    // Stop if we found the property.
+    __ cmp(entity_name, Handle<String>(name));
+    __ j(equal, miss);
+
+    // Check if the entry name is not a symbol.
+    __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+              kIsSymbolMask);
+    __ j(zero, miss);
+  }
+
+  StringDictionaryLookupStub stub(properties,
+                                  r0,
+                                  r0,
+                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
+  __ push(Immediate(Handle<Object>(name)));
+  __ push(Immediate(name->Hash()));
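+  // The pushes above pass the key and its hash to the stub, matching the
+  // stack layout documented in StringDictionaryLookupStub::Generate.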
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
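+  // A non-zero result means the name may be in the dictionary, so fall
+  // back to the miss path.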
+  __ test(r0, Operand(r0));
+  __ j(not_zero, miss);
+  __ jmp(done);
+  return result;
+}
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found leaving the
+// index into the dictionary in |r0|. Jump to the |miss| label
+// otherwise.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register elements,
+                                                        Register name,
+                                                        Register r0,
+                                                        Register r1) {
+  // Assert that name contains a string.
+  if (FLAG_debug_code) __ AbortIfNotString(name);
+
+  __ mov(r1, FieldOperand(elements, kCapacityOffset));
+  __ shr(r1, kSmiTagSize);  // convert smi to int
+  __ dec(r1);
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
+    __ shr(r0, String::kHashShift);
+    if (i > 0) {
+      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(r0, Operand(r1));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3
+
+    // Check if the key is identical to the name.
+    __ cmp(name, Operand(elements,
+                         r0,
+                         times_4,
+                         kElementsStartOffset - kHeapObjectTag));
+    __ j(equal, done);
+  }
+
+  StringDictionaryLookupStub stub(elements,
+                                  r1,
+                                  r0,
+                                  POSITIVE_LOOKUP);
+  __ push(name);
+  __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
+  __ shr(r0, String::kHashShift);
+  __ push(r0);
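+  // The key and its untagged hash are now on the stack, as expected by
+  // StringDictionaryLookupStub::Generate.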
+  __ CallStub(&stub);
+
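+  // r1 holds the stub's result: zero if the lookup failed, non-zero
+  // otherwise (r1 was passed as the result register above).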
+  __ test(r1, Operand(r1));
+  __ j(zero, miss);
+  __ jmp(done);
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Stack frame on entry:
+  //  esp[0 * kPointerSize]: return address.
+  //  esp[1 * kPointerSize]: key's hash.
+  //  esp[2 * kPointerSize]: key.
+  // Registers:
+  //  dictionary_: StringDictionary to probe.
+  //  result_: used as scratch.
+  //  index_: will hold an index of entry if lookup is successful.
+  //          might alias with result_.
+  // Returns:
+  //  result_ is zero if lookup failed, non-zero otherwise.
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  Register scratch = result_;
+
+  __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
+  __ dec(scratch);
+  __ SmiUntag(scratch);
+  __ push(scratch);
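+  // With the mask pushed, esp[0] holds the capacity mask,
+  // esp[2 * kPointerSize] the key's hash and esp[3 * kPointerSize] the key.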
+
+  // If the names of the slots probed for the hash value (probes 1 to
+  // kProbes - 1) are not equal to the name, and the kProbes-th slot is
+  // unused (its name is the undefined value), the hash table is
+  // guaranteed not to contain the property. This holds even if some
+  // slots hold deleted properties (their names are the null value).
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ mov(scratch, Operand(esp, 2 * kPointerSize));
+    if (i > 0) {
+      __ add(Operand(scratch),
+             Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(scratch, Operand(esp, 0));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.
+
+    // Undefined at this slot means the name is not in the dictionary.
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ mov(scratch, Operand(dictionary_,
+                            index_,
+                            times_pointer_size,
+                            kElementsStartOffset - kHeapObjectTag));
+    __ cmp(scratch, masm->isolate()->factory()->undefined_value());
+    __ j(equal, &not_in_dictionary);
+
+    // Stop if we found the property.
+    __ cmp(scratch, Operand(esp, 3 * kPointerSize));
+    __ j(equal, &in_dictionary);
+
+    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+      // If we hit a non-symbol key during a negative lookup we have to
+      // bail out, because this key might be equal to the key we are
+      // looking for.
+
+      // Check if the entry name is not a symbol.
+      __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+      __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
+                kIsSymbolMask);
+      __ j(zero, &maybe_in_dictionary);
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing a negative lookup, then probing failure should be
+  // treated as a lookup success. For a positive lookup, probing failure
+  // should be treated as a lookup failure.
+  if (mode_ == POSITIVE_LOOKUP) {
+    __ mov(result_, Immediate(0));
+    __ Drop(1);
+    __ ret(2 * kPointerSize);
+  }
+
+  __ bind(&in_dictionary);
+  __ mov(result_, Immediate(1));
+  __ Drop(1);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&not_in_dictionary);
+  __ mov(result_, Immediate(0));
+  __ Drop(1);
+  __ ret(2 * kPointerSize);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 80a75cd..ead7761 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -72,22 +72,115 @@
 };
 
 
-class TypeRecordingBinaryOpStub: public CodeStub {
+class UnaryOpStub: public CodeStub {
  public:
-  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+  UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
       : op_(op),
         mode_(mode),
-        operands_type_(TRBinaryOpIC::UNINITIALIZED),
-        result_type_(TRBinaryOpIC::UNINITIALIZED),
+        operand_type_(UnaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+  }
+
+  UnaryOpStub(int key, UnaryOpIC::TypeInfo operand_type)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        operand_type_(operand_type),
+        name_(NULL) {
+  }
+
+ private:
+  Token::Value op_;
+  UnaryOverwriteMode mode_;
+
+  // Operand type information determined at runtime.
+  UnaryOpIC::TypeInfo operand_type_;
+
+  char* name_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("TypeRecordingUnaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           UnaryOpIC::GetName(operand_type_));
+  }
+#endif
+
+  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+  class OpBits: public BitField<Token::Value, 1, 7> {};
+  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+  Major MajorKey() { return UnaryOp; }
+  int MinorKey() {
+    return ModeBits::encode(mode_)
+           | OpBits::encode(op_)
+           | OperandTypeInfoBits::encode(operand_type_);
+  }
+
+  // Note: A lot of the helper functions below will vanish when we use
+  // virtual functions instead of switches more often.
+  void Generate(MacroAssembler* masm);
+
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateSmiStubSub(MacroAssembler* masm);
+  void GenerateSmiStubBitNot(MacroAssembler* masm);
+  void GenerateSmiCodeSub(MacroAssembler* masm,
+                          Label* non_smi,
+                          Label* undo,
+                          Label* slow,
+                          Label::Distance non_smi_near = Label::kFar,
+                          Label::Distance undo_near = Label::kFar,
+                          Label::Distance slow_near = Label::kFar);
+  void GenerateSmiCodeBitNot(MacroAssembler* masm,
+                             Label* non_smi,
+                             Label::Distance non_smi_near = Label::kFar);
+  void GenerateSmiCodeUndo(MacroAssembler* masm);
+
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateHeapNumberStubSub(MacroAssembler* masm);
+  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateGenericStubSub(MacroAssembler* masm);
+  void GenerateGenericStubBitNot(MacroAssembler* masm);
+  void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return UnaryOpIC::ToState(operand_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_unary_op_type(operand_type_);
+  }
+};
+
+
+class BinaryOpStub: public CodeStub {
+ public:
+  BinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(BinaryOpIC::UNINITIALIZED),
+        result_type_(BinaryOpIC::UNINITIALIZED),
         name_(NULL) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  TypeRecordingBinaryOpStub(
+  BinaryOpStub(
       int key,
-      TRBinaryOpIC::TypeInfo operands_type,
-      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      BinaryOpIC::TypeInfo operands_type,
+      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         use_sse3_(SSE3Bits::decode(key)),
@@ -106,8 +199,8 @@
   bool use_sse3_;
 
   // Operand type information determined at runtime.
-  TRBinaryOpIC::TypeInfo operands_type_;
-  TRBinaryOpIC::TypeInfo result_type_;
+  BinaryOpIC::TypeInfo operands_type_;
+  BinaryOpIC::TypeInfo result_type_;
 
   char* name_;
 
@@ -115,12 +208,12 @@
 
 #ifdef DEBUG
   void Print() {
-    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+    PrintF("BinaryOpStub %d (op %s), "
            "(mode %d, runtime_type_info %s)\n",
            MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
-           TRBinaryOpIC::GetName(operands_type_));
+           BinaryOpIC::GetName(operands_type_));
   }
 #endif
 
@@ -128,10 +221,10 @@
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class SSE3Bits: public BitField<bool, 9, 1> {};
-  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
-  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
 
-  Major MajorKey() { return TypeRecordingBinaryOp; }
+  Major MajorKey() { return BinaryOp; }
   int MinorKey() {
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
@@ -153,6 +246,7 @@
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
   void GenerateAddStrings(MacroAssembler* masm);
 
@@ -161,15 +255,15 @@
   void GenerateTypeTransition(MacroAssembler* masm);
   void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
 
-  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
 
   virtual InlineCacheState GetICState() {
-    return TRBinaryOpIC::ToState(operands_type_);
+    return BinaryOpIC::ToState(operands_type_);
   }
 
   virtual void FinishCode(Code* code) {
-    code->set_type_recording_binary_op_type(operands_type_);
-    code->set_type_recording_binary_op_result_type(result_type_);
+    code->set_binary_op_type(operands_type_);
+    code->set_binary_op_result_type(result_type_);
   }
 
   friend class CodeGenerator;
@@ -283,11 +377,9 @@
 
 class StringCompareStub: public CodeStub {
  public:
-  explicit StringCompareStub() {
-  }
+  StringCompareStub() { }
 
-  // Compare two flat ascii strings and returns result in eax after popping two
-  // arguments from the stack.
+  // Compares two flat ASCII strings and returns the result in eax.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                               Register left,
                                               Register right,
@@ -295,11 +387,27 @@
                                               Register scratch2,
                                               Register scratch3);
 
- private:
-  Major MajorKey() { return StringCompare; }
-  int MinorKey() { return 0; }
+  // Compares two flat ASCII strings for equality and returns the result
+  // in eax.
+  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register scratch1,
+                                            Register scratch2);
 
-  void Generate(MacroAssembler* masm);
+ private:
+  virtual Major MajorKey() { return StringCompare; }
+  virtual int MinorKey() { return 0; }
+  virtual void Generate(MacroAssembler* masm);
+
+  static void GenerateAsciiCharsCompareLoop(
+      MacroAssembler* masm,
+      Register left,
+      Register right,
+      Register length,
+      Register scratch,
+      Label* chars_not_equal,
+      Label::Distance chars_not_equal_near = Label::kFar);
 };
 
 
@@ -335,6 +443,75 @@
 #endif
 };
 
+
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  StringDictionaryLookupStub(Register dictionary,
+                             Register result,
+                             Register index,
+                             LookupMode mode)
+      : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+
+  void Generate(MacroAssembler* masm);
+
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register properties,
+      String* name,
+      Register r0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register elements,
+                                     Register name,
+                                     Register r0,
+                                     Register r1);
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("StringDictionaryLookupStub\n");
+  }
+#endif
+
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+  int MinorKey() {
+    return DictionaryBits::encode(dictionary_.code()) |
+        ResultBits::encode(result_.code()) |
+        IndexBits::encode(index_.code()) |
+        LookupModeBits::encode(mode_);
+  }
+
+  class DictionaryBits: public BitField<int, 0, 3> {};
+  class ResultBits: public BitField<int, 3, 3> {};
+  class IndexBits: public BitField<int, 6, 3> {};
+  class LookupModeBits: public BitField<LookupMode, 9, 1> {};
+
+  Register dictionary_;
+  Register result_;
+  Register index_;
+  LookupMode mode_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index d1c869a..7a59a4f 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -981,6 +981,14 @@
                            NameOfXMMRegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (f0byte == 0x57) {
+            data += 2;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("xorps %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if ((f0byte & 0xF0) == 0x80) {
             data += JumpConditional(data, branch_hint);
           } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 0f95abd..bc65ddf 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -80,8 +80,8 @@
 
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kCodeOffset      = -2 * kPointerSize;
-  static const int kSPOffset        = -1 * kPointerSize;
+  static const int kCodeOffset     = -2 * kPointerSize;
+  static const int kSPOffset       = -1 * kPointerSize;
 
   static const int kCallerFPOffset =  0 * kPointerSize;
   static const int kCallerPCOffset = +1 * kPointerSize;
@@ -94,7 +94,9 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
-  static const int kFixedFrameSize    =  4;
+  // StandardFrame::IterateExpressions assumes that kContextOffset is the last
+  // object pointer.
+  static const int kFixedFrameSize    =  4;  // Currently unused.
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 5d153a8..5f0a0b6 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -45,6 +45,12 @@
 #define __ ACCESS_MASM(masm_)
 
 
+static unsigned GetPropertyId(Property* property) {
+  if (property->is_synthetic()) return AstNode::kNoNumber;
+  return property->id();
+}
+
+
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -57,14 +63,18 @@
     ASSERT(patch_site_.is_bound() == info_emitted_);
   }
 
-  void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+  void EmitJumpIfNotSmi(Register reg,
+                        Label* target,
+                        Label::Distance distance = Label::kFar) {
     __ test(reg, Immediate(kSmiTagMask));
-    EmitJump(not_carry, target);  // Always taken before patched.
+    EmitJump(not_carry, target, distance);  // Always taken before patched.
   }
 
-  void EmitJumpIfSmi(Register reg, NearLabel* target) {
+  void EmitJumpIfSmi(Register reg,
+                     Label* target,
+                     Label::Distance distance = Label::kFar) {
     __ test(reg, Immediate(kSmiTagMask));
-    EmitJump(carry, target);  // Never taken before patched.
+    EmitJump(carry, target, distance);  // Never taken before patched.
   }
 
   void EmitPatchInfo() {
@@ -80,11 +90,11 @@
 
  private:
   // jc will be patched with jz, jnc will become jnz.
-  void EmitJump(Condition cc, NearLabel* target) {
+  void EmitJump(Condition cc, Label* target, Label::Distance distance) {
     ASSERT(!patch_site_.is_bound() && !info_emitted_);
     ASSERT(cc == carry || cc == not_carry);
     __ bind(&patch_site_);
-    __ j(cc, target);
+    __ j(cc, target, distance);
   }
 
   MacroAssembler* masm_;
@@ -121,6 +131,21 @@
   }
 #endif
 
+  // Strict mode functions need to replace the receiver with undefined
+  // when called as functions (without an explicit receiver
+  // object). ecx is zero for method calls and non-zero for function
+  // calls.
+  if (info->is_strict_mode()) {
+    Label ok;
+    __ test(ecx, Operand(ecx));
+    __ j(zero, &ok, Label::kNear);
+    // +1 for return address.
+    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+    __ mov(Operand(esp, receiver_offset),
+           Immediate(isolate()->factory()->undefined_value()));
+    __ bind(&ok);
+  }
+
   __ push(ebp);  // Caller's frame pointer.
   __ mov(ebp, esp);
   __ push(esi);  // Callee's context.
@@ -232,11 +257,11 @@
 
     { Comment cmnt(masm_, "[ Stack check");
       PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
-      NearLabel ok;
+      Label ok;
       ExternalReference stack_limit =
           ExternalReference::address_of_stack_limit(isolate());
       __ cmp(esp, Operand::StaticVariable(stack_limit));
-      __ j(above_equal, &ok, taken);
+      __ j(above_equal, &ok, Label::kNear);
       StackCheckStub stub;
       __ CallStub(&stub);
       __ bind(&ok);
@@ -265,11 +290,11 @@
 
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
   Comment cmnt(masm_, "[ Stack check");
-  NearLabel ok;
+  Label ok;
   ExternalReference stack_limit =
       ExternalReference::address_of_stack_limit(isolate());
   __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above_equal, &ok, taken);
+  __ j(above_equal, &ok, Label::kNear);
   StackCheckStub stub;
   __ CallStub(&stub);
   // Record a mapping of this PC offset to the OSR id.  This is used to find
@@ -473,10 +498,10 @@
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  NearLabel done;
+  Label done;
   __ bind(materialize_true);
   __ mov(result_register(), isolate()->factory()->true_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(materialize_false);
   __ mov(result_register(), isolate()->factory()->false_value());
   __ bind(&done);
@@ -486,10 +511,10 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  NearLabel done;
+  Label done;
   __ bind(materialize_true);
   __ push(Immediate(isolate()->factory()->true_value()));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(materialize_false);
   __ push(Immediate(isolate()->factory()->false_value()));
   __ bind(&done);
@@ -539,25 +564,10 @@
 void FullCodeGenerator::DoTest(Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  // Emit the inlined tests assumed by the stub.
-  __ cmp(result_register(), isolate()->factory()->undefined_value());
-  __ j(equal, if_false);
-  __ cmp(result_register(), isolate()->factory()->true_value());
-  __ j(equal, if_true);
-  __ cmp(result_register(), isolate()->factory()->false_value());
-  __ j(equal, if_false);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ test(result_register(), Operand(result_register()));
-  __ j(zero, if_false);
-  __ test(result_register(), Immediate(kSmiTagMask));
-  __ j(zero, if_true);
-
-  // Call the ToBoolean stub for all other cases.
   ToBooleanStub stub;
   __ push(result_register());
   __ CallStub(&stub);
   __ test(eax, Operand(eax));
-
   // The stub returns nonzero for true.
   Split(not_zero, if_true, if_false, fall_through);
 }
@@ -629,8 +639,8 @@
   // preparation to avoid preparing with the same AST id twice.
   if (!context()->IsTest() || !info_->IsOptimizable()) return;
 
-  NearLabel skip;
-  if (should_normalize) __ jmp(&skip);
+  Label skip;
+  if (should_normalize) __ jmp(&skip, Label::kNear);
 
   ForwardBailoutStack* current = forward_bailout_stack_;
   while (current != NULL) {
@@ -717,24 +727,22 @@
     }
 
   } else if (prop != NULL) {
-    if (function != NULL || mode == Variable::CONST) {
-      // We are declaring a function or constant that rewrites to a
-      // property.  Use (keyed) IC to set the initial value.  We cannot
-      // visit the rewrite because it's shared and we risk recording
-      // duplicate AST IDs for bailouts from optimized code.
+    // A const declaration aliasing a parameter is an illegal redeclaration.
+    ASSERT(mode != Variable::CONST);
+    if (function != NULL) {
+      // We are declaring a function that rewrites to a property.
+      // Use (keyed) IC to set the initial value.  We cannot visit the
+      // rewrite because it's shared and we risk recording duplicate AST
+      // IDs for bailouts from optimized code.
       ASSERT(prop->obj()->AsVariableProxy() != NULL);
       { AccumulatorValueContext for_object(this);
         EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
       }
 
-      if (function != NULL) {
-        __ push(eax);
-        VisitForAccumulatorValue(function);
-        __ pop(edx);
-      } else {
-        __ mov(edx, eax);
-        __ mov(eax, isolate()->factory()->the_hole_value());
-      }
+      __ push(eax);
+      VisitForAccumulatorValue(function);
+      __ pop(edx);
+
       ASSERT(prop->key()->AsLiteral() != NULL &&
              prop->key()->AsLiteral()->handle()->IsSmi());
       __ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
@@ -742,7 +750,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
     }
   }
 }
@@ -800,10 +808,10 @@
     bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
     JumpPatchSite patch_site(masm_);
     if (inline_smi_code) {
-      NearLabel slow_case;
+      Label slow_case;
       __ mov(ecx, edx);
       __ or_(ecx, Operand(eax));
-      patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+      patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
 
       __ cmp(edx, Operand(eax));
       __ j(not_equal, &next_test);
@@ -815,7 +823,7 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    EmitCallIC(ic, &patch_site);
+    EmitCallIC(ic, &patch_site, clause->CompareId());
     __ test(eax, Operand(eax));
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
@@ -864,11 +872,11 @@
   __ j(equal, &exit);
 
   // Convert the object to a JS object.
-  NearLabel convert, done_convert;
+  Label convert, done_convert;
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &convert);
+  __ j(zero, &convert, Label::kNear);
   __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-  __ j(above_equal, &done_convert);
+  __ j(above_equal, &done_convert, Label::kNear);
   __ bind(&convert);
   __ push(eax);
   __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@@ -893,9 +901,8 @@
   // check for an enum cache.  Leave the map in ebx for the subsequent
   // prototype load.
   __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
-  __ cmp(edx, isolate()->factory()->empty_descriptor_array());
-  __ j(equal, &call_runtime);
+  __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(edx, &call_runtime);
 
   // Check that there is an enum cache in the non-empty instance
   // descriptors (edx).  This is the case if the next enumeration
@@ -905,9 +912,9 @@
   __ j(zero, &call_runtime);
 
   // For all objects but the receiver, check that the cache is empty.
-  NearLabel check_prototype;
+  Label check_prototype;
   __ cmp(ecx, Operand(eax));
-  __ j(equal, &check_prototype);
+  __ j(equal, &check_prototype, Label::kNear);
   __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
   __ cmp(edx, isolate()->factory()->empty_fixed_array());
   __ j(not_equal, &call_runtime);
@@ -920,9 +927,9 @@
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
-  NearLabel use_cache;
+  Label use_cache;
   __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
-  __ jmp(&use_cache);
+  __ jmp(&use_cache, Label::kNear);
 
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
@@ -932,14 +939,14 @@
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
   // to do a slow check.
-  NearLabel fixed_array;
+  Label fixed_array;
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
-  __ j(not_equal, &fixed_array);
+  __ j(not_equal, &fixed_array, Label::kNear);
 
   // We got a map in register eax. Get the enumeration cache from it.
   __ bind(&use_cache);
-  __ mov(ecx, FieldOperand(eax, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(eax, ecx);
   __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
   __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
@@ -975,10 +982,10 @@
 
   // Check if the expected map still matches that of the enumerable.
   // If not, we have to filter the key.
-  NearLabel update_each;
+  Label update_each;
   __ mov(ecx, Operand(esp, 4 * kPointerSize));
   __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ j(equal, &update_each);
+  __ j(equal, &update_each, Label::kNear);
 
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
@@ -1086,7 +1093,7 @@
   if (s != NULL && s->is_eval_scope()) {
     // Loop up the context chain.  There is no frame effect so it is
     // safe to use raw labels here.
-    NearLabel next, fast;
+    Label next, fast;
     if (!context.is(temp)) {
       __ mov(temp, context);
     }
@@ -1094,7 +1101,7 @@
     // Terminate at global context.
     __ cmp(FieldOperand(temp, HeapObject::kMapOffset),
            Immediate(isolate()->factory()->global_context_map()));
-    __ j(equal, &fast);
+    __ j(equal, &fast, Label::kNear);
     // Check that extension is NULL.
     __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
     __ j(not_equal, slow);
@@ -1113,7 +1120,7 @@
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  EmitCallIC(ic, mode);
+  EmitCallIC(ic, mode, AstNode::kNoNumber);
 }
 
 
@@ -1194,7 +1201,7 @@
           __ SafeSet(eax, Immediate(key_literal->handle()));
           Handle<Code> ic =
               isolate()->builtins()->KeyedLoadIC_Initialize();
-          EmitCallIC(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
           __ jmp(done);
         }
       }
@@ -1217,7 +1224,7 @@
     __ mov(eax, GlobalObjectOperand());
     __ mov(ecx, var->name());
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
     context()->Plug(eax);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1243,11 +1250,11 @@
     if (var->mode() == Variable::CONST) {
       // Constants may be the hole value if they have not been initialized.
       // Unhole them.
-      NearLabel done;
+      Label done;
       MemOperand slot_operand = EmitSlotSearch(slot, eax);
       __ mov(eax, slot_operand);
       __ cmp(eax, isolate()->factory()->the_hole_value());
-      __ j(not_equal, &done);
+      __ j(not_equal, &done, Label::kNear);
       __ mov(eax, isolate()->factory()->undefined_value());
       __ bind(&done);
       context()->Plug(eax);
@@ -1280,7 +1287,7 @@
 
     // Do a keyed property load.
     Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
 
     // Drop key and object left on the stack by IC.
     context()->Plug(eax);
@@ -1290,7 +1297,7 @@
 
 void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   Comment cmnt(masm_, "[ RegExpLiteral");
-  NearLabel materialized;
+  Label materialized;
   // Registers will be used as follows:
   // edi = JS function.
   // ecx = literals array.
@@ -1302,7 +1309,7 @@
       FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
   __ mov(ebx, FieldOperand(ecx, literal_offset));
   __ cmp(ebx, isolate()->factory()->undefined_value());
-  __ j(not_equal, &materialized);
+  __ j(not_equal, &materialized, Label::kNear);
 
   // Create regexp literal using runtime function
   // Result will be in eax.
@@ -1393,7 +1400,7 @@
             Handle<Code> ic = is_strict_mode()
                 ? isolate()->builtins()->StoreIC_Initialize_Strict()
                 : isolate()->builtins()->StoreIC_Initialize();
-            EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1600,13 +1607,13 @@
     SetSourcePosition(expr->position() + 1);
     AccumulatorValueContext context(this);
     if (ShouldInlineSmiCase(op)) {
-      EmitInlineSmiBinaryOp(expr,
+      EmitInlineSmiBinaryOp(expr->binary_operation(),
                             op,
                             mode,
                             expr->target(),
                             expr->value());
     } else {
-      EmitBinaryOp(op, mode);
+      EmitBinaryOp(expr->binary_operation(), op, mode);
     }
 
     // Deoptimization point in case the binary operation may have side effects.
@@ -1642,36 +1649,36 @@
   ASSERT(!key->handle()->IsSmi());
   __ mov(ecx, Immediate(key->handle()));
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
                                               Expression* left,
                                               Expression* right) {
   // Do combined smi check of the operands. Left operand is on the
   // stack. Right operand is in eax.
-  NearLabel done, smi_case, stub_call;
+  Label smi_case, done, stub_call;
   __ pop(edx);
   __ mov(ecx, eax);
   __ or_(eax, Operand(edx));
   JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &smi_case);
+  patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
 
   __ bind(&stub_call);
   __ mov(eax, ecx);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
-  __ jmp(&done);
+  BinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+  __ jmp(&done, Label::kNear);
 
   // Smi case.
   __ bind(&smi_case);
@@ -1724,7 +1731,7 @@
       __ imul(eax, Operand(ecx));
       __ j(overflow, &stub_call);
       __ test(eax, Operand(eax));
-      __ j(not_zero, &done, taken);
+      __ j(not_zero, &done, Label::kNear);
       __ mov(ebx, edx);
       __ or_(ebx, Operand(ecx));
       __ j(negative, &stub_call);
@@ -1748,11 +1755,13 @@
 }
 
 
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+                                     Token::Value op,
                                      OverwriteMode mode) {
   __ pop(edx);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), NULL);  // NULL signals no inlined smi code.
+  BinaryOpStub stub(op, mode);
+  // NULL signals no inlined smi code.
+  EmitCallIC(stub.GetCode(), NULL, expr->id());
   context()->Plug(eax);
 }
 
@@ -1792,7 +1801,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1815,7 +1824,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
       break;
     }
   }
@@ -1841,7 +1850,7 @@
     Handle<Code> ic = is_strict_mode()
         ? isolate()->builtins()->StoreIC_Initialize_Strict()
         : isolate()->builtins()->StoreIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
 
   } else if (op == Token::INIT_CONST) {
     // Like var declarations, const declarations are hoisted to function
@@ -1944,7 +1953,7 @@
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1984,7 +1993,7 @@
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2033,9 +2042,9 @@
   // Record source position of the IC call.
   SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
-      arg_count, in_loop);
-  EmitCallIC(ic, mode);
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+  EmitCallIC(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2044,8 +2053,7 @@
 
 
 void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key,
-                                            RelocInfo::Mode mode) {
+                                            Expression* key) {
   // Load the key.
   VisitForAccumulatorValue(key);
 
@@ -2069,7 +2077,7 @@
   Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
       arg_count, in_loop);
   __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize));  // Key.
-  EmitCallIC(ic, mode);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2077,7 +2085,7 @@
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2089,7 +2097,7 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arg_count, in_loop, flags);
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2180,7 +2188,7 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2228,7 +2236,10 @@
       __ bind(&call);
     }
 
-    EmitCallWithStub(expr);
+    // The receiver is either the global receiver or an object found
+    // by LoadContextSlot. That object could be the hole if the
+    // receiver is implicitly the global object.
+    EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
   } else if (fun->AsProperty() != NULL) {
     // Call to an object property.
     Property* prop = fun->AsProperty();
@@ -2260,18 +2271,18 @@
         SetSourcePosition(prop->position());
 
         Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-        EmitCallIC(ic, RelocInfo::CODE_TARGET);
+        EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
         // Push result (function).
         __ push(eax);
         // Push Global receiver.
         __ mov(ecx, GlobalObjectOperand());
         __ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
-        EmitCallWithStub(expr);
+        EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
       } else {
         { PreservePositionScope scope(masm()->positions_recorder());
           VisitForStackValue(prop->obj());
         }
-        EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
+        EmitKeyedCallWithIC(expr, prop->key());
       }
     }
   } else {
@@ -2282,7 +2293,7 @@
     __ mov(ebx, GlobalObjectOperand());
     __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
     // Emit function call.
-    EmitCallWithStub(expr);
+    EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
   }
 
 #ifdef DEBUG
@@ -2474,7 +2485,7 @@
   // Look for valueOf symbol in the descriptor array, and indicate false if
   // found. The type is not checked, so if it is a transition it is a false
   // negative.
-  __ mov(ebx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(ebx, ebx);
   __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
   // ebx: descriptor array
   // ecx: length of descriptor array
@@ -2793,7 +2804,7 @@
     __ movd(xmm1, Operand(ebx));
     __ movd(xmm0, Operand(eax));
     __ cvtss2sd(xmm1, xmm1);
-    __ pxor(xmm0, xmm1);
+    __ xorps(xmm0, xmm1);
     __ subsd(xmm0, xmm1);
     __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
   } else {
@@ -2842,13 +2853,13 @@
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
-  NearLabel done;
+  Label done;
   // If the object is a smi return the object.
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
   // If the object is not a value type, return the object.
   __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
   __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
 
   __ bind(&done);
@@ -2879,14 +2890,14 @@
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(ebx);  // eax = value. ebx = object.
 
-  NearLabel done;
+  Label done;
   // If the object is a smi, return the value.
   __ test(ebx, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
 
   // If the object is not a value type, return the value.
   __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
 
   // Store the value.
   __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
@@ -3095,17 +3106,17 @@
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
-  int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForStackValue(args->at(0));  // Receiver.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i + 1));
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; ++i) {
+    VisitForStackValue(args->at(i));
   }
-  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
+  VisitForAccumulatorValue(args->last());  // Function.
 
-  // InvokeFunction requires function in edi. Move it in there.
-  if (!result_register().is(edi)) __ mov(edi, result_register());
+  // InvokeFunction requires the function in edi. Move it in there.
+  __ mov(edi, result_register());
   ParameterCount count(arg_count);
-  __ InvokeFunction(edi, count, CALL_FUNCTION);
+  __ InvokeFunction(edi, count, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->Plug(eax);
 }
@@ -3618,9 +3629,10 @@
     // Call the JS runtime function via a call IC.
     __ Set(ecx, Immediate(expr->name()));
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
-        arg_count, in_loop);
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+        arg_count, in_loop, mode);
+    EmitCallIC(ic, mode, expr->id());
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3734,48 +3746,13 @@
       break;
     }
 
-    case Token::SUB: {
-      Comment cmt(masm_, "[ UnaryOperation (SUB)");
-      bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode overwrite =
-          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
-      // GenericUnaryOpStub expects the argument to be in the
-      // accumulator register eax.
-      VisitForAccumulatorValue(expr->expression());
-      __ CallStub(&stub);
-      context()->Plug(eax);
+    case Token::SUB:
+      EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
       break;
-    }
 
-    case Token::BIT_NOT: {
-      Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
-      // The generic unary operation stub expects the argument to be
-      // in the accumulator register eax.
-      VisitForAccumulatorValue(expr->expression());
-      Label done;
-      bool inline_smi_case = ShouldInlineSmiCase(expr->op());
-      if (inline_smi_case) {
-        NearLabel call_stub;
-        __ test(eax, Immediate(kSmiTagMask));
-        __ j(not_zero, &call_stub);
-        __ lea(eax, Operand(eax, kSmiTagMask));
-        __ not_(eax);
-        __ jmp(&done);
-        __ bind(&call_stub);
-      }
-      bool overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode mode =
-          overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      UnaryOpFlags flags = inline_smi_case
-          ? NO_UNARY_SMI_CODE_IN_STUB
-          : NO_UNARY_FLAGS;
-      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
-      __ CallStub(&stub);
-      __ bind(&done);
-      context()->Plug(eax);
+    case Token::BIT_NOT:
+      EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
       break;
-    }
 
     default:
       UNREACHABLE();
@@ -3783,6 +3760,23 @@
 }
 
 
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+                                           const char* comment) {
+  // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+  Comment cmt(masm_, comment);
+  bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+  UnaryOverwriteMode overwrite =
+      can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+  UnaryOpStub stub(expr->op(), overwrite);
+  // UnaryOpStub expects the argument to be in the
+  // accumulator register eax.
+  VisitForAccumulatorValue(expr->expression());
+  SetSourcePosition(expr->position());
+  EmitCallIC(stub.GetCode(), NULL, expr->id());
+  context()->Plug(eax);
+}
+
+
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -3847,10 +3841,10 @@
   }
 
   // Call ToNumber only if operand is not a smi.
-  NearLabel no_conversion;
+  Label no_conversion;
   if (ShouldInlineSmiCase(expr->op())) {
     __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &no_conversion);
+    __ j(zero, &no_conversion, Label::kNear);
   }
   ToNumberStub convert_stub;
   __ CallStub(&convert_stub);
@@ -3877,7 +3871,7 @@
   }
 
   // Inline smi case if we are in a loop.
-  NearLabel stub_call, done;
+  Label done, stub_call;
   JumpPatchSite patch_site(masm_);
 
   if (ShouldInlineSmiCase(expr->op())) {
@@ -3886,10 +3880,10 @@
     } else {
       __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
     }
-    __ j(overflow, &stub_call);
+    __ j(overflow, &stub_call, Label::kNear);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    patch_site.EmitJumpIfSmi(eax, &done);
+    patch_site.EmitJumpIfSmi(eax, &done, Label::kNear);
 
     __ bind(&stub_call);
     // Call stub. Undo operation first.
@@ -3906,8 +3900,8 @@
   // Call stub for +1/-1.
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
-  TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
-  EmitCallIC(stub.GetCode(), &patch_site);
+  BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+  EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
   __ bind(&done);
 
   // Store the value returned in eax.
@@ -3940,7 +3934,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3957,7 +3951,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         // Result is on the stack
@@ -3985,7 +3979,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(eax);
   } else if (proxy != NULL &&
@@ -4172,10 +4166,10 @@
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
-        NearLabel slow_case;
+        Label slow_case;
         __ mov(ecx, Operand(edx));
         __ or_(ecx, Operand(eax));
-        patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+        patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
         __ cmp(edx, Operand(eax));
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
@@ -4184,7 +4178,7 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      EmitCallIC(ic, &patch_site);
+      EmitCallIC(ic, &patch_site, expr->id());
 
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ test(eax, Operand(eax));
@@ -4244,7 +4238,9 @@
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+                                   RelocInfo::Mode mode,
+                                   unsigned ast_id) {
   ASSERT(mode == RelocInfo::CODE_TARGET ||
          mode == RelocInfo::CODE_TARGET_CONTEXT);
   switch (ic->kind()) {
@@ -4262,34 +4258,13 @@
     default:
       break;
   }
-
-  __ call(ic, mode);
-
-  // Crankshaft doesn't need patching of inlined loads and stores.
-  // When compiling the snapshot we need to produce code that works
-  // with and without Crankshaft.
-  if (V8::UseCrankshaft() && !Serializer::enabled()) {
-    return;
-  }
-
-  // If we're calling a (keyed) load or store stub, we have to mark
-  // the call as containing no inlined code so we will not attempt to
-  // patch it.
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-    case Code::KEYED_LOAD_IC:
-    case Code::STORE_IC:
-    case Code::KEYED_STORE_IC:
-      __ nop();  // Signals no inlined code.
-      break;
-    default:
-      // Do nothing.
-      break;
-  }
+  __ call(ic, mode, ast_id);
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+                                   JumpPatchSite* patch_site,
+                                   unsigned ast_id) {
   Counters* counters = isolate()->counters();
   switch (ic->kind()) {
     case Code::LOAD_IC:
@@ -4306,8 +4281,7 @@
     default:
       break;
   }
-
-  __ call(ic, RelocInfo::CODE_TARGET);
+  __ call(ic, RelocInfo::CODE_TARGET, ast_id);
   if (patch_site != NULL && patch_site->is_bound()) {
     patch_site->EmitPatchInfo();
   } else {
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index b7af03c..3941cfc 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -50,11 +50,11 @@
   // Register usage:
   //   type: holds the receiver instance type on entry.
   __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
-  __ j(equal, global_object, not_taken);
+  __ j(equal, global_object);
   __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
-  __ j(equal, global_object, not_taken);
+  __ j(equal, global_object);
   __ cmp(type, JS_GLOBAL_PROXY_TYPE);
-  __ j(equal, global_object, not_taken);
+  __ j(equal, global_object);
 }
 
 
@@ -73,13 +73,13 @@
 
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ j(zero, miss);
 
   // Check that the receiver is a valid JS object.
   __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
   __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
   __ cmp(r0, FIRST_JS_OBJECT_TYPE);
-  __ j(below, miss, not_taken);
+  __ j(below, miss);
 
   // If this assert fails, we have to check upper bound too.
   ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
@@ -90,68 +90,13 @@
   __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
             (1 << Map::kIsAccessCheckNeeded) |
             (1 << Map::kHasNamedInterceptor));
-  __ j(not_zero, miss, not_taken);
+  __ j(not_zero, miss);
 
   __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  __ CheckMap(r0, FACTORY->hash_table_map(), miss, true);
+  __ CheckMap(r0, FACTORY->hash_table_map(), miss, DONT_DO_SMI_CHECK);
 }
 
 
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r0|. Jump to the |miss| label
-// otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
-                                           Label* miss,
-                                           Label* done,
-                                           Register elements,
-                                           Register name,
-                                           Register r0,
-                                           Register r1) {
-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
-
-  // Compute the capacity mask.
-  const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-  __ mov(r1, FieldOperand(elements, kCapacityOffset));
-  __ shr(r1, kSmiTagSize);  // convert smi to int
-  __ dec(r1);
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  static const int kProbes = 4;
-  const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-  for (int i = 0; i < kProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
-    __ shr(r0, String::kHashShift);
-    if (i > 0) {
-      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
-    }
-    __ and_(r0, Operand(r1));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3
-
-    // Check if the key is identical to the name.
-    __ cmp(name, Operand(elements, r0, times_4,
-                         kElementsStartOffset - kHeapObjectTag));
-    if (i != kProbes - 1) {
-      __ j(equal, done, taken);
-    } else {
-      __ j(not_equal, miss, not_taken);
-    }
-  }
-}
-
-
-
 // Helper function used to load a property from a dictionary backing
 // storage. This function may fail to load a property even though it is
 // in the dictionary, so code at miss_label must always call a backup
@@ -183,13 +128,13 @@
   Label done;
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss_label,
-                                 &done,
-                                 elements,
-                                 name,
-                                 r0,
-                                 r1);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     r0,
+                                                     r1);
 
   // If probing finds an entry in the dictionary, r0 contains the
   // index into the dictionary. Check that the value is a normal
@@ -201,7 +146,7 @@
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
           Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
-  __ j(not_zero, miss_label, not_taken);
+  __ j(not_zero, miss_label);
 
   // Get the value at the masked, scaled index.
   const int kValueOffset = kElementsStartOffset + kPointerSize;
@@ -238,13 +183,13 @@
 
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss_label,
-                                 &done,
-                                 elements,
-                                 name,
-                                 r0,
-                                 r1);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     r0,
+                                                     r1);
 
   // If probing finds an entry in the dictionary, r0 contains the
   // index into the dictionary. Check that the value is a normal
@@ -259,7 +204,7 @@
          PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
   __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
           Immediate(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label, not_taken);
+  __ j(not_zero, miss_label);
 
   // Store the value at the masked, scaled index.
   const int kValueOffset = kElementsStartOffset + kPointerSize;
@@ -349,9 +294,9 @@
                              times_pointer_size,
                              NumberDictionary::kElementsStartOffset));
     if (i != (kProbes - 1)) {
-      __ j(equal, &done, taken);
+      __ j(equal, &done);
     } else {
-      __ j(not_equal, miss, not_taken);
+      __ j(not_equal, miss);
     }
   }
 
@@ -371,12 +316,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the
-// inlined load instruction.  It is 7 bytes (test eax, imm) plus
-// 6 bytes (jne slow_label).
-const int LoadIC::kOffsetToLoadInstruction = 13;
-
-
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
@@ -435,7 +374,7 @@
 
   // Check that the object isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, slow, not_taken);
+  __ j(zero, slow);
 
   // Get the map of the receiver.
   __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -443,7 +382,7 @@
   // Check bit field.
   __ test_b(FieldOperand(map, Map::kBitFieldOffset),
             (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
-  __ j(not_zero, slow, not_taken);
+  __ j(not_zero, slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
   // we enter the runtime system to make sure that indexing
@@ -451,7 +390,7 @@
   ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
 
   __ CmpInstanceType(map, JS_OBJECT_TYPE);
-  __ j(below, slow, not_taken);
+  __ j(below, slow);
 }
 
 
@@ -475,7 +414,10 @@
   __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
   if (not_fast_array != NULL) {
     // Check that the object is in fast mode and writable.
-    __ CheckMap(scratch, FACTORY->fixed_array_map(), not_fast_array, true);
+    __ CheckMap(scratch,
+                FACTORY->fixed_array_map(),
+                not_fast_array,
+                DONT_DO_SMI_CHECK);
   } else {
     __ AssertFastElements(scratch);
   }
@@ -514,12 +456,12 @@
   // Is the string an array index, with cached numeric value?
   __ mov(hash, FieldOperand(key, String::kHashFieldOffset));
   __ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
-  __ j(zero, index_string, not_taken);
+  __ j(zero, index_string);
 
   // Is the string a symbol?
   ASSERT(kSymbolTag != 0);
   __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsSymbolMask);
-  __ j(zero, not_symbol, not_taken);
+  __ j(zero, not_symbol);
 }
 
 
@@ -534,7 +476,7 @@
 
   // Check that the key is a smi.
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &check_string, not_taken);
+  __ j(not_zero, &check_string);
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from
   // where a numeric string is converted to a smi.
@@ -546,7 +488,7 @@
   // now in ecx.
   __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
             1 << Map::kHasFastElements);
-  __ j(zero, &check_number_dictionary, not_taken);
+  __ j(zero, &check_number_dictionary);
 
   GenerateFastArrayLoad(masm,
                         edx,
@@ -570,7 +512,10 @@
   // ebx: untagged index
   // eax: key
   // ecx: elements
-  __ CheckMap(ecx, isolate->factory()->hash_table_map(), &slow, true);
+  __ CheckMap(ecx,
+              isolate->factory()->hash_table_map(),
+              &slow,
+              DONT_DO_SMI_CHECK);
   Label slow_pop_receiver;
   // Push receiver on the stack to free up a register for the dictionary
   // probing.
@@ -710,7 +655,7 @@
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
@@ -724,11 +669,11 @@
 
   // Check that the receiver isn't a smi.
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  __ j(zero, &slow);
 
   // Check that the key is an array index, that is Uint32.
   __ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
-  __ j(not_zero, &slow, not_taken);
+  __ j(not_zero, &slow);
 
   // Get the map of the receiver.
   __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
@@ -738,7 +683,7 @@
   __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
   __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
   __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
-  __ j(not_zero, &slow, not_taken);
+  __ j(not_zero, &slow);
 
   // Everything is fine, call runtime.
   __ pop(ecx);
@@ -753,7 +698,7 @@
   __ TailCallExternalReference(ref, 2, 1);
 
   __ bind(&slow);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
@@ -769,22 +714,22 @@
 
   // Check that the object isn't a smi.
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  __ j(zero, &slow);
   // Get the map from the receiver.
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
   __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
             1 << Map::kIsAccessCheckNeeded);
-  __ j(not_zero, &slow, not_taken);
+  __ j(not_zero, &slow);
   // Check that the key is a smi.
   __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow, not_taken);
+  __ j(not_zero, &slow);
   __ CmpInstanceType(edi, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JS object.
   __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
-  __ j(below, &slow, not_taken);
+  __ j(below, &slow);
 
   // Object case: Check key against length in the elements array.
   // eax: value
@@ -792,9 +737,9 @@
   // ecx: key (a smi)
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
   // Check that the object is in fast mode and writable.
-  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, true);
+  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
   __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-  __ j(below, &fast, taken);
+  __ j(below, &fast);
 
   // Slow case: call runtime.
   __ bind(&slow);
@@ -809,9 +754,10 @@
   // ecx: key, a smi.
   // edi: receiver->elements, a FixedArray
   // flags: compare (ecx, edx.length())
-  __ j(not_equal, &slow, not_taken);  // do not leave holes in the array
+  // do not leave holes in the array:
+  __ j(not_equal, &slow);
   __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-  __ j(above_equal, &slow, not_taken);
+  __ j(above_equal, &slow);
   // Add 1 to receiver->length, and go to fast array write.
   __ add(FieldOperand(edx, JSArray::kLengthOffset),
          Immediate(Smi::FromInt(1)));
@@ -825,12 +771,12 @@
   // edx: receiver, a JSArray
   // ecx: key, a smi.
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, true);
+  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
   __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
-  __ j(above_equal, &extra, not_taken);
+  __ j(above_equal, &extra);
 
   // Fast case: Do the store.
   __ bind(&fast);
@@ -850,7 +796,8 @@
 // The generated code falls through if both probes miss.
 static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
                                           int argc,
-                                          Code::Kind kind) {
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- edx                 : receiver
@@ -861,7 +808,7 @@
   Code::Flags flags = Code::ComputeFlags(kind,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
@@ -874,9 +821,9 @@
   //
   // Check for number.
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &number, not_taken);
+  __ j(zero, &number);
   __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
-  __ j(not_equal, &non_number, taken);
+  __ j(not_equal, &non_number);
   __ bind(&number);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
       masm, Context::NUMBER_FUNCTION_INDEX, edx);
@@ -885,7 +832,7 @@
   // Check for string.
   __ bind(&non_number);
   __ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
-  __ j(above_equal, &non_string, taken);
+  __ j(above_equal, &non_string);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
       masm, Context::STRING_FUNCTION_INDEX, edx);
   __ jmp(&probe);
@@ -893,9 +840,9 @@
   // Check for boolean.
   __ bind(&non_string);
   __ cmp(edx, FACTORY->true_value());
-  __ j(equal, &boolean, not_taken);
+  __ j(equal, &boolean);
   __ cmp(edx, FACTORY->false_value());
-  __ j(not_equal, &miss, taken);
+  __ j(not_equal, &miss);
   __ bind(&boolean);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
       masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
@@ -922,15 +869,16 @@
 
   // Check that the result is not a smi.
   __ test(edi, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ j(zero, miss);
 
   // Check that the value is a JavaScript function, fetching its map into eax.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
-  __ j(not_equal, miss, not_taken);
+  __ j(not_equal, miss);
 
   // Invoke the function.
   ParameterCount actual(argc);
-  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+  __ InvokeFunction(edi, actual, JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 }
 
 // The generated code falls through if the call should be handled by runtime.
@@ -960,7 +908,8 @@
 
 static void GenerateCallMiss(MacroAssembler* masm,
                              int argc,
-                             IC::UtilityId id) {
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1002,13 +951,13 @@
     Label invoke, global;
     __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));  // receiver
     __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, &invoke, not_taken);
+    __ j(zero, &invoke, Label::kNear);
     __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
     __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
     __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
-    __ j(equal, &global);
+    __ j(equal, &global, Label::kNear);
     __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
-    __ j(not_equal, &invoke);
+    __ j(not_equal, &invoke, Label::kNear);
 
     // Patch the receiver on the stack.
     __ bind(&global);
@@ -1018,12 +967,21 @@
   }
 
   // Invoke the function.
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   ParameterCount actual(argc);
-  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+  __ InvokeFunction(edi,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    call_kind);
 }
 
 
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1034,8 +992,9 @@
 
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
-  GenerateMiss(masm, argc);
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+
+  GenerateMiss(masm, argc, extra_ic_state);
 }
 
 
@@ -1049,11 +1008,13 @@
   // -----------------------------------
 
   GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
 }
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1062,7 +1023,7 @@
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
 }
 
 
@@ -1084,7 +1045,7 @@
 
   // Check that the key is a smi.
   __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &check_string, not_taken);
+  __ j(not_zero, &check_string);
 
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from
@@ -1109,7 +1070,10 @@
   // eax: elements
   // ecx: smi key
   // Check whether the elements is a number dictionary.
-  __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow_load, true);
+  __ CheckMap(eax,
+              isolate->factory()->hash_table_map(),
+              &slow_load,
+              DONT_DO_SMI_CHECK);
   __ mov(ebx, ecx);
   __ SmiUntag(ebx);
   // ebx: untagged index
@@ -1150,7 +1114,7 @@
   __ CheckMap(ebx,
               isolate->factory()->hash_table_map(),
               &lookup_monomorphic_cache,
-              true);
+              DONT_DO_SMI_CHECK);
 
   GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
   __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
@@ -1158,7 +1122,10 @@
 
   __ bind(&lookup_monomorphic_cache);
   __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
-  GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+  GenerateMonomorphicCacheProbe(masm,
+                                argc,
+                                Code::KEYED_CALL_IC,
+                                Code::kNoExtraICState);
   // Fall through on miss.
 
   __ bind(&slow_call);
@@ -1208,7 +1175,7 @@
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
 }
 
 
@@ -1273,173 +1240,7 @@
 }
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a test eax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  Address delta_address = test_instruction_address + 1;
-  // The delta to the start of the map check instruction.
-  int delta = *reinterpret_cast<int*>(delta_address);
-
-  // The map address is the last 4 bytes of the 7-byte
-  // operand-immediate compare instruction, so we add 3 to get the
-  // offset to the last 4 bytes.
-  Address map_address = test_instruction_address + delta + 3;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // The offset is in the last 4 bytes of a six byte
-  // memory-to-register move instruction, so we add 2 to get the
-  // offset to the last 4 bytes.
-  Address offset_address =
-      test_instruction_address + delta + kOffsetToLoadInstruction + 2;
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-  return true;
-}
-
-
-// One byte opcode for mov ecx,0xXXXXXXXX.
-// Marks inlined contextual loads using all kinds of cells. Generated
-// code has the hole check:
-//   mov reg, <cell>
-//   mov reg, (<cell>, value offset)
-//   cmp reg, <the hole>
-//   je  slow
-//   ;; use reg
-static const byte kMovEcxByte = 0xB9;
-
-// One byte opcode for mov edx,0xXXXXXXXX.
-// Marks inlined contextual loads using only "don't delete"
-// cells. Generated code doesn't have the hole check:
-//   mov reg, <cell>
-//   mov reg, (<cell>, value offset)
-//   ;; use reg
-static const byte kMovEdxByte = 0xBA;
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address mov_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a mov ecx/edx,
-  // nothing was inlined.
-  byte b = *mov_instruction_address;
-  if (b != kMovEcxByte && b != kMovEdxByte) return false;
-  // If we don't have the hole check generated, we can only support
-  // "don't delete" cells.
-  if (b == kMovEdxByte && !is_dont_delete) return false;
-
-  Address delta_address = mov_instruction_address + 1;
-  // The delta to the start of the map check instruction.
-  int delta = *reinterpret_cast<int*>(delta_address);
-
-  // The map address is the last 4 bytes of the 7-byte
-  // operand-immediate compare instruction, so we add 3 to get the
-  // offset to the last 4 bytes.
-  Address map_address = mov_instruction_address + delta + 3;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // The cell is in the last 4 bytes of a five byte mov reg, imm32
-  // instruction, so we add 1 to get the offset to the last 4 bytes.
-  Address offset_address =
-      mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
-  *reinterpret_cast<Object**>(offset_address) = cell;
-  return true;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test eax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Extract the encoded deltas from the test eax instruction.
-  Address encoded_offsets_address = test_instruction_address + 1;
-  int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
-  int delta_to_map_check = -(encoded_offsets & 0xFFFF);
-  int delta_to_record_write = encoded_offsets >> 16;
-
-  // Patch the map to check. The map address is the last 4 bytes of
-  // the 7-byte operand-immediate compare instruction.
-  Address map_check_address = test_instruction_address + delta_to_map_check;
-  Address map_address = map_check_address + 3;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // Patch the offset in the store instruction. The offset is in the
-  // last 4 bytes of a six byte register-to-memory move instruction.
-  Address offset_address =
-      map_check_address + StoreIC::kOffsetToStoreInstruction + 2;
-  // The offset should have initial value (kMaxInt - 1), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  // Patch the offset in the write-barrier code. The offset is the
-  // last 4 bytes of a six byte lea instruction.
-  offset_address = map_check_address + delta_to_record_write + 2;
-  // The offset should have initial value (kMaxInt), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // The keyed load has a fast inlined case if the IC call instruction
-  // is immediately followed by a test instruction.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Fetch the offset from the test instruction to the map cmp
-  // instruction.  This offset is stored in the last 4 bytes of the 5
-  // byte test instruction.
-  Address delta_address = test_instruction_address + 1;
-  int delta = *reinterpret_cast<int*>(delta_address);
-  // Compute the map address.  The map address is in the last 4 bytes
-  // of the 7-byte operand-immediate compare instruction, so we add 3
-  // to the offset to get the map address.
-  Address map_address = test_instruction_address + delta + 3;
-  // Patch the map check.
-  *(reinterpret_cast<Object**>(map_address)) = map;
-  return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -1454,8 +1255,10 @@
   __ push(ebx);  // return address
 
   // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 2, 1);
 }
 
@@ -1519,12 +1322,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the inlined
-// store instruction.  It is 7 bytes (test reg, imm) plus 6 bytes (jne
-// slow_label).
-const int StoreIC::kOffsetToStoreInstruction = 13;
-
-
 void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
@@ -1546,22 +1343,22 @@
 
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ j(zero, &miss);
 
   // Check that the object is a JS array.
   __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Check that elements are FixedArray.
   // We rely on StoreIC_ArrayLength below to deal with all types of
   // fast elements (including COW).
   __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
   __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Check that value is a smi.
   __ test(value, Immediate(kSmiTagMask));
-  __ j(not_zero, &miss, not_taken);
+  __ j(not_zero, &miss);
 
   // Prepare tail call to StoreIC_ArrayLength.
   __ pop(scratch);
@@ -1653,7 +1450,7 @@
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -1668,8 +1465,30 @@
   __ push(ebx);
 
   // Do tail-call to runtime routine.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  __ pop(ebx);
+  __ push(edx);
+  __ push(ecx);
+  __ push(eax);
+  __ push(ebx);   // return address
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }
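
The removed GenerateStringDictionaryProbes helper above (its callers now use StringDictionaryLookupStub::GeneratePositiveLookup) documents its probing scheme in the comments: the probe index is (hash + i + i*i) & mask, entries are three pointers wide (key, value, details), and only a handful of probes are attempted before jumping to the miss label. A rough C++ rendering of that scheme under the same assumptions; this is a toy layout, not V8's StringDictionary, so the struct replaces the explicit scaling by 3:

#include <cstdint>
#include <string>
#include <vector>

struct Entry {
  std::string key;   // the symbol stored in this slot
  int value;
  int details;
};

// Returns the entry index on a hit, or -1 to signal the miss path, mirroring
// the unrolled four-probe loop in the deleted assembly.
int ProbeStringDictionary(const std::vector<Entry>& elements,
                          uint32_t capacity,   // assumed to be a power of two
                          const std::string& name,
                          uint32_t hash) {
  const uint32_t mask = capacity - 1;
  const int kProbes = 4;                        // probes before giving up
  for (int i = 0; i < kProbes; ++i) {
    // Compute the masked index: (hash + i + i * i) & mask.
    uint32_t index = (hash + i + i * i) & mask;
    if (elements[index].key == name) return static_cast<int>(index);
  }
  return -1;  // not found within kProbes attempts; take the miss path
}
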
 
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 8bcce33..3e95867 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -40,7 +40,7 @@
 
 // When invoking builtins, we need to record the safepoint in the middle of
 // the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public PostCallGenerator {
+class SafepointGenerator : public CallWrapper {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
@@ -50,7 +50,9 @@
         deoptimization_index_(deoptimization_index) {}
   virtual ~SafepointGenerator() { }
 
-  virtual void Generate() {
+  virtual void BeforeCall(int call_size) const {}
+
+  virtual void AfterCall() const {
     codegen_->RecordSafepoint(pointers_, deoptimization_index_);
   }
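
SafepointGenerator above now implements the two-hook CallWrapper interface (BeforeCall/AfterCall) instead of the removed PostCallGenerator's single Generate() callback, and call sites such as InvokeFunction take a wrapper plus a CallKind. A standalone sketch of the hook pattern, with illustrative names only (the real interface lives on the macro assembler):

#include <iostream>

class CallWrapper {
 public:
  virtual ~CallWrapper() {}
  // Called just before the call instruction of the given size is emitted.
  virtual void BeforeCall(int call_size) const = 0;
  // Called just after the call instruction, e.g. to record a safepoint.
  virtual void AfterCall() const = 0;
};

// Used when no bookkeeping is needed, like NullCallWrapper() in the diff.
class NullCallWrapper : public CallWrapper {
 public:
  virtual void BeforeCall(int) const {}
  virtual void AfterCall() const {}
};

class SafepointWrapper : public CallWrapper {
 public:
  virtual void BeforeCall(int call_size) const {
    std::cout << "about to emit a " << call_size << "-byte call\n";
  }
  virtual void AfterCall() const {
    std::cout << "record safepoint at the return address\n";
  }
};

// An invoke helper brackets the emitted call with the wrapper's hooks.
void EmitInvoke(const CallWrapper& wrapper) {
  const int kCallSize = 5;  // illustrative call instruction size
  wrapper.BeforeCall(kCallSize);
  // ... emit the actual call here ...
  wrapper.AfterCall();
}

int main() {
  EmitInvoke(NullCallWrapper());   // no bookkeeping needed
  EmitInvoke(SafepointWrapper());  // record a safepoint after the call
  return 0;
}
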
 
@@ -77,7 +79,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
-  code->set_stack_slots(StackSlotCount());
+  code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
   Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -126,13 +128,28 @@
   }
 #endif
 
+  // Strict mode functions need to replace the receiver with undefined
+  // when called as functions (without an explicit receiver
+  // object). ecx is zero for method calls and non-zero for function
+  // calls.
+  if (info_->is_strict_mode()) {
+    Label ok;
+    __ test(ecx, Operand(ecx));
+    __ j(zero, &ok, Label::kNear);
+    // +1 for return address.
+    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+    __ mov(Operand(esp, receiver_offset),
+           Immediate(isolate()->factory()->undefined_value()));
+    __ bind(&ok);
+  }
+
   __ push(ebp);  // Caller's frame pointer.
   __ mov(ebp, esp);
   __ push(esi);  // Callee's context.
   __ push(edi);  // Callee's JS function.
 
   // Reserve space for the stack slots needed by the code.
-  int slots = StackSlotCount();
+  int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
       __ mov(Operand(eax), Immediate(slots));
@@ -254,7 +271,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
-  safepoints_.Emit(masm(), StackSlotCount());
+  safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
@@ -386,7 +403,7 @@
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsArgument()) {
     ASSERT(is_tagged);
-    int src_index = StackSlotCount() + op->index();
+    int src_index = GetStackSlotCount() + op->index();
     translation->StoreStackSlot(src_index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -426,7 +443,7 @@
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
-  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+  if (code->kind() == Code::BINARY_OP_IC ||
       code->kind() == Code::COMPARE_IC) {
     __ nop();
   }
@@ -543,7 +560,7 @@
     __ mov(ebx, shared);
     __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
     __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
-    __ j(not_zero, &no_deopt);
+    __ j(not_zero, &no_deopt, Label::kNear);
     if (FLAG_trap_on_deopt) __ int3();
     __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
     __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
@@ -564,13 +581,13 @@
     __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     if (FLAG_trap_on_deopt) {
-      NearLabel done;
-      __ j(NegateCondition(cc), &done);
+      Label done;
+      __ j(NegateCondition(cc), &done, Label::kNear);
       __ int3();
       __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
       __ bind(&done);
     } else {
-      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
+      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
 }
@@ -689,7 +706,7 @@
   }
   __ bind(label->label());
   current_block_ = label->block_id();
-  LCodeGen::DoGap(label);
+  DoGap(label);
 }
 
 
@@ -715,6 +732,11 @@
 }
 
 
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
 void LCodeGen::DoParameter(LParameter* instr) {
   // Nothing to do.
 }
@@ -780,50 +802,91 @@
 
     if (divisor < 0) divisor = -divisor;
 
-    NearLabel positive_dividend, done;
+    Label positive_dividend, done;
     __ test(dividend, Operand(dividend));
-    __ j(not_sign, &positive_dividend);
+    __ j(not_sign, &positive_dividend, Label::kNear);
     __ neg(dividend);
     __ and_(dividend, divisor - 1);
     __ neg(dividend);
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ j(not_zero, &done);
+      __ j(not_zero, &done, Label::kNear);
       DeoptimizeIf(no_condition, instr->environment());
     } else {
-      __ jmp(&done);
+      __ jmp(&done, Label::kNear);
     }
     __ bind(&positive_dividend);
     __ and_(dividend, divisor - 1);
     __ bind(&done);
   } else {
-    LOperand* right = instr->InputAt(1);
-    ASSERT(ToRegister(instr->InputAt(0)).is(eax));
-    ASSERT(ToRegister(instr->result()).is(edx));
+    Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+    Register left_reg = ToRegister(instr->InputAt(0));
+    Register right_reg = ToRegister(instr->InputAt(1));
+    Register result_reg = ToRegister(instr->result());
 
-    Register right_reg = ToRegister(right);
+    ASSERT(left_reg.is(eax));
+    ASSERT(result_reg.is(edx));
     ASSERT(!right_reg.is(eax));
     ASSERT(!right_reg.is(edx));
 
     // Check for x % 0.
     if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
-      __ test(right_reg, ToOperand(right));
+      __ test(right_reg, Operand(right_reg));
       DeoptimizeIf(zero, instr->environment());
     }
 
+    __ test(left_reg, Operand(left_reg));
+    __ j(zero, &remainder_eq_dividend, Label::kNear);
+    __ j(sign, &slow, Label::kNear);
+
+    __ test(right_reg, Operand(right_reg));
+    __ j(not_sign, &both_positive, Label::kNear);
+    // The sign of the divisor doesn't matter.
+    __ neg(right_reg);
+
+    __ bind(&both_positive);
+    // If the dividend is smaller than the nonnegative
+    // divisor, the dividend is the result.
+    __ cmp(left_reg, Operand(right_reg));
+    __ j(less, &remainder_eq_dividend, Label::kNear);
+
+    // Check if the divisor is a PowerOfTwo integer.
+    Register scratch = ToRegister(instr->TempAt(0));
+    __ mov(scratch, right_reg);
+    __ sub(Operand(scratch), Immediate(1));
+    __ test(scratch, Operand(right_reg));
+    __ j(not_zero, &do_subtraction, Label::kNear);
+    __ and_(left_reg, Operand(scratch));
+    __ jmp(&remainder_eq_dividend, Label::kNear);
+
+    __ bind(&do_subtraction);
+    const int kUnfolds = 3;
+    // Try a few subtractions of the dividend.
+    __ mov(scratch, left_reg);
+    for (int i = 0; i < kUnfolds; i++) {
+      // Reduce the dividend by the divisor.
+      __ sub(left_reg, Operand(right_reg));
+      // Check if the dividend is less than the divisor.
+      __ cmp(left_reg, Operand(right_reg));
+      __ j(less, &remainder_eq_dividend, Label::kNear);
+    }
+    __ mov(left_reg, scratch);
+
+    // Slow case, using idiv instruction.
+    __ bind(&slow);
     // Sign extend to edx.
     __ cdq();
 
     // Check for (0 % -x) that will produce negative zero.
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      NearLabel positive_left;
-      NearLabel done;
-      __ test(eax, Operand(eax));
-      __ j(not_sign, &positive_left);
+      Label positive_left;
+      Label done;
+      __ test(left_reg, Operand(left_reg));
+      __ j(not_sign, &positive_left, Label::kNear);
       __ idiv(right_reg);
 
       // Test the remainder for 0, because then the result would be -0.
-      __ test(edx, Operand(edx));
-      __ j(not_zero, &done);
+      __ test(result_reg, Operand(result_reg));
+      __ j(not_zero, &done, Label::kNear);
 
       DeoptimizeIf(no_condition, instr->environment());
       __ bind(&positive_left);
@@ -832,6 +895,12 @@
     } else {
       __ idiv(right_reg);
     }
+    __ jmp(&done, Label::kNear);
+
+    __ bind(&remainder_eq_dividend);
+    __ mov(result_reg, left_reg);
+
+    __ bind(&done);
   }
 }
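
The DoModI hunk above adds fast paths for the register-operand case: a zero dividend is its own remainder, a positive dividend smaller than the (absolute) divisor is returned directly, a power-of-two divisor reduces to a mask, a few unrolled subtractions are tried next, and only then does the code fall back to idiv, with the negative-zero bailout handled separately. The same control flow in plain C++, as a sketch only; the generated code works on untagged int32 values and deoptimizes where this sketch simply computes:

#include <cassert>
#include <climits>

int FastMod(int dividend, int divisor) {
  assert(divisor != 0);                         // x % 0 deoptimizes instead
  if (dividend == INT_MIN && divisor == -1) return 0;  // avoid overflow in %
  if (dividend == 0) return 0;                  // remainder_eq_dividend
  if (dividend < 0 || divisor == INT_MIN) return dividend % divisor;  // slow case

  if (divisor < 0) divisor = -divisor;          // sign of the divisor doesn't matter
  if (dividend < divisor) return dividend;      // dividend is already the remainder

  // Power-of-two divisor: the remainder is a mask of the low bits.
  if ((divisor & (divisor - 1)) == 0) return dividend & (divisor - 1);

  // Try a few subtractions of the divisor before paying for a division.
  const int kUnfolds = 3;
  int reduced = dividend;
  for (int i = 0; i < kUnfolds; ++i) {
    reduced -= divisor;
    if (reduced < divisor) return reduced;
  }

  return dividend % divisor;                    // slow case: idiv
}
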
 
@@ -854,9 +923,9 @@
 
   // Check for (0 / -x) that will produce negative zero.
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    NearLabel left_not_zero;
+    Label left_not_zero;
     __ test(left_reg, Operand(left_reg));
-    __ j(not_zero, &left_not_zero);
+    __ j(not_zero, &left_not_zero, Label::kNear);
     __ test(right_reg, ToOperand(right));
     DeoptimizeIf(sign, instr->environment());
     __ bind(&left_not_zero);
@@ -864,9 +933,9 @@
 
   // Check for (-kMinInt / -1).
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    NearLabel left_not_min_int;
+    Label left_not_min_int;
     __ cmp(left_reg, kMinInt);
-    __ j(not_zero, &left_not_min_int);
+    __ j(not_zero, &left_not_min_int, Label::kNear);
     __ cmp(right_reg, -1);
     DeoptimizeIf(zero, instr->environment());
     __ bind(&left_not_min_int);
@@ -944,9 +1013,9 @@
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // Bail out if the result is supposed to be negative zero.
-    NearLabel done;
+    Label done;
     __ test(left, Operand(left));
-    __ j(not_zero, &done);
+    __ j(not_zero, &done, Label::kNear);
     if (right->IsConstantOperand()) {
       if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
         DeoptimizeIf(no_condition, instr->environment());
@@ -1087,7 +1156,7 @@
   // Use xor to produce +0.0 in a fast and compact way, but avoid to
   // do so if the constant is -0.0.
   if (BitCast<uint64_t, double>(v) == 0) {
-    __ xorpd(res, res);
+    __ xorps(res, res);
   } else {
     Register temp = ToRegister(instr->TempAt(0));
     uint64_t int_val = BitCast<uint64_t, double>(v);
@@ -1101,7 +1170,7 @@
         __ Set(temp, Immediate(upper));
         __ pinsrd(res, Operand(temp), 1);
       } else {
-        __ xorpd(res, res);
+        __ xorps(res, res);
         __ Set(temp, Immediate(upper));
         __ pinsrd(res, Operand(temp), 1);
       }
@@ -1151,14 +1220,14 @@
   Register result = ToRegister(instr->result());
   Register map = ToRegister(instr->TempAt(0));
   ASSERT(input.is(result));
-  NearLabel done;
+  Label done;
   // If the object is a smi return the object.
   __ test(input, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
 
   // If the object is not a value type, return the object.
   __ CmpObjectType(input, JS_VALUE_TYPE, map);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
   __ mov(result, FieldOperand(input, JSValue::kValueOffset));
 
   __ bind(&done);
@@ -1248,7 +1317,7 @@
   ASSERT(ToRegister(instr->InputAt(1)).is(eax));
   ASSERT(ToRegister(instr->result()).is(eax));
 
-  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   __ nop();  // Signals no inlined code.
 }
@@ -1292,7 +1361,7 @@
     EmitBranch(true_block, false_block, not_zero);
   } else if (r.IsDouble()) {
     XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
-    __ xorpd(xmm0, xmm0);
+    __ xorps(xmm0, xmm0);
     __ ucomisd(reg, xmm0);
     EmitBranch(true_block, false_block, not_equal);
   } else {
@@ -1317,10 +1386,10 @@
       __ j(zero, true_label);
 
       // Test for double values. Zero is false.
-      NearLabel call_stub;
+      Label call_stub;
       __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
              factory()->heap_number_map());
-      __ j(not_equal, &call_stub);
+      __ j(not_equal, &call_stub, Label::kNear);
       __ fldz();
       __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
       __ FCmp();
@@ -1426,20 +1495,20 @@
   LOperand* right = instr->InputAt(1);
   LOperand* result = instr->result();
 
-  NearLabel unordered;
+  Label unordered;
   if (instr->is_double()) {
     // Don't base result on EFLAGS when a NaN is involved. Instead
     // jump to the unordered case, which produces a false value.
     __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-    __ j(parity_even, &unordered, not_taken);
+    __ j(parity_even, &unordered, Label::kNear);
   } else {
     EmitCmpI(left, right);
   }
 
-  NearLabel done;
+  Label done;
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
   __ mov(ToRegister(result), factory()->true_value());
-  __ j(cc, &done);
+  __ j(cc, &done, Label::kNear);
 
   __ bind(&unordered);
   __ mov(ToRegister(result), factory()->false_value());
@@ -1474,8 +1543,8 @@
 
   __ cmp(left, Operand(right));
   __ mov(result, factory()->true_value());
-  NearLabel done;
-  __ j(equal, &done);
+  Label done;
+  __ j(equal, &done, Label::kNear);
   __ mov(result, factory()->false_value());
   __ bind(&done);
 }
@@ -1492,6 +1561,31 @@
 }
 
 
+void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  Register right = ToRegister(instr->InputAt(1));
+  Register result = ToRegister(instr->result());
+
+  Label done;
+  __ cmp(left, Operand(right));
+  __ mov(result, factory()->false_value());
+  __ j(not_equal, &done, Label::kNear);
+  __ mov(result, factory()->true_value());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  Register right = ToRegister(instr->InputAt(1));
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  __ cmp(left, Operand(right));
+  EmitBranch(true_block, false_block, equal);
+}
+
+
 void LCodeGen::DoIsNull(LIsNull* instr) {
   Register reg = ToRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
@@ -1502,27 +1596,27 @@
   __ cmp(reg, factory()->null_value());
   if (instr->is_strict()) {
     __ mov(result, factory()->true_value());
-    NearLabel done;
-    __ j(equal, &done);
+    Label done;
+    __ j(equal, &done, Label::kNear);
     __ mov(result, factory()->false_value());
     __ bind(&done);
   } else {
-    NearLabel true_value, false_value, done;
-    __ j(equal, &true_value);
+    Label true_value, false_value, done;
+    __ j(equal, &true_value, Label::kNear);
     __ cmp(reg, factory()->undefined_value());
-    __ j(equal, &true_value);
+    __ j(equal, &true_value, Label::kNear);
     __ test(reg, Immediate(kSmiTagMask));
-    __ j(zero, &false_value);
+    __ j(zero, &false_value, Label::kNear);
     // Check for undetectable objects by looking in the bit field in
     // the map. The object has already been smi checked.
     Register scratch = result;
     __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
     __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
     __ test(scratch, Immediate(1 << Map::kIsUndetectable));
-    __ j(not_zero, &true_value);
+    __ j(not_zero, &true_value, Label::kNear);
     __ bind(&false_value);
     __ mov(result, factory()->false_value());
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
     __ bind(&true_value);
     __ mov(result, factory()->true_value());
     __ bind(&done);
@@ -1633,8 +1727,8 @@
   ASSERT(instr->hydrogen()->value()->representation().IsTagged());
   __ test(input, Immediate(kSmiTagMask));
   __ mov(result, factory()->true_value());
-  NearLabel done;
-  __ j(zero, &done);
+  Label done;
+  __ j(zero, &done, Label::kNear);
   __ mov(result, factory()->false_value());
   __ bind(&done);
 }
@@ -1651,6 +1745,44 @@
 }
 
 
+void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  Label false_label, done;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, &false_label, Label::kNear);
+  __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
+            1 << Map::kIsUndetectable);
+  __ j(zero, &false_label, Label::kNear);
+  __ mov(result, factory()->true_value());
+  __ jmp(&done);
+  __ bind(&false_label);
+  __ mov(result, factory()->false_value());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  STATIC_ASSERT(kSmiTag == 0);
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, chunk_->GetAssemblyLabel(false_block));
+  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+            1 << Map::kIsUndetectable);
+  EmitBranch(true_block, false_block, not_zero);
+}
+
+
 static InstanceType TestType(HHasInstanceType* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1677,12 +1809,13 @@
 
   ASSERT(instr->hydrogen()->value()->representation().IsTagged());
   __ test(input, Immediate(kSmiTagMask));
-  NearLabel done, is_false;
-  __ j(zero, &is_false);
+  Label done, is_false;
+  __ j(zero, &is_false, Label::kNear);
   __ CmpObjectType(input, TestType(instr->hydrogen()), result);
-  __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
+  __ j(NegateCondition(BranchCondition(instr->hydrogen())),
+       &is_false, Label::kNear);
   __ mov(result, factory()->true_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&is_false);
   __ mov(result, factory()->false_value());
   __ bind(&done);
@@ -1727,8 +1860,8 @@
   __ mov(result, factory()->true_value());
   __ test(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
-  NearLabel done;
-  __ j(zero, &done);
+  Label done;
+  __ j(zero, &done, Label::kNear);
   __ mov(result, factory()->false_value());
   __ bind(&done);
 }
@@ -1810,16 +1943,16 @@
   ASSERT(input.is(result));
   Register temp = ToRegister(instr->TempAt(0));
   Handle<String> class_name = instr->hydrogen()->class_name();
-  NearLabel done;
+  Label done;
   Label is_true, is_false;
 
   EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
 
-  __ j(not_equal, &is_false);
+  __ j(not_equal, &is_false, Label::kNear);
 
   __ bind(&is_true);
   __ mov(result, factory()->true_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&is_false);
   __ mov(result, factory()->false_value());
@@ -1867,11 +2000,11 @@
   InstanceofStub stub(InstanceofStub::kArgsInRegisters);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 
-  NearLabel true_value, done;
+  Label true_value, done;
   __ test(eax, Operand(eax));
-  __ j(zero, &true_value);
+  __ j(zero, &true_value, Label::kNear);
   __ mov(ToRegister(instr->result()), factory()->false_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&true_value);
   __ mov(ToRegister(instr->result()), factory()->true_value());
   __ bind(&done);
@@ -1916,17 +2049,17 @@
 
   // A Smi is not an instance of anything.
   __ test(object, Immediate(kSmiTagMask));
-  __ j(zero, &false_result, not_taken);
+  __ j(zero, &false_result);
 
   // This is the inlined call site instanceof cache. The two occurences of the
   // hole value will be patched to the last map/result pair generated by the
   // instanceof stub.
-  NearLabel cache_miss;
+  Label cache_miss;
   Register map = ToRegister(instr->TempAt(0));
   __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
   __ bind(deferred->map_check());  // Label for calculating code patching.
   __ cmp(map, factory()->the_hole_value());  // Patched to cached map.
-  __ j(not_equal, &cache_miss, not_taken);
+  __ j(not_equal, &cache_miss, Label::kNear);
   __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
   __ jmp(&done);
 
@@ -2018,11 +2151,11 @@
   if (op == Token::GT || op == Token::LTE) {
     condition = ReverseCondition(condition);
   }
-  NearLabel true_value, done;
+  Label true_value, done;
   __ test(eax, Operand(eax));
-  __ j(condition, &true_value);
+  __ j(condition, &true_value, Label::kNear);
   __ mov(ToRegister(instr->result()), factory()->false_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&true_value);
   __ mov(ToRegister(instr->result()), factory()->true_value());
   __ bind(&done);
@@ -2060,7 +2193,7 @@
   }
   __ mov(esp, ebp);
   __ pop(ebp);
-  __ Ret((ParameterCount() + 1) * kPointerSize, ecx);
+  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
 }
 
 
@@ -2149,23 +2282,29 @@
 }
 
 
-void LCodeGen::EmitLoadField(Register result,
-                             Register object,
-                             Handle<Map> type,
-                             Handle<String> name) {
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+                                               Register object,
+                                               Handle<Map> type,
+                                               Handle<String> name) {
   LookupResult lookup;
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsProperty() && lookup.type() == FIELD);
-  int index = lookup.GetLocalFieldIndexFromMap(*type);
-  int offset = index * kPointerSize;
-  if (index < 0) {
-    // Negative property indices are in-object properties, indexed
-    // from the end of the fixed part of the object.
-    __ mov(result, FieldOperand(object, offset + type->instance_size()));
+  ASSERT(lookup.IsProperty() &&
+         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+  if (lookup.type() == FIELD) {
+    int index = lookup.GetLocalFieldIndexFromMap(*type);
+    int offset = index * kPointerSize;
+    if (index < 0) {
+      // Negative property indices are in-object properties, indexed
+      // from the end of the fixed part of the object.
+      __ mov(result, FieldOperand(object, offset + type->instance_size()));
+    } else {
+      // Non-negative property indices are in the properties array.
+      __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+      __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+    }
   } else {
-    // Non-negative property indices are in the properties array.
-    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
-    __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
   }
 }
 
@@ -2182,30 +2321,30 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   } else {
-    NearLabel done;
+    Label done;
     for (int i = 0; i < map_count - 1; ++i) {
       Handle<Map> map = instr->hydrogen()->types()->at(i);
-      NearLabel next;
+      Label next;
       __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-      __ j(not_equal, &next);
-      EmitLoadField(result, object, map, name);
-      __ jmp(&done);
+      __ j(not_equal, &next, Label::kNear);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
     Handle<Map> map = instr->hydrogen()->types()->last();
     __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
     if (instr->hydrogen()->need_generic()) {
-      NearLabel generic;
-      __ j(not_equal, &generic);
-      EmitLoadField(result, object, map, name);
-      __ jmp(&done);
+      Label generic;
+      __ j(not_equal, &generic, Label::kNear);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ jmp(&done, Label::kNear);
       __ bind(&generic);
       __ mov(ecx, name);
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
       CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
     } else {
       DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadField(result, object, map, name);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
     }
     __ bind(&done);
   }
@@ -2233,10 +2372,10 @@
   DeoptimizeIf(not_equal, instr->environment());
 
   // Check whether the function has an instance prototype.
-  NearLabel non_instance;
+  Label non_instance;
   __ test_b(FieldOperand(result, Map::kBitFieldOffset),
             1 << Map::kHasNonInstancePrototype);
-  __ j(not_zero, &non_instance);
+  __ j(not_zero, &non_instance, Label::kNear);
 
   // Get the prototype or initial map from the function.
   __ mov(result,
@@ -2247,13 +2386,13 @@
   DeoptimizeIf(equal, instr->environment());
 
   // If the function does not have an initial map, we're done.
-  NearLabel done;
+  Label done;
   __ CmpObjectType(result, MAP_TYPE, temp);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
 
   // Get the prototype from the initial map.
   __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Non-instance prototype: Fetch prototype from constructor field
   // in the function's map.
@@ -2270,13 +2409,13 @@
   Register input = ToRegister(instr->InputAt(0));
   __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
   if (FLAG_debug_code) {
-    NearLabel done;
+    Label done;
     __ cmp(FieldOperand(result, HeapObject::kMapOffset),
            Immediate(factory()->fixed_array_map()));
-    __ j(equal, &done);
+    __ j(equal, &done, Label::kNear);
     __ cmp(FieldOperand(result, HeapObject::kMapOffset),
            Immediate(factory()->fixed_cow_array_map()));
-    __ j(equal, &done);
+    __ j(equal, &done, Label::kNear);
     Register temp((result.is(eax)) ? ebx : eax);
     __ push(temp);
     __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
@@ -2327,41 +2466,63 @@
                               FixedArray::kHeaderSize));
 
   // Check for the hole value.
-  __ cmp(result, factory()->the_hole_value());
-  DeoptimizeIf(equal, instr->environment());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(result, factory()->the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
+}
+
+
+Operand LCodeGen::BuildExternalArrayOperand(LOperand* external_pointer,
+                                            LOperand* key,
+                                            ExternalArrayType array_type) {
+  Register external_pointer_reg = ToRegister(external_pointer);
+  int shift_size = ExternalArrayTypeToShiftSize(array_type);
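+  // A constant key folds into the displacement (after checking that the
+  // scaled value cannot overflow 32 bits); a register key is scaled through
+  // the SIB byte, since the shift size maps directly onto ScaleFactor.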
+  if (key->IsConstantOperand()) {
+    int constant_value = ToInteger32(LConstantOperand::cast(key));
+    if (constant_value & 0xF0000000) {
+      Abort("array index constant value too big");
+    }
+    return Operand(external_pointer_reg, constant_value * (1 << shift_size));
+  } else {
+    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+    return Operand(external_pointer_reg, ToRegister(key), scale_factor, 0);
+  }
 }
 
 
 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
-  Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
   ExternalArrayType array_type = instr->array_type();
+  Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
+                                            instr->key(), array_type));
   if (array_type == kExternalFloatArray) {
     XMMRegister result(ToDoubleRegister(instr->result()));
-    __ movss(result, Operand(external_pointer, key, times_4, 0));
+    __ movss(result, operand);
     __ cvtss2sd(result, result);
+  } else if (array_type == kExternalDoubleArray) {
+    __ movdbl(ToDoubleRegister(instr->result()), operand);
   } else {
     Register result(ToRegister(instr->result()));
     switch (array_type) {
       case kExternalByteArray:
-        __ movsx_b(result, Operand(external_pointer, key, times_1, 0));
+        __ movsx_b(result, operand);
         break;
       case kExternalUnsignedByteArray:
       case kExternalPixelArray:
-        __ movzx_b(result, Operand(external_pointer, key, times_1, 0));
+        __ movzx_b(result, operand);
         break;
       case kExternalShortArray:
-        __ movsx_w(result, Operand(external_pointer, key, times_2, 0));
+        __ movsx_w(result, operand);
         break;
       case kExternalUnsignedShortArray:
-        __ movzx_w(result, Operand(external_pointer, key, times_2, 0));
+        __ movzx_w(result, operand);
         break;
       case kExternalIntArray:
-        __ mov(result, Operand(external_pointer, key, times_4, 0));
+        __ mov(result, operand);
         break;
       case kExternalUnsignedIntArray:
-        __ mov(result, Operand(external_pointer, key, times_4, 0));
+        __ mov(result, operand);
         __ test(result, Operand(result));
         // TODO(danno): we could be more clever here, perhaps having a special
         // version of the stub that detects if the overflow case actually
@@ -2369,6 +2530,7 @@
         DeoptimizeIf(negative, instr->environment());
         break;
       case kExternalFloatArray:
+      case kExternalDoubleArray:
         UNREACHABLE();
         break;
     }
@@ -2390,16 +2552,16 @@
   Register result = ToRegister(instr->result());
 
   // Check for arguments adapter frame.
-  NearLabel done, adapted;
+  Label done, adapted;
   __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
   __ cmp(Operand(result),
          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adapted);
+  __ j(equal, &adapted, Label::kNear);
 
   // No arguments adaptor frame.
   __ mov(result, Operand(ebp));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Arguments adaptor frame present.
   __ bind(&adapted);
@@ -2415,12 +2577,12 @@
   Operand elem = ToOperand(instr->InputAt(0));
   Register result = ToRegister(instr->result());
 
-  NearLabel done;
+  Label done;
 
   // If no arguments adaptor frame the number of arguments is fixed.
   __ cmp(ebp, elem);
   __ mov(result, Immediate(scope()->num_parameters()));
-  __ j(equal, &done);
+  __ j(equal, &done, Label::kNear);
 
   // Arguments adaptor frame present. Get argument length from there.
   __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2443,20 +2605,23 @@
   ASSERT(function.is(edi));  // Required by InvokeFunction.
   ASSERT(ToRegister(instr->result()).is(eax));
 
+  // TODO(1412): This is not correct if the called function is a
+  // strict mode function or a native.
+  //
   // If the receiver is null or undefined, we have to pass the global object
   // as a receiver.
-  NearLabel global_object, receiver_ok;
+  Label global_object, receiver_ok;
   __ cmp(receiver, factory()->null_value());
-  __ j(equal, &global_object);
+  __ j(equal, &global_object, Label::kNear);
   __ cmp(receiver, factory()->undefined_value());
-  __ j(equal, &global_object);
+  __ j(equal, &global_object, Label::kNear);
 
   // The receiver should be a JS object.
   __ test(receiver, Immediate(kSmiTagMask));
   DeoptimizeIf(equal, instr->environment());
   __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch);
   DeoptimizeIf(below, instr->environment());
-  __ jmp(&receiver_ok);
+  __ jmp(&receiver_ok, Label::kNear);
 
   __ bind(&global_object);
   // TODO(kmillikin): We have a hydrogen value for the global object.  See
@@ -2464,6 +2629,8 @@
   // here.
   __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+  __ mov(receiver,
+         FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
 
   // Copy the arguments to this function possibly from the
@@ -2477,10 +2644,10 @@
 
   // Loop through the arguments pushing them onto the execution
   // stack.
-  NearLabel invoke, loop;
+  Label invoke, loop;
   // length is a small non-negative integer, due to the test above.
   __ test(length, Operand(length));
-  __ j(zero, &invoke);
+  __ j(zero, &invoke, Label::kNear);
   __ bind(&loop);
   __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
   __ dec(length);
@@ -2496,8 +2663,9 @@
   SafepointGenerator safepoint_generator(this,
                                          pointers,
                                          env->deoptimization_index());
-  v8::internal::ParameterCount actual(eax);
-  __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+  ParameterCount actual(eax);
+  __ InvokeFunction(function, actual, CALL_FUNCTION,
+                    safepoint_generator, CALL_AS_METHOD);
 }
 
 
@@ -2541,7 +2709,8 @@
 
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
-                                 LInstruction* instr) {
+                                 LInstruction* instr,
+                                 CallKind call_kind) {
   // Change context if needed.
   bool change_context =
       (info()->closure()->context() != function->context()) ||
@@ -2563,6 +2732,7 @@
   RecordPosition(pointers->position());
 
   // Invoke function.
+  __ SetCallKind(ecx, call_kind);
   if (*function == *info()->closure()) {
     __ CallSelf();
   } else {
@@ -2577,7 +2747,10 @@
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
   __ mov(edi, instr->function());
-  CallKnownFunction(instr->function(), instr->arity(), instr);
+  CallKnownFunction(instr->function(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_METHOD);
 }
 
 
@@ -2665,7 +2838,7 @@
   if (r.IsDouble()) {
     XMMRegister  scratch = xmm0;
     XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-    __ pxor(scratch, scratch);
+    __ xorps(scratch, scratch);
     __ subsd(scratch, input_reg);
     __ pand(input_reg, scratch);
   } else if (r.IsInteger32()) {
@@ -2687,7 +2860,7 @@
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
+  __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
   __ ucomisd(input_reg, xmm_scratch);
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -2710,25 +2883,16 @@
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
 
+  Label below_half, done;
   // xmm_scratch = 0.5
   ExternalReference one_half = ExternalReference::address_of_one_half();
   __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
 
+  __ ucomisd(xmm_scratch, input_reg);
+  __ j(above, &below_half);
   // input = input + 0.5
   __ addsd(input_reg, xmm_scratch);
 
-  // We need to return -0 for the input range [-0.5, 0[, otherwise
-  // compute Math.floor(value + 0.5).
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below_equal, instr->environment());
-  } else {
-    // If we don't need to bailout on -0, we check only bailout
-    // on negative inputs.
-    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below, instr->environment());
-  }
 
   // Compute Math.floor(value + 0.5).
   // Use truncating instruction (OK because input is positive).
@@ -2737,6 +2901,27 @@
   // Overflow is signalled with minint.
   __ cmp(output_reg, 0x80000000u);
   DeoptimizeIf(equal, instr->environment());
+  __ jmp(&done);
+
+  __ bind(&below_half);
+
+  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+  // we can ignore the difference between a result of -0 and +0.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // If the sign is positive, we return +0.
+    __ movmskpd(output_reg, input_reg);
+    __ test(output_reg, Immediate(1));
+    DeoptimizeIf(not_zero, instr->environment());
+  } else {
+    // If the input is >= -0.5, we return +0.
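+    // 0xBF000000 is the single-precision bit pattern for -0.5; it is
+    // widened to a double before the comparison.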
+    __ mov(output_reg, Immediate(0xBF000000));
+    __ movd(xmm_scratch, Operand(output_reg));
+    __ cvtss2sd(xmm_scratch, xmm_scratch);
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below, instr->environment());
+  }
+  __ Set(output_reg, Immediate(0));
+  __ bind(&done);
 }
 
 
@@ -2751,7 +2936,7 @@
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-  __ xorpd(xmm_scratch, xmm_scratch);
+  __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
 }
@@ -2820,20 +3005,20 @@
 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
   ASSERT(instr->InputAt(0)->Equals(instr->result()));
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-  NearLabel positive, done, zero, negative;
-  __ xorpd(xmm0, xmm0);
+  Label positive, done, zero;
+  __ xorps(xmm0, xmm0);
   __ ucomisd(input_reg, xmm0);
-  __ j(above, &positive);
-  __ j(equal, &zero);
+  __ j(above, &positive, Label::kNear);
+  __ j(equal, &zero, Label::kNear);
   ExternalReference nan = ExternalReference::address_of_nan();
   __ movdbl(input_reg, Operand::StaticVariable(nan));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&zero);
   __ push(Immediate(0xFFF00000));
   __ push(Immediate(0));
   __ movdbl(input_reg, Operand(esp, 0));
   __ add(Operand(esp), Immediate(kDoubleSize));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&positive);
   __ fldln2();
   __ sub(Operand(esp), Immediate(kDoubleSize));
@@ -2896,6 +3081,21 @@
 }
 
 
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
+  ASSERT(ToRegister(instr->function()).is(edi));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator generator(this, pointers, env->deoptimization_index());
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+}
+
+
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   ASSERT(ToRegister(instr->key()).is(ecx));
@@ -2913,10 +3113,11 @@
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->
-      ComputeCallInitialize(arity, NOT_IN_LOOP);
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
   __ mov(ecx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+  CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2925,7 +3126,7 @@
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
   __ Drop(1);
 }
@@ -2936,17 +3137,18 @@
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->
-      ComputeCallInitialize(arity, NOT_IN_LOOP);
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
   __ mov(ecx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
+  CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
 }
 
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
   __ mov(edi, instr->target());
-  CallKnownFunction(instr->target(), instr->arity(), instr);
+  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
 
@@ -3017,46 +3219,32 @@
 
 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
-  Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
   ExternalArrayType array_type = instr->array_type();
+  Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
+                                            instr->key(), array_type));
   if (array_type == kExternalFloatArray) {
     __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
-    __ movss(Operand(external_pointer, key, times_4, 0), xmm0);
+    __ movss(operand, xmm0);
+  } else if (array_type == kExternalDoubleArray) {
+    __ movdbl(operand, ToDoubleRegister(instr->value()));
   } else {
     Register value = ToRegister(instr->value());
     switch (array_type) {
-      case kExternalPixelArray: {
-        // Clamp the value to [0..255].
-        Register temp = ToRegister(instr->TempAt(0));
-        // The dec_b below requires that the clamped value is in a byte
-        // register. eax is an arbitrary choice to satisfy this requirement, we
-        // hinted the register allocator to give us eax when building the
-        // instruction.
-        ASSERT(temp.is(eax));
-        __ mov(temp, ToRegister(instr->value()));
-        NearLabel done;
-        __ test(temp, Immediate(0xFFFFFF00));
-        __ j(zero, &done);
-        __ setcc(negative, temp);  // 1 if negative, 0 if positive.
-        __ dec_b(temp);  // 0 if negative, 255 if positive.
-        __ bind(&done);
-        __ mov_b(Operand(external_pointer, key, times_1, 0), temp);
-        break;
-      }
+      case kExternalPixelArray:
       case kExternalByteArray:
       case kExternalUnsignedByteArray:
-        __ mov_b(Operand(external_pointer, key, times_1, 0), value);
+        __ mov_b(operand, value);
         break;
       case kExternalShortArray:
       case kExternalUnsignedShortArray:
-        __ mov_w(Operand(external_pointer, key, times_2, 0), value);
+        __ mov_w(operand, value);
         break;
       case kExternalIntArray:
       case kExternalUnsignedIntArray:
-        __ mov(Operand(external_pointer, key, times_4, 0), value);
+        __ mov(operand, value);
         break;
       case kExternalFloatArray:
+      case kExternalDoubleArray:
         UNREACHABLE();
         break;
     }
@@ -3143,7 +3331,7 @@
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  NearLabel flat_string, ascii_string, done;
+  Label flat_string, ascii_string, done;
 
   // Fetch the instance type of the receiver into result register.
   __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
@@ -3152,7 +3340,7 @@
   // We need special handling for non-flat strings.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ test(result, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string);
+  __ j(zero, &flat_string, Label::kNear);
 
   // Handle non-flat strings.
   __ test(result, Immediate(kIsConsStringMask));
@@ -3179,7 +3367,7 @@
   __ bind(&flat_string);
   STATIC_ASSERT(kAsciiStringTag != 0);
   __ test(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string);
+  __ j(not_zero, &ascii_string, Label::kNear);
 
   // Two-byte string.
   // Load the two-byte character code into the result register.
@@ -3195,7 +3383,7 @@
                                     times_2,
                                     SeqTwoByteString::kHeaderSize));
   }
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // ASCII string.
   // Load the byte into the result register.
@@ -3299,6 +3487,22 @@
 }
 
 
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  if (instr->left()->IsConstantOperand()) {
+    __ push(ToImmediate(instr->left()));
+  } else {
+    __ push(ToOperand(instr->left()));
+  }
+  if (instr->right()->IsConstantOperand()) {
+    __ push(ToImmediate(instr->right()));
+  } else {
+    __ push(ToOperand(instr->right()));
+  }
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+}
+
+
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister() || input->IsStackSlot());
@@ -3340,13 +3544,13 @@
   // There was overflow, so bits 30 and 31 of the original integer
   // disagree. Try to allocate a heap number in new space and store
   // the value in there. If that fails, call the runtime system.
-  NearLabel done;
+  Label done;
   __ SmiUntag(reg);
   __ xor_(reg, 0x80000000);
   __ cvtsi2sd(xmm0, Operand(reg));
   if (FLAG_inline_new) {
     __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
   }
 
   // Slow case: Call the runtime system to do the number allocation.
@@ -3429,11 +3633,11 @@
                                 XMMRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 LEnvironment* env) {
-  NearLabel load_smi, done;
+  Label load_smi, done;
 
   // Smi check.
   __ test(input_reg, Immediate(kSmiTagMask));
-  __ j(zero, &load_smi, not_taken);
+  __ j(zero, &load_smi, Label::kNear);
 
   // Heap number map check.
   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3441,21 +3645,22 @@
   if (deoptimize_on_undefined) {
     DeoptimizeIf(not_equal, env);
   } else {
-    NearLabel heap_number;
-    __ j(equal, &heap_number);
+    Label heap_number;
+    __ j(equal, &heap_number, Label::kNear);
+
     __ cmp(input_reg, factory()->undefined_value());
     DeoptimizeIf(not_equal, env);
 
     // Convert undefined to NaN.
     ExternalReference nan = ExternalReference::address_of_nan();
     __ movdbl(result_reg, Operand::StaticVariable(nan));
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     __ bind(&heap_number);
   }
   // Heap number to XMM conversion.
   __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Smi to XMM conversion
   __ bind(&load_smi);
@@ -3477,7 +3682,7 @@
 
 
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
-  NearLabel done, heap_number;
+  Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
 
   // Heap number map check.
@@ -3485,18 +3690,18 @@
          factory()->heap_number_map());
 
   if (instr->truncating()) {
-    __ j(equal, &heap_number);
+    __ j(equal, &heap_number, Label::kNear);
     // Check for undefined. Undefined is converted to zero for truncating
     // conversions.
     __ cmp(input_reg, factory()->undefined_value());
     DeoptimizeIf(not_equal, instr->environment());
     __ mov(input_reg, 0);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     __ bind(&heap_number);
     if (CpuFeatures::IsSupported(SSE3)) {
       CpuFeatures::Scope scope(SSE3);
-      NearLabel convert;
+      Label convert;
       // Use more powerful conversion when sse3 is available.
       // Load x87 register with heap number.
       __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
@@ -3506,7 +3711,7 @@
       const uint32_t kTooBigExponent =
           (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
       __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
-      __ j(less, &convert);
+      __ j(less, &convert, Label::kNear);
       // Pop FPU stack before deoptimizing.
       __ ffree(0);
       __ fincstp();
@@ -3520,7 +3725,6 @@
       __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
       __ add(Operand(esp), Immediate(kDoubleSize));
     } else {
-      NearLabel deopt;
       XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
       __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
       __ cvttsd2si(input_reg, Operand(xmm0));
@@ -3609,8 +3813,8 @@
     if (CpuFeatures::IsSupported(SSE3)) {
       // This will deoptimize if the exponent of the input in out of range.
       CpuFeatures::Scope scope(SSE3);
-      NearLabel convert, done;
-      __ j(not_equal, &done);
+      Label convert, done;
+      __ j(not_equal, &done, Label::kNear);
       __ sub(Operand(esp), Immediate(kDoubleSize));
       __ movdbl(Operand(esp, 0), input_reg);
       // Get exponent alone and check for too-big exponent.
@@ -3619,7 +3823,7 @@
       const uint32_t kTooBigExponent =
           (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
       __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
-      __ j(less, &convert);
+      __ j(less, &convert, Label::kNear);
       __ add(Operand(esp), Immediate(kDoubleSize));
       DeoptimizeIf(no_condition, instr->environment());
       __ bind(&convert);
@@ -3630,13 +3834,13 @@
       __ add(Operand(esp), Immediate(kDoubleSize));
       __ bind(&done);
     } else {
-      NearLabel done;
+      Label done;
       Register temp_reg = ToRegister(instr->TempAt(0));
       XMMRegister xmm_scratch = xmm0;
 
       // If cvttsd2si succeeded, we're done. Otherwise, we attempt
       // manual conversion.
-      __ j(not_equal, &done);
+      __ j(not_equal, &done, Label::kNear);
 
       // Get high 32 bits of the input in result_reg and temp_reg.
       __ pshufd(xmm_scratch, input_reg, 1);
@@ -3686,7 +3890,7 @@
       __ bind(&done);
     }
   } else {
-    NearLabel done;
+    Label done;
     __ cvttsd2si(result_reg, Operand(input_reg));
     __ cvtsi2sd(xmm0, Operand(result_reg));
     __ ucomisd(xmm0, input_reg);
@@ -3696,7 +3900,7 @@
       // The integer converted back is equal to the original. We
       // only have to test if we got -0 as an input.
       __ test(result_reg, Operand(result_reg));
-      __ j(not_zero, &done);
+      __ j(not_zero, &done, Label::kNear);
       __ movmskpd(result_reg, input_reg);
       // Bit 0 contains the sign of the double in input_reg.
       // If input was positive, we are ok and return 0, otherwise
@@ -3726,29 +3930,43 @@
 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
-  InstanceType first = instr->hydrogen()->first();
-  InstanceType last = instr->hydrogen()->last();
 
   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
 
-  // If there is only one type in the interval check for equality.
-  if (first == last) {
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
     __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
             static_cast<int8_t>(first));
-    DeoptimizeIf(not_equal, instr->environment());
-  } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
-    // String has a dedicated bit in instance type.
-    __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), kIsNotStringMask);
-    DeoptimizeIf(not_zero, instr->environment());
-  } else  {
-    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-            static_cast<int8_t>(first));
-    DeoptimizeIf(below, instr->environment());
-    // Omit check for the last type.
-    if (last != LAST_TYPE) {
-      __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
-              static_cast<int8_t>(last));
-      DeoptimizeIf(above, instr->environment());
+
+    // If there is only one type in the interval check for equality.
+    if (first == last) {
+      DeoptimizeIf(not_equal, instr->environment());
+    } else {
+      DeoptimizeIf(below, instr->environment());
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+                static_cast<int8_t>(last));
+        DeoptimizeIf(above, instr->environment());
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (IsPowerOf2(mask)) {
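+      // A single-bit mask needs only a test: the expected bit is clear when
+      // tag == 0 and set otherwise, so deoptimize on a mismatch.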
+      ASSERT(tag == 0 || IsPowerOf2(tag));
+      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+    } else {
+      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+      __ and_(temp, mask);
+      __ cmpb(Operand(temp), tag);
+      DeoptimizeIf(not_equal, instr->environment());
     }
   }
 }
@@ -3772,6 +3990,54 @@
 }
 
 
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  ASSERT(instr->unclamped()->Equals(instr->result()));
+  Register value_reg = ToRegister(instr->result());
+  __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  ASSERT(instr->unclamped()->Equals(instr->result()));
+  Register input_reg = ToRegister(instr->unclamped());
+  Label is_smi, done, heap_number;
+
+  __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for heap number
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         factory()->heap_number_map());
+  __ j(equal, &heap_number, Label::kNear);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ cmp(input_reg, factory()->undefined_value());
+  DeoptimizeIf(not_equal, instr->environment());
+  __ mov(input_reg, 0);
+  __ jmp(&done, Label::kNear);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+  __ jmp(&done, Label::kNear);
+
+  // smi
+  __ bind(&is_smi);
+  __ SmiUntag(input_reg);
+  __ ClampUint8(input_reg);
+
+  __ bind(&done);
+}
+
+
 void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
   if (isolate()->heap()->InNewSpace(*object)) {
     Handle<JSGlobalPropertyCell> cell =
@@ -3873,7 +4139,7 @@
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  NearLabel materialized;
+  Label materialized;
   // Registers will be used as follows:
   // edi = JS function.
   // ecx = literals array.
@@ -3885,7 +4151,7 @@
       instr->hydrogen()->literal_index() * kPointerSize;
   __ mov(ebx, FieldOperand(ecx, literal_offset));
   __ cmp(ebx, factory()->undefined_value());
-  __ j(not_equal, &materialized);
+  __ j(not_equal, &materialized, Label::kNear);
 
   // Create regexp literal using runtime function
   // Result will be in eax.
@@ -3961,16 +4227,16 @@
   Register result = ToRegister(instr->result());
   Label true_label;
   Label false_label;
-  NearLabel done;
+  Label done;
 
   Condition final_branch_condition = EmitTypeofIs(&true_label,
                                                   &false_label,
                                                   input,
                                                   instr->type_literal());
-  __ j(final_branch_condition, &true_label);
+  __ j(final_branch_condition, &true_label, Label::kNear);
   __ bind(&false_label);
   __ mov(result, factory()->false_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&true_label);
   __ mov(result, factory()->true_value());
@@ -4064,15 +4330,14 @@
 
 void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
   Register result = ToRegister(instr->result());
-  NearLabel true_label;
-  NearLabel false_label;
-  NearLabel done;
+  Label true_label;
+  Label done;
 
   EmitIsConstructCall(result);
-  __ j(equal, &true_label);
+  __ j(equal, &true_label, Label::kNear);
 
   __ mov(result, factory()->false_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&true_label);
   __ mov(result, factory()->true_value());
@@ -4096,10 +4361,10 @@
   __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
 
   // Skip the arguments adaptor frame if it exists.
-  NearLabel check_frame_marker;
+  Label check_frame_marker;
   __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &check_frame_marker);
+  __ j(not_equal, &check_frame_marker, Label::kNear);
   __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
 
   // Check the marker in the calling frame.
@@ -4142,17 +4407,17 @@
                                          env->deoptimization_index());
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ push(Immediate(Smi::FromInt(strict_mode_flag())));
-  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
 }
 
 
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
   // Perform stack overflow check.
-  NearLabel done;
+  Label done;
   ExternalReference stack_limit =
       ExternalReference::address_of_stack_limit(isolate());
   __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above_equal, &done);
+  __ j(above_equal, &done, Label::kNear);
 
   StackCheckStub stub;
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
@@ -4177,6 +4442,35 @@
 }
 
 
+void LCodeGen::DoIn(LIn* instr) {
+  LOperand* obj = instr->object();
+  LOperand* key = instr->key();
+  if (key->IsConstantOperand()) {
+    __ push(ToImmediate(key));
+  } else {
+    __ push(ToOperand(key));
+  }
+  if (obj->IsConstantOperand()) {
+    __ push(ToImmediate(obj));
+  } else {
+    __ push(ToOperand(obj));
+  }
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  // Create safepoint generator that will also ensure enough space in the
+  // reloc info for patching in deoptimization (since this is invoking a
+  // builtin).
+  SafepointGenerator safepoint_generator(this,
+                                         pointers,
+                                         env->deoptimization_index());
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index bdccd3c..1a98d8d 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -105,6 +105,7 @@
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
 
   // Emit frame translation commands for an environment.
   void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -147,8 +148,8 @@
                        Register temporary,
                        Register temporary2);
 
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -207,7 +208,8 @@
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
-                         LInstruction* instr);
+                         LInstruction* instr,
+                         CallKind call_kind);
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
@@ -228,6 +230,9 @@
   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
   int ToInteger32(LConstantOperand* op) const;
+  Operand BuildExternalArrayOperand(LOperand* external_pointer,
+                                    LOperand* key,
+                                    ExternalArrayType array_type);
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -280,10 +285,10 @@
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
 
-  void EmitLoadField(Register result,
-                     Register object,
-                     Handle<Map> type,
-                     Handle<String> name);
+  void EmitLoadFieldOrConstantFunction(Register result,
+                                       Register object,
+                                       Handle<Map> type,
+                                       Handle<String> name);
 
   LChunk* const chunk_;
   MacroAssembler* const masm_;
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 3d1da40..9d91c61 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -309,12 +309,15 @@
     __ mov(dst, src);
 
   } else if (source->IsDoubleRegister()) {
-    ASSERT(destination->IsDoubleRegister() ||
-           destination->IsDoubleStackSlot());
     XMMRegister src = cgen_->ToDoubleRegister(source);
-    Operand dst = cgen_->ToOperand(destination);
-    __ movdbl(dst, src);
-
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = cgen_->ToDoubleRegister(destination);
+      __ movaps(dst, src);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      __ movdbl(dst, src);
+    }
   } else if (source->IsDoubleStackSlot()) {
     ASSERT(destination->IsDoubleRegister() ||
            destination->IsDoubleStackSlot());
@@ -391,13 +394,19 @@
       __ mov(dst, tmp1);
       __ mov(src, tmp0);
     }
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // XMM register-register swap. We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = cgen_->ToDoubleRegister(source);
+    XMMRegister dst = cgen_->ToDoubleRegister(destination);
+    __ movaps(xmm0, src);
+    __ movaps(src, dst);
+    __ movaps(dst, xmm0);
 
   } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
-    // XMM register-register or register-memory.  We rely on having xmm0
+    // XMM register-memory swap.  We rely on having xmm0
     // available as a fixed scratch register.
-    ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
-    ASSERT(destination->IsDoubleRegister() ||
-           destination->IsDoubleStackSlot());
+    ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
     XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                   ? source
                                                   : destination);
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 4b10562..91606ce 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -71,22 +71,21 @@
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs must use a fixed register.
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
   }
   for (TempIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -240,6 +239,13 @@
 }
 
 
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -303,6 +309,15 @@
 }
 
 
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" ");
+  InputAt(1)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[ecx] #%d / ", arity());
 }
@@ -441,7 +456,7 @@
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LGap* gap = new LGap(block);
+  LInstructionGap* gap = new LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -844,24 +859,22 @@
     right = UseFixed(right_value, ecx);
   }
 
-  // Shift operations can only deoptimize if we do a logical shift
-  // by 0 and the result cannot be truncated to int32.
-  bool can_deopt = (op == Token::SHR && constant_value == 0);
-  if (can_deopt) {
-    bool can_truncate = true;
-    for (int i = 0; i < instr->uses()->length(); i++) {
-      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
-        can_truncate = false;
+  // Shift operations can only deoptimize if we do a logical shift by 0 and
+  // the result cannot be truncated to int32.
+  bool may_deopt = (op == Token::SHR && constant_value == 0);
+  bool does_deopt = false;
+  if (may_deopt) {
+    for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+      if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+        does_deopt = true;
         break;
       }
     }
-    can_deopt = !can_truncate;
   }
 
-  LShiftI* result = new LShiftI(op, left, right, can_deopt);
-  return can_deopt
-      ? AssignEnvironment(DefineSameAsFirst(result))
-      : DefineSameAsFirst(result);
+  LInstruction* result =
+      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+  return does_deopt ? AssignEnvironment(result) : result;
 }
 
 
@@ -1004,6 +1017,8 @@
                                           outer);
   int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
     HValue* value = hydrogen_env->values()->at(i);
     LOperand* op = NULL;
     if (value->IsArgumentsObject()) {
@@ -1031,106 +1046,102 @@
 
 LInstruction* LChunkBuilder::DoTest(HTest* instr) {
   HValue* v = instr->value();
-  if (v->EmitAtUses()) {
-    if (v->IsClassOfTest()) {
-      HClassOfTest* compare = HClassOfTest::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
-                                       TempRegister(),
-                                       TempRegister());
-    } else if (v->IsCompare()) {
-      HCompare* compare = HCompare::cast(v);
-      Token::Value op = compare->token();
-      HValue* left = compare->left();
-      HValue* right = compare->right();
-      Representation r = compare->GetInputRepresentation();
-      if (r.IsInteger32()) {
-        ASSERT(left->representation().IsInteger32());
-        ASSERT(right->representation().IsInteger32());
-
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseOrConstantAtStart(right));
-      } else if (r.IsDouble()) {
-        ASSERT(left->representation().IsDouble());
-        ASSERT(right->representation().IsDouble());
-
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseRegisterAtStart(right));
-      } else {
-        ASSERT(left->representation().IsTagged());
-        ASSERT(right->representation().IsTagged());
-        bool reversed = op == Token::GT || op == Token::LTE;
-        LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
-        LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
-        LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
-                                                    right_operand);
-        return MarkAsCall(result, instr);
-      }
-    } else if (v->IsIsSmi()) {
-      HIsSmi* compare = HIsSmi::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LIsSmiAndBranch(Use(compare->value()));
-    } else if (v->IsHasInstanceType()) {
-      HHasInstanceType* compare = HHasInstanceType::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
-                                           TempRegister());
-    } else if (v->IsHasCachedArrayIndex()) {
-      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LHasCachedArrayIndexAndBranch(
-          UseRegisterAtStart(compare->value()));
-    } else if (v->IsIsNull()) {
-      HIsNull* compare = HIsNull::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      // We only need a temp register for non-strict compare.
-      LOperand* temp = compare->is_strict() ? NULL : TempRegister();
-      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
-                                  temp);
-    } else if (v->IsIsObject()) {
-      HIsObject* compare = HIsObject::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      LOperand* temp1 = TempRegister();
-      LOperand* temp2 = TempRegister();
-      return new LIsObjectAndBranch(UseRegister(compare->value()),
-                                    temp1,
-                                    temp2);
-    } else if (v->IsCompareJSObjectEq()) {
-      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
-      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
-                                         UseRegisterAtStart(compare->right()));
-    } else if (v->IsInstanceOf()) {
-      HInstanceOf* instance_of = HInstanceOf::cast(v);
-      LOperand* left = UseFixed(instance_of->left(), InstanceofStub::left());
-      LOperand* right = UseFixed(instance_of->right(), InstanceofStub::right());
-      LOperand* context = UseFixed(instance_of->context(), esi);
-      LInstanceOfAndBranch* result =
-          new LInstanceOfAndBranch(context, left, right);
-      return MarkAsCall(result, instr);
-    } else if (v->IsTypeofIs()) {
-      HTypeofIs* typeof_is = HTypeofIs::cast(v);
-      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
-    } else if (v->IsIsConstructCall()) {
-      return new LIsConstructCallAndBranch(TempRegister());
+  if (!v->EmitAtUses()) {
+    return new LBranch(UseRegisterAtStart(v));
+  } else if (v->IsClassOfTest()) {
+    HClassOfTest* compare = HClassOfTest::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+                                     TempRegister(),
+                                     TempRegister());
+  } else if (v->IsCompare()) {
+    HCompare* compare = HCompare::cast(v);
+    Token::Value op = compare->token();
+    HValue* left = compare->left();
+    HValue* right = compare->right();
+    Representation r = compare->GetInputRepresentation();
+    if (r.IsInteger32()) {
+      ASSERT(left->representation().IsInteger32());
+      ASSERT(right->representation().IsInteger32());
+      return new LCmpIDAndBranch(UseRegisterAtStart(left),
+                                 UseOrConstantAtStart(right));
+    } else if (r.IsDouble()) {
+      ASSERT(left->representation().IsDouble());
+      ASSERT(right->representation().IsDouble());
+      return new LCmpIDAndBranch(UseRegisterAtStart(left),
+                                 UseRegisterAtStart(right));
     } else {
-      if (v->IsConstant()) {
-        if (HConstant::cast(v)->ToBoolean()) {
-          return new LGoto(instr->FirstSuccessor()->block_id());
-        } else {
-          return new LGoto(instr->SecondSuccessor()->block_id());
-        }
-      }
-      Abort("Undefined compare before branch");
-      return NULL;
+      ASSERT(left->representation().IsTagged());
+      ASSERT(right->representation().IsTagged());
+      bool reversed = op == Token::GT || op == Token::LTE;
+      LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
+      LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
+      LCmpTAndBranch* result = new LCmpTAndBranch(left_operand, right_operand);
+      return MarkAsCall(result, instr);
     }
+  } else if (v->IsIsSmi()) {
+    HIsSmi* compare = HIsSmi::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LIsSmiAndBranch(Use(compare->value()));
+  } else if (v->IsIsUndetectable()) {
+    HIsUndetectable* compare = HIsUndetectable::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
+                                        TempRegister());
+  } else if (v->IsHasInstanceType()) {
+    HHasInstanceType* compare = HHasInstanceType::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
+                                         TempRegister());
+  } else if (v->IsHasCachedArrayIndex()) {
+    HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LHasCachedArrayIndexAndBranch(
+        UseRegisterAtStart(compare->value()));
+  } else if (v->IsIsNull()) {
+    HIsNull* compare = HIsNull::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    // We only need a temp register for non-strict compare.
+    LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+    return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
+  } else if (v->IsIsObject()) {
+    HIsObject* compare = HIsObject::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    LOperand* temp1 = TempRegister();
+    LOperand* temp2 = TempRegister();
+    return new LIsObjectAndBranch(UseRegister(compare->value()),
+                                  temp1,
+                                  temp2);
+  } else if (v->IsCompareJSObjectEq()) {
+    HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+    return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+                                       UseRegisterAtStart(compare->right()));
+  } else if (v->IsCompareSymbolEq()) {
+    HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
+    return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
+                                     UseRegisterAtStart(compare->right()));
+  } else if (v->IsInstanceOf()) {
+    HInstanceOf* instance_of = HInstanceOf::cast(v);
+    LOperand* left = UseFixed(instance_of->left(), InstanceofStub::left());
+    LOperand* right = UseFixed(instance_of->right(), InstanceofStub::right());
+    LOperand* context = UseFixed(instance_of->context(), esi);
+    LInstanceOfAndBranch* result =
+        new LInstanceOfAndBranch(context, left, right);
+    return MarkAsCall(result, instr);
+  } else if (v->IsTypeofIs()) {
+    HTypeofIs* typeof_is = HTypeofIs::cast(v);
+    return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+  } else if (v->IsIsConstructCall()) {
+    return new LIsConstructCallAndBranch(TempRegister());
+  } else if (v->IsConstant()) {
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+        ? instr->FirstSuccessor()
+        : instr->SecondSuccessor();
+    return new LGoto(successor->block_id());
+  } else {
+    Abort("Undefined compare before branch");
+    return NULL;
   }
-  return new LBranch(UseRegisterAtStart(v));
 }
 
 
@@ -1193,7 +1204,7 @@
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
 }
 
 
@@ -1222,6 +1233,15 @@
 }
 
 
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseFixed(instr->function(), edi);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
   if (op == kMathLog) {
@@ -1523,6 +1543,15 @@
 }
 
 
+LInstruction* LChunkBuilder::DoCompareSymbolEq(
+    HCompareSymbolEq* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
+  return DefineAsRegister(result);
+}
+
+
 LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1547,6 +1576,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LIsUndetectable(value));
+}
+
+
 LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1626,6 +1663,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
@@ -1727,6 +1772,27 @@
 }
 
 
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  if (input_rep.IsDouble()) {
+    LOperand* reg = UseRegister(value);
+    return DefineAsRegister(new LClampDToUint8(reg));
+  } else if (input_rep.IsInteger32()) {
+    LOperand* reg = UseFixed(value, eax);
+    return DefineFixed(new LClampIToUint8(reg), eax);
+  } else {
+    ASSERT(input_rep.IsTagged());
+    LOperand* reg = UseFixed(value, eax);
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve xmm1 explicitly.
+    LOperand* temp = FixedTemp(xmm1);
+    LClampTToUint8* result = new LClampTToUint8(reg, temp);
+    return AssignEnvironment(DefineFixed(result, eax));
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   return new LReturn(UseFixed(instr->value(), eax));
 }
@@ -1873,11 +1939,14 @@
     HLoadKeyedSpecializedArrayElement* instr) {
   ExternalArrayType array_type = instr->array_type();
   Representation representation(instr->representation());
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ASSERT(
+      (representation.IsInteger32() && (array_type != kExternalFloatArray &&
+                                        array_type != kExternalDoubleArray)) ||
+      (representation.IsDouble() && (array_type == kExternalFloatArray ||
+                                     array_type == kExternalDoubleArray)));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* key = UseRegister(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
       new LLoadKeyedSpecializedArrayElement(external_pointer,
                                             key);
@@ -1923,25 +1992,20 @@
     HStoreKeyedSpecializedArrayElement* instr) {
   Representation representation(instr->value()->representation());
   ExternalArrayType array_type = instr->array_type();
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ASSERT(
+      (representation.IsInteger32() && (array_type != kExternalFloatArray &&
+                                        array_type != kExternalDoubleArray)) ||
+      (representation.IsDouble() && (array_type == kExternalFloatArray ||
+                                     array_type == kExternalDoubleArray)));
   ASSERT(instr->external_pointer()->representation().IsExternal());
   ASSERT(instr->key()->representation().IsInteger32());
 
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* key = UseRegister(instr->key());
-  LOperand* temp = NULL;
-
-  if (array_type == kExternalPixelArray) {
-    // The generated code for pixel array stores requires that the clamped value
-    // is in a byte register. eax is an arbitrary choice to satisfy this
-    // requirement.
-    temp = FixedTemp(eax);
-  }
-
+  LOperand* key = UseRegisterOrConstant(instr->key());
   LOperand* val = NULL;
   if (array_type == kExternalByteArray ||
-      array_type == kExternalUnsignedByteArray) {
+      array_type == kExternalUnsignedByteArray ||
+      array_type == kExternalPixelArray) {
     // We need a byte register in this case for the value.
     val = UseFixed(instr->value(), eax);
   } else {
@@ -1950,8 +2014,7 @@
 
   return new LStoreKeyedSpecializedArrayElement(external_pointer,
                                                 key,
-                                                val,
-                                                temp);
+                                                val);
 }
 
 
@@ -2002,6 +2065,13 @@
 }
 
 
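+// String addition is lowered to a call, so both operands may be constants and
+// the result is fixed to eax.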
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseOrConstantAtStart(instr->left());
+  LOperand* right = UseOrConstantAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), eax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseRegister(instr->string());
   LOperand* index = UseRegisterOrConstant(instr->index());
@@ -2046,7 +2116,8 @@
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
   LDeleteProperty* result =
-      new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+      new LDeleteProperty(UseAtStart(instr->object()),
+                          UseOrConstantAtStart(instr->key()));
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2165,8 +2236,9 @@
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
                                                instr->function(),
-                                               false,
-                                               undefined);
+                                               HEnvironment::LITHIUM,
+                                               undefined,
+                                               instr->call_kind());
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2180,6 +2252,14 @@
 }
 
 
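+// The 'in' operator is lowered to a call, so both operands may be constants
+// and the result is fixed to eax.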
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+  LOperand* key = UseOrConstantAtStart(instr->key());
+  LOperand* object = UseOrConstantAtStart(instr->object());
+  LIn* result = new LIn(key, object);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index be5658b..979c494 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -39,12 +39,6 @@
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
-  V(ControlInstruction)                         \
-  V(Call)                                       \
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
@@ -73,12 +67,17 @@
   V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
+  V(ClampDToUint8)                              \
+  V(ClampIToUint8)                              \
+  V(ClampTToUint8)                              \
   V(ClassOfTest)                                \
   V(ClassOfTestAndBranch)                       \
   V(CmpID)                                      \
   V(CmpIDAndBranch)                             \
   V(CmpJSObjectEq)                              \
   V(CmpJSObjectEqAndBranch)                     \
+  V(CmpSymbolEq)                                \
+  V(CmpSymbolEqAndBranch)                       \
   V(CmpMapAndBranch)                            \
   V(CmpT)                                       \
   V(CmpTAndBranch)                              \
@@ -93,7 +92,6 @@
   V(ExternalArrayLength)                        \
   V(FixedArrayLength)                           \
   V(FunctionLiteral)                            \
-  V(Gap)                                        \
   V(GetCachedArrayIndex)                        \
   V(GlobalObject)                               \
   V(GlobalReceiver)                             \
@@ -102,18 +100,23 @@
   V(HasCachedArrayIndexAndBranch)               \
   V(HasInstanceType)                            \
   V(HasInstanceTypeAndBranch)                   \
+  V(In)                                         \
   V(InstanceOf)                                 \
   V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
+  V(InstructionGap)                             \
   V(Integer32ToDouble)                          \
+  V(InvokeFunction)                             \
+  V(IsConstructCall)                            \
+  V(IsConstructCallAndBranch)                   \
   V(IsNull)                                     \
   V(IsNullAndBranch)                            \
   V(IsObject)                                   \
   V(IsObjectAndBranch)                          \
   V(IsSmi)                                      \
   V(IsSmiAndBranch)                             \
-  V(IsConstructCall)                            \
-  V(IsConstructCallAndBranch)                   \
+  V(IsUndetectable)                             \
+  V(IsUndetectableAndBranch)                    \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -154,6 +157,7 @@
   V(StoreKeyedSpecializedArrayElement)          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
   V(StringCharCodeAt)                           \
   V(StringCharFromCode)                         \
   V(StringLength)                               \
@@ -169,20 +173,16 @@
   V(ValueOf)
 
 
-#define DECLARE_INSTRUCTION(type)                \
-  virtual bool Is##type() const { return true; } \
-  static L##type* cast(LInstruction* instr) {    \
-    ASSERT(instr->Is##type());                   \
-    return reinterpret_cast<L##type*>(instr);    \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual Opcode opcode() const { return LInstruction::k##type; } \
+  virtual void CompileToNative(LCodeGen* generator);              \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                     \
+    ASSERT(instr->Is##type());                                    \
+    return reinterpret_cast<L##type*>(instr);                     \
   }
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
-  virtual void CompileToNative(LCodeGen* generator);        \
-  virtual const char* Mnemonic() const { return mnemonic; } \
-  DECLARE_INSTRUCTION(type)
-
-
 #define DECLARE_HYDROGEN_ACCESSOR(type)     \
   H##type* hydrogen() const {               \
     return H##type::cast(hydrogen_value()); \
@@ -204,10 +204,25 @@
   virtual void PrintDataTo(StringStream* stream) = 0;
   virtual void PrintOutputOperandTo(StringStream* stream) = 0;
 
-  // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
-  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
 
   virtual bool IsControl() const { return false; }
   virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
@@ -327,16 +342,20 @@
 
 class LGap: public LTemplateInstruction<0, 0, 0> {
  public:
-  explicit LGap(HBasicBlock* block)
-      : block_(block) {
+  explicit LGap(HBasicBlock* block) : block_(block) {
     parallel_moves_[BEFORE] = NULL;
     parallel_moves_[START] = NULL;
     parallel_moves_[END] = NULL;
     parallel_moves_[AFTER] = NULL;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  // Can't use the DECLARE-macro here because of sub-classes.
+  virtual bool IsGap() const { return true; }
   virtual void PrintDataTo(StringStream* stream);
+  static LGap* cast(LInstruction* instr) {
+    ASSERT(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
 
   bool IsRedundant() const;
 
@@ -366,6 +385,14 @@
 };
 
 
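+// Concrete gap emitted in the instruction stream to hold the register
+// allocator's parallel moves. The LGap base class has no opcode of its own
+// because it is also the base class of LLabel.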
+class LInstructionGap: public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
 class LGoto: public LTemplateInstruction<0, 0, 0> {
  public:
   LGoto(int block_id, bool include_stack_check = false)
@@ -460,7 +487,6 @@
 template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
-  DECLARE_INSTRUCTION(ControlInstruction)
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -647,6 +673,28 @@
 };
 
 
+class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCmpSymbolEq(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
+};
+
+
+class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
+};
+
+
 class LIsNull: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LIsNull(LOperand* value) {
@@ -724,6 +772,31 @@
 };
 
 
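+// Tests whether a value is an undetectable JSObject, i.e. an object whose map
+// has the is_undetectable bit set (such as document.all).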
+class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LIsUndetectable(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+  explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LHasInstanceType(LOperand* value) {
@@ -1130,6 +1203,7 @@
 
   Token::Value op() const { return op_; }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1146,6 +1220,7 @@
     inputs_[1] = right;
   }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1450,6 +1525,25 @@
 };
 
 
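+// Call of a function value: the context and the function are passed in
+// registers and the arity is taken from the hydrogen instruction.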
+class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
  public:
   LCallKeyed(LOperand* context, LOperand* key) {
@@ -1720,16 +1814,14 @@
 };
 
 
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 1> {
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
                                      LOperand* key,
-                                     LOperand* val,
-                                     LOperand* temp) {
+                                     LOperand* val) {
     inputs_[0] = external_pointer;
     inputs_[1] = key;
     inputs_[2] = val;
-    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
@@ -1770,6 +1862,21 @@
 };
 
 
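+// Concatenation of two strings; see LChunkBuilder::DoStringAdd for the
+// operand and result constraints.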
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+};
+
+
 class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringCharCodeAt(LOperand* string, LOperand* index) {
@@ -1869,6 +1976,43 @@
 };
 
 
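+// The three Clamp*ToUint8 instructions clamp a value to the range [0, 255]:
+// D takes an untagged double, I an untagged int32 and T a tagged value, which
+// additionally needs an environment in case deoptimization is required.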
+class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
 class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckNonSmi(LOperand* value) {
@@ -2009,6 +2153,20 @@
 };
 
 
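+// The JavaScript 'in' operator; always lowered to a call with the result
+// fixed to eax.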
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LIn(LOperand* key, LOperand* object) {
+    inputs_[0] = key;
+    inputs_[1] = object;
+  }
+
+  LOperand* key() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(In, "in")
+};
+
+
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
@@ -2232,7 +2390,6 @@
 };
 
 #undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 13394cb..6e66b6e 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -73,7 +73,69 @@
   shr(addr, Page::kRegionSizeLog2);
 
   // Set dirty mark for region.
-  bts(Operand(object, Page::kDirtyFlagOffset), addr);
+  // Bit tests with a memory operand should be avoided on Intel processors,
+  // as they usually have long latency and multiple uops. Load the bit base
+  // operand into a register first and store it back after the bit is set.
+  mov(scratch, Operand(object, Page::kDirtyFlagOffset));
+  bts(Operand(scratch), addr);
+  mov(Operand(object, Page::kDirtyFlagOffset), scratch);
+}
+
+
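+// Clamp the double in input_reg to an integer in [0, 255] and leave it in
+// result_reg: NaN and values below zero become 0, values that round above 255
+// become 255, and in-range values are rounded by adding 0.5 and truncating.
+// scratch_reg is clobbered.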
+void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
+                                        XMMRegister scratch_reg,
+                                        Register result_reg) {
+  Label done;
+  ExternalReference zero_ref = ExternalReference::address_of_zero();
+  movdbl(scratch_reg, Operand::StaticVariable(zero_ref));
+  Set(result_reg, Immediate(0));
+  ucomisd(input_reg, scratch_reg);
+  j(below, &done, Label::kNear);
+  ExternalReference half_ref = ExternalReference::address_of_one_half();
+  movdbl(scratch_reg, Operand::StaticVariable(half_ref));
+  addsd(scratch_reg, input_reg);
+  cvttsd2si(result_reg, Operand(scratch_reg));
+  test(result_reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  Set(result_reg, Immediate(255));
+  bind(&done);
+}
+
+
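+// Clamp the 32-bit integer in reg to [0, 255]: in-range values are left
+// unchanged, negative values become 0 and values above 255 become 255. For
+// out-of-range inputs only the low byte of reg holds the clamped result.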
+void MacroAssembler::ClampUint8(Register reg) {
+  Label done;
+  test(reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  setcc(negative, reg);  // 1 if negative, 0 if positive.
+  dec_b(reg);  // 0 if negative, 255 if positive.
+  bind(&done);
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch,
+                                Label::Distance branch_near) {
+  ASSERT(cc == equal || cc == not_equal);
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    mov(scratch, Operand(object));
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    and_(Operand(scratch),
+         Immediate(ExternalReference::new_space_mask(isolate())));
+    cmp(Operand(scratch),
+        Immediate(ExternalReference::new_space_start(isolate())));
+    j(cc, branch, branch_near);
+  } else {
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start(isolate()).address());
+    lea(scratch, Operand(object, -new_space_start));
+    and_(scratch, isolate()->heap()->NewSpaceMask());
+    j(cc, branch, branch_near);
+  }
 }
 
 
@@ -83,14 +145,14 @@
                                  Register scratch) {
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
-  NearLabel done;
+  Label done;
 
   // Skip barrier if writing a smi.
   ASSERT_EQ(0, kSmiTag);
   test(value, Immediate(kSmiTagMask));
-  j(zero, &done);
+  j(zero, &done, Label::kNear);
 
-  InNewSpace(object, value, equal, &done);
+  InNewSpace(object, value, equal, &done, Label::kNear);
 
   // The offset is relative to a tagged or untagged HeapObject pointer,
   // so either offset or offset + kHeapObjectTag must be a
@@ -220,16 +282,30 @@
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
-    test(obj, Immediate(kSmiTagMask));
-    j(zero, fail);
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, fail);
   }
   cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
   j(not_equal, fail);
 }
 
 
+void MacroAssembler::DispatchMap(Register obj,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+  j(equal, success);
+
+  bind(&fail);
+}
+
+
 Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                              Register map,
                                              Register instance_type) {
@@ -511,9 +587,9 @@
   // not NULL.  The frame pointer is NULL in the exception handler of
   // a JS entry frame.
   Set(esi, Immediate(0));  // Tentatively set context pointer to NULL.
-  NearLabel skip;
+  Label skip;
   cmp(ebp, 0);
-  j(equal, &skip, not_taken);
+  j(equal, &skip, Label::kNear);
   mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   bind(&skip);
 
@@ -538,12 +614,12 @@
   mov(esp, Operand::StaticVariable(handler_address));
 
   // Unwind the handlers until the ENTRY handler is found.
-  NearLabel loop, done;
+  Label loop, done;
   bind(&loop);
   // Load the type of the current stack handler.
   const int kStateOffset = StackHandlerConstants::kStateOffset;
   cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
-  j(equal, &done);
+  j(equal, &done, Label::kNear);
   // Fetch the next handler in the list.
   const int kNextOffset = StackHandlerConstants::kNextOffset;
   mov(esp, Operand(esp, kNextOffset));
@@ -614,7 +690,7 @@
 
   // Check if both contexts are the same.
   cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
-  j(equal, &same_contexts, taken);
+  j(equal, &same_contexts);
 
   // Compare security tokens, save holder_reg on the stack so we can use it
   // as a temporary register.
@@ -644,7 +720,7 @@
   mov(scratch, FieldOperand(scratch, token_offset));
   cmp(scratch, FieldOperand(holder_reg, token_offset));
   pop(holder_reg);
-  j(not_equal, miss, not_taken);
+  j(not_equal, miss);
 
   bind(&same_contexts);
 }
@@ -732,9 +808,9 @@
     mov(top_reg, result);
   }
   add(Operand(top_reg), Immediate(object_size));
-  j(carry, gc_required, not_taken);
+  j(carry, gc_required);
   cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
-  j(above, gc_required, not_taken);
+  j(above, gc_required);
 
   // Update allocation top.
   UpdateAllocationTopHelper(top_reg, scratch);
@@ -831,9 +907,9 @@
     mov(result_end, object_size);
   }
   add(result_end, Operand(result));
-  j(carry, gc_required, not_taken);
+  j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
-  j(above, gc_required, not_taken);
+  j(above, gc_required);
 
   // Tag result if requested.
   if ((flags & TAG_OBJECT) != 0) {
@@ -1062,9 +1138,9 @@
                                       Label* then_label) {
   Label ok;
   test(result, Operand(result));
-  j(not_zero, &ok, taken);
+  j(not_zero, &ok);
   test(op, Operand(op));
-  j(sign, then_label, not_taken);
+  j(sign, then_label);
   bind(&ok);
 }
 
@@ -1076,10 +1152,10 @@
                                       Label* then_label) {
   Label ok;
   test(result, Operand(result));
-  j(not_zero, &ok, taken);
+  j(not_zero, &ok);
   mov(scratch, Operand(op1));
   or_(scratch, Operand(op2));
-  j(sign, then_label, not_taken);
+  j(sign, then_label);
   bind(&ok);
 }
 
@@ -1090,17 +1166,17 @@
                                              Label* miss) {
   // Check that the receiver isn't a smi.
   test(function, Immediate(kSmiTagMask));
-  j(zero, miss, not_taken);
+  j(zero, miss);
 
   // Check that the function really is a function.
   CmpObjectType(function, JS_FUNCTION_TYPE, result);
-  j(not_equal, miss, not_taken);
+  j(not_equal, miss);
 
   // Make sure that the function has an instance prototype.
   Label non_instance;
   movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
   test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
-  j(not_zero, &non_instance, not_taken);
+  j(not_zero, &non_instance);
 
   // Get the prototype or initial map from the function.
   mov(result,
@@ -1110,7 +1186,7 @@
   // simply miss the cache instead. This will allow us to allocate a
   // prototype object on-demand in the runtime system.
   cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
-  j(equal, miss, not_taken);
+  j(equal, miss);
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -1131,9 +1207,9 @@
 }
 
 
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
   ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
-  call(stub->GetCode(), RelocInfo::CODE_TARGET);
+  call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
@@ -1391,7 +1467,7 @@
 
   // Check if the result handle holds 0.
   test(eax, Operand(eax));
-  j(zero, &empty_handle, not_taken);
+  j(zero, &empty_handle);
   // It was non-zero.  Dereference to get the result value.
   mov(eax, Operand(eax, 0));
   bind(&prologue);
@@ -1401,7 +1477,7 @@
   sub(Operand::StaticVariable(level_address), Immediate(1));
   Assert(above_equal, "Invalid HandleScope level");
   cmp(edi, Operand::StaticVariable(limit_address));
-  j(not_equal, &delete_allocated_handles, not_taken);
+  j(not_equal, &delete_allocated_handles);
   bind(&leave_exit_frame);
 
   // Check if the function scheduled an exception.
@@ -1409,7 +1485,7 @@
       ExternalReference::scheduled_exception_address(isolate());
   cmp(Operand::StaticVariable(scheduled_exception_address),
       Immediate(isolate()->factory()->the_hole_value()));
-  j(not_equal, &promote_scheduled_exception, not_taken);
+  j(not_equal, &promote_scheduled_exception);
   LeaveApiExitFrame();
   ret(stack_space * kPointerSize);
   bind(&promote_scheduled_exception);
@@ -1456,13 +1532,32 @@
 }
 
 
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+  // This macro takes the dst register to make the code more readable
+  // at the call sites. However, the dst register has to be ecx to
+  // follow the calling convention which requires the call type to be
+  // in ecx.
+  ASSERT(dst.is(ecx));
+  if (call_kind == CALL_AS_FUNCTION) {
+    // Set to some non-zero smi by updating the least significant
+    // byte.
+    mov_b(Operand(dst), 1 << kSmiTagSize);
+  } else {
+    // Set to smi zero by clearing the register.
+    xor_(dst, Operand(dst));
+  }
+}
+
+
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
                                     Handle<Code> code_constant,
                                     const Operand& code_operand,
-                                    NearLabel* done,
+                                    Label* done,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    Label::Distance done_near,
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   bool definitely_matches = false;
   Label invoke;
   if (expected.is_immediate()) {
@@ -1512,10 +1607,13 @@
     }
 
     if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+      SetCallKind(ecx, call_kind);
       call(adaptor, RelocInfo::CODE_TARGET);
-      if (post_call_generator != NULL) post_call_generator->Generate();
-      jmp(done);
+      call_wrapper.AfterCall();
+      jmp(done, done_near);
     } else {
+      SetCallKind(ecx, call_kind);
       jmp(adaptor, RelocInfo::CODE_TARGET);
     }
     bind(&invoke);
@@ -1527,15 +1625,20 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
-                                PostCallGenerator* post_call_generator) {
-  NearLabel done;
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
+  Label done;
   InvokePrologue(expected, actual, Handle<Code>::null(), code,
-                 &done, flag, post_call_generator);
+                 &done, flag, Label::kNear, call_wrapper,
+                 call_kind);
   if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(ecx, call_kind);
     call(code);
-    if (post_call_generator != NULL) post_call_generator->Generate();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(ecx, call_kind);
     jmp(code);
   }
   bind(&done);
@@ -1547,16 +1650,20 @@
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
-                                PostCallGenerator* post_call_generator) {
-  NearLabel done;
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
+  Label done;
   Operand dummy(eax);
-  InvokePrologue(expected, actual, code, dummy, &done,
-                 flag, post_call_generator);
+  InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
+                 call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code, rmode));
+    SetCallKind(ecx, call_kind);
     call(code, rmode);
-    if (post_call_generator != NULL) post_call_generator->Generate();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(ecx, call_kind);
     jmp(code, rmode);
   }
   bind(&done);
@@ -1566,7 +1673,8 @@
 void MacroAssembler::InvokeFunction(Register fun,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   ASSERT(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -1575,14 +1683,15 @@
 
   ParameterCount expected(ebx);
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, actual, flag, post_call_generator);
+             expected, actual, flag, call_wrapper, call_kind);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   mov(edi, Immediate(Handle<JSFunction>(function)));
@@ -1594,18 +1703,18 @@
     // code field in the function to allow recompilation to take effect
     // without changing any of the call sites.
     InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-               expected, actual, flag, post_call_generator);
+               expected, actual, flag, call_wrapper, call_kind);
   } else {
     Handle<Code> code(function->code());
     InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
-               flag, post_call_generator);
+               flag, call_wrapper, call_kind);
   }
 }
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
-                                   PostCallGenerator* post_call_generator) {
+                                   const CallWrapper& call_wrapper) {
   // Calls are not allowed in some stubs.
   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
@@ -1615,7 +1724,7 @@
   ParameterCount expected(0);
   GetBuiltinFunction(edi, id);
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, expected, flag, post_call_generator);
+             expected, expected, flag, call_wrapper, CALL_AS_METHOD);
 }
 
 void MacroAssembler::GetBuiltinFunction(Register target,
@@ -1681,7 +1790,7 @@
   mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, isolate()->factory()->meta_map(), &fail, false);
+    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
     jmp(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
@@ -1845,7 +1954,7 @@
 
 void MacroAssembler::Check(Condition cc, const char* msg) {
   Label L;
-  j(cc, &L, taken);
+  j(cc, &L);
   Abort(msg);
   // will not return here
   bind(&L);
@@ -1894,56 +2003,14 @@
 }
 
 
-void MacroAssembler::JumpIfNotNumber(Register reg,
-                                     TypeInfo info,
-                                     Label* on_not_number) {
-  if (emit_debug_code()) AbortIfSmi(reg);
-  if (!info.IsNumber()) {
-    cmp(FieldOperand(reg, HeapObject::kMapOffset),
-        isolate()->factory()->heap_number_map());
-    j(not_equal, on_not_number);
-  }
-}
-
-
-void MacroAssembler::ConvertToInt32(Register dst,
-                                    Register source,
-                                    Register scratch,
-                                    TypeInfo info,
-                                    Label* on_not_int32) {
-  if (emit_debug_code()) {
-    AbortIfSmi(source);
-    AbortIfNotNumber(source);
-  }
-  if (info.IsInteger32()) {
-    cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
-  } else {
-    Label done;
-    bool push_pop = (scratch.is(no_reg) && dst.is(source));
-    ASSERT(!scratch.is(source));
-    if (push_pop) {
-      push(dst);
-      scratch = dst;
-    }
-    if (scratch.is(no_reg)) scratch = dst;
-    cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
-    cmp(scratch, 0x80000000u);
-    if (push_pop) {
-      j(not_equal, &done);
-      pop(dst);
-      jmp(on_not_int32);
-    } else {
-      j(equal, on_not_int32);
-    }
-
-    bind(&done);
-    if (push_pop) {
-      add(Operand(esp), Immediate(kPointerSize));  // Pop.
-    }
-    if (!scratch.is(dst)) {
-      mov(dst, scratch);
-    }
-  }
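+// Load the instance descriptors of map into descriptors. Maps that have no
+// descriptors store bit field 3 (a smi) in that slot; in that case load the
+// canonical empty descriptor array instead.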
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  mov(descriptors,
+      FieldOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
+  Label not_smi;
+  JumpIfNotSmi(descriptors, &not_smi);
+  mov(descriptors, isolate()->factory()->empty_descriptor_array());
+  bind(&not_smi);
 }
 
 
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index b986264..2ab98c5 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -29,7 +29,7 @@
 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
 
 #include "assembler.h"
-#include "type-info.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
@@ -45,13 +45,11 @@
   RESULT_CONTAINS_TOP = 1 << 1
 };
 
+
 // Convenience for platform-independent signatures.  We do not normally
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
 
-// Forward declaration.
-class PostCallGenerator;
-
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -73,11 +71,11 @@
 
   // Check if object is in new space.
   // scratch can be object itself, but it will be clobbered.
-  template <typename LabelType>
   void InNewSpace(Register object,
                   Register scratch,
                   Condition cc,  // equal for new space, not_equal otherwise.
-                  LabelType* branch);
+                  Label* branch,
+                  Label::Distance branch_near = Label::kFar);
 
   // For page containing |object| mark region covering [object+offset]
   // dirty. |object| is the object being stored into, |value| is the
@@ -155,37 +153,46 @@
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Set up call kind marking in ecx. The method takes ecx as an
+  // explicit first parameter to make the code more readable at the
+  // call sites.
+  void SetCallKind(Register dst, CallKind kind);
+
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(const Operand& code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag,
-                  PostCallGenerator* post_call_generator = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
                   InvokeFlag flag,
-                  PostCallGenerator* post_call_generator = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
                      InvokeFlag flag,
-                     PostCallGenerator* post_call_generator = NULL);
+                     const CallWrapper& call_wrapper = NullCallWrapper());
 
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -209,13 +216,21 @@
   // Compare instance type for map.
   void CmpInstanceType(Register map, InstanceType type);
 
-  // Check if the map of an object is equal to a specified map and
-  // branch to label if not. Skip the smi check if not required
-  // (object is known to be a heap object)
+  // Check if the map of an object is equal to a specified map and branch to
+  // label if not. Skip the smi check if not required (object is known to be a
+  // heap object)
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object)
+  void DispatchMap(Register obj,
+                   Handle<Map> map,
+                   Handle<Code> success,
+                   SmiCheckType smi_check_type);
 
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
@@ -242,6 +257,13 @@
   // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
   void FCmp();
 
+  void ClampUint8(Register reg);
+
+  void ClampDoubleToUint8(XMMRegister input_reg,
+                          XMMRegister scratch_reg,
+                          Register result_reg);
+
+
   // Smi tagging support.
   void SmiTag(Register reg) {
     ASSERT(kSmiTag == 0);
@@ -253,16 +275,6 @@
   }
 
   // Modifies the register even if it does not contain a Smi!
-  void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
-    ASSERT(kSmiTagSize == 1);
-    sar(reg, kSmiTagSize);
-    if (info.IsSmi()) {
-      ASSERT(kSmiTag == 0);
-      j(carry, non_smi);
-    }
-  }
-
-  // Modifies the register even if it does not contain a Smi!
   void SmiUntag(Register reg, Label* is_smi) {
     ASSERT(kSmiTagSize == 1);
     sar(reg, kSmiTagSize);
@@ -273,24 +285,15 @@
   // Jump if the register contains a smi.
   inline void JumpIfSmi(Register value, Label* smi_label) {
     test(value, Immediate(kSmiTagMask));
-    j(zero, smi_label, not_taken);
+    j(zero, smi_label);
   }
   // Jump if the register contains a non-smi.
   inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
     test(value, Immediate(kSmiTagMask));
-    j(not_zero, not_smi_label, not_taken);
+    j(not_zero, not_smi_label);
   }
 
-  // Assumes input is a heap object.
-  void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
-
-  // Assumes input is a heap number.  Jumps on things out of range.  Also jumps
-  // on the min negative int32.  Ignores frational parts.
-  void ConvertToInt32(Register dst,
-                      Register src,      // Can be the same as dst.
-                      Register scratch,  // Can be no_reg or dst, but not src.
-                      TypeInfo info,
-                      Label* on_not_int32);
+  void LoadInstanceDescriptors(Register map, Register descriptors);
 
   void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
 
@@ -457,7 +460,7 @@
   // Runtime calls
 
   // Call a code stub.  Generate the code if necessary.
-  void CallStub(CodeStub* stub);
+  void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
   // Call a code stub and return the code object called.  Try to generate
   // the code if necessary.  Do not perform a GC but instead return a retry
@@ -655,9 +658,11 @@
                       const ParameterCount& actual,
                       Handle<Code> code_constant,
                       const Operand& code_operand,
-                      NearLabel* done,
+                      Label* done,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      Label::Distance done_near = Label::kFar,
+                      const CallWrapper& call_wrapper = NullCallWrapper(),
+                      CallKind call_kind = CALL_AS_METHOD);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -692,33 +697,6 @@
 };
 
 
-template <typename LabelType>
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                LabelType* branch) {
-  ASSERT(cc == equal || cc == not_equal);
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    mov(scratch, Operand(object));
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    and_(Operand(scratch),
-         Immediate(ExternalReference::new_space_mask(isolate())));
-    cmp(Operand(scratch),
-        Immediate(ExternalReference::new_space_start(isolate())));
-    j(cc, branch);
-  } else {
-    int32_t new_space_start = reinterpret_cast<int32_t>(
-        ExternalReference::new_space_start(isolate()).address());
-    lea(scratch, Operand(object, -new_space_start));
-    and_(scratch, isolate()->heap()->NewSpaceMask());
-    j(cc, branch);
-  }
-}
-
-
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -739,17 +717,6 @@
 };
 
 
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
-  PostCallGenerator() { }
-  virtual ~PostCallGenerator() { }
-  virtual void Generate() = 0;
-};
-
-
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 5b2f208..8db2e9b 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -305,7 +305,7 @@
   // The length of a capture should not be negative. This can only happen
   // if the end of the capture is unrecorded, or at a point earlier than
   // the start of the capture.
-  BranchOrBacktrack(less, on_no_match, not_taken);
+  BranchOrBacktrack(less, on_no_match);
 
   // If length is zero, either the capture is empty or it is completely
   // uncaptured. In either case succeed immediately.
@@ -348,7 +348,7 @@
     __ add(Operand(edi), Immediate(1));
     // Compare to end of match, and loop if not done.
     __ cmp(edi, Operand(ebx));
-    __ j(below, &loop, taken);
+    __ j(below, &loop);
     __ jmp(&success);
 
     __ bind(&fail);
@@ -687,11 +687,11 @@
   __ mov(ecx, esp);
   __ sub(ecx, Operand::StaticVariable(stack_limit));
   // Handle it if the stack pointer is already below the stack limit.
-  __ j(below_equal, &stack_limit_hit, not_taken);
+  __ j(below_equal, &stack_limit_hit);
   // Check if there is room for the variable number of registers above
   // the stack limit.
   __ cmp(ecx, num_registers_ * kPointerSize);
-  __ j(above_equal, &stack_ok, taken);
+  __ j(above_equal, &stack_ok);
   // Exit with OutOfMemory exception. There is not enough space on the stack
   // for our working registers.
   __ mov(eax, EXCEPTION);
@@ -971,9 +971,9 @@
 }
 
 void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by)  {
-  NearLabel after_position;
+  Label after_position;
   __ cmp(edi, -by * char_size());
-  __ j(greater_equal, &after_position);
+  __ j(greater_equal, &after_position, Label::kNear);
   __ mov(edi, -by * char_size());
   // On RegExp code entry (where this operation is used), the character before
   // the current position is expected to be already loaded.
@@ -1142,8 +1142,7 @@
 
 
 void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
-                                                 Label* to,
-                                                 Hint hint) {
+                                                 Label* to) {
   if (condition < 0) {  // No condition
     if (to == NULL) {
       Backtrack();
@@ -1153,10 +1152,10 @@
     return;
   }
   if (to == NULL) {
-    __ j(condition, &backtrack_label_, hint);
+    __ j(condition, &backtrack_label_);
     return;
   }
-  __ j(condition, to, hint);
+  __ j(condition, to);
 }
 
 
@@ -1209,7 +1208,7 @@
   ExternalReference stack_limit =
       ExternalReference::address_of_stack_limit(masm_->isolate());
   __ cmp(esp, Operand::StaticVariable(stack_limit));
-  __ j(above, &no_preempt, taken);
+  __ j(above, &no_preempt);
 
   SafeCall(&check_preempt_label_);
 
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 70606da..21c86d0 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -168,7 +168,7 @@
 
   // Equivalent to a conditional branch to the label, unless the label
   // is NULL, in which case it is a conditional Backtrack.
-  void BranchOrBacktrack(Condition condition, Label* to, Hint hint = no_hint);
+  void BranchOrBacktrack(Condition condition, Label* to);
 
   // Call and return internally in the generated code in a way that
   // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index cb660cd..13ddf35 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -56,7 +56,9 @@
 // just use the C stack limit.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
+    USE(isolate);
     return c_limit;
   }
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 27d2886..550a6ff 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -57,7 +57,7 @@
 
     // Check that the key in the entry matches the name.
     __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
-    __ j(not_equal, &miss, not_taken);
+    __ j(not_equal, &miss);
 
     // Check that the flags match what we're looking for.
     __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
@@ -76,7 +76,7 @@
 
     // Check that the key in the entry matches the name.
     __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
-    __ j(not_equal, &miss, not_taken);
+    __ j(not_equal, &miss);
 
     // Get the code entry from the cache.
     __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
@@ -107,18 +107,17 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                             Label* miss_label,
-                                             Register receiver,
-                                             String* name,
-                                             Register r0,
-                                             Register r1) {
+static MaybeObject* GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                                     Label* miss_label,
+                                                     Register receiver,
+                                                     String* name,
+                                                     Register r0,
+                                                     Register r1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1);
   __ IncrementCounter(counters->negative_lookups_miss(), 1);
 
-  Label done;
   __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
 
   const int kInterceptorOrAccessCheckNeededMask =
@@ -127,11 +126,11 @@
   // Bail out if the receiver has a named interceptor or requires access checks.
   __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
             kInterceptorOrAccessCheckNeededMask);
-  __ j(not_zero, miss_label, not_taken);
+  __ j(not_zero, miss_label);
 
   // Check that receiver is a JSObject.
   __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
-  __ j(below, miss_label, not_taken);
+  __ j(below, miss_label);
 
   // Load properties array.
   Register properties = r0;
@@ -142,64 +141,20 @@
          Immediate(masm->isolate()->factory()->hash_table_map()));
   __ j(not_equal, miss_label);
 
-  // Compute the capacity mask.
-  const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up.
-  static const int kProbes = 4;
-  const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
-  // not equal to the name and kProbes-th slot is not used (its name is the
-  // undefined value), it guarantees the hash table doesn't contain the
-  // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
-  for (int i = 0; i < kProbes; i++) {
-    // r0 points to properties hash.
-    // Compute the masked index: (hash + i + i * i) & mask.
-    Register index = r1;
-    // Capacity is smi 2^n.
-    __ mov(index, FieldOperand(properties, kCapacityOffset));
-    __ dec(index);
-    __ and_(Operand(index),
-            Immediate(Smi::FromInt(name->Hash() +
-                                   StringDictionary::GetProbeOffset(i))));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
-
-    Register entity_name = r1;
-    // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
-                                kElementsStartOffset - kHeapObjectTag));
-    __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
-    if (i != kProbes - 1) {
-      __ j(equal, &done, taken);
-
-      // Stop if found the property.
-      __ cmp(entity_name, Handle<String>(name));
-      __ j(equal, miss_label, not_taken);
-
-      // Check if the entry name is not a symbol.
-      __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-      __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-                kIsSymbolMask);
-      __ j(zero, miss_label, not_taken);
-    } else {
-      // Give up probing if still not found the undefined value.
-      __ j(not_equal, miss_label, not_taken);
-    }
-  }
+  Label done;
+  MaybeObject* result =
+      StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                         miss_label,
+                                                         &done,
+                                                         properties,
+                                                         name,
+                                                         r1);
+  if (result->IsFailure()) return result;
 
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
+
+  return result;
 }
 
 
@@ -234,7 +189,7 @@
 
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ j(zero, &miss);
 
   // Get the map of the receiver and compute the hash.
   __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -295,11 +250,11 @@
                                            Label* miss_label) {
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss_label, not_taken);
+  __ j(zero, miss_label);
 
   // Check that the object is a JS array.
   __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, miss_label, not_taken);
+  __ j(not_equal, miss_label);
 
   // Load length directly from the JS array.
   __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
@@ -316,14 +271,14 @@
                                 Label* non_string_object) {
   // Check that the object isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, smi, not_taken);
+  __ j(zero, smi);
 
   // Check that the object is a string.
   __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
   ASSERT(kNotStringTag != 0);
   __ test(scratch, Immediate(kNotStringTag));
-  __ j(not_zero, non_string_object, not_taken);
+  __ j(not_zero, non_string_object);
 }
 
 
@@ -348,7 +303,7 @@
     // Check if the object is a JSValue wrapper.
     __ bind(&check_wrapper);
     __ cmp(scratch1, JS_VALUE_TYPE);
-    __ j(not_equal, miss, not_taken);
+    __ j(not_equal, miss);
 
     // Check if the wrapped value is a string and load the length
     // directly if it is.
@@ -533,10 +488,12 @@
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name)
+                          Register name,
+                          Code::ExtraICState extra_ic_state)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
-        name_(name) {}
+        name_(name),
+        extra_ic_state_(extra_ic_state) {}
 
   MaybeObject* Compile(MacroAssembler* masm,
                        JSObject* object,
@@ -553,7 +510,7 @@
 
     // Check that the receiver isn't a smi.
     __ test(receiver, Immediate(kSmiTagMask));
-    __ j(zero, miss, not_taken);
+    __ j(zero, miss);
 
     CallOptimization optimization(lookup);
 
@@ -661,8 +618,11 @@
           GenerateFastApiCall(masm, optimization, arguments_.immediate());
       if (result->IsFailure()) return result;
     } else {
+      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+          ? CALL_AS_FUNCTION
+          : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION);
+                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -741,6 +701,7 @@
   StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
+  Code::ExtraICState extra_ic_state_;
 };
 
 
@@ -758,6 +719,14 @@
 }
 
 
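+// Tail call the KeyedLoadIC_MissForceGeneric builtin.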
+void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
+  Code* code = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  Handle<Code> ic(code);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+}
+
+
 // Both name_reg and receiver_reg are preserved on jumps to miss_label,
 // but may be destroyed if store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -770,12 +739,12 @@
                                       Label* miss_label) {
   // Check that the object isn't a smi.
   __ test(receiver_reg, Immediate(kSmiTagMask));
-  __ j(zero, miss_label, not_taken);
+  __ j(zero, miss_label);
 
   // Check that the map of the object hasn't changed.
   __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
          Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, miss_label, not_taken);
+  __ j(not_equal, miss_label);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -865,7 +834,7 @@
     __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
            Immediate(masm->isolate()->factory()->the_hole_value()));
   }
-  __ j(not_equal, miss, not_taken);
+  __ j(not_equal, miss);
   return cell;
 }
 
@@ -951,12 +920,17 @@
       ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(),
-                                       miss,
-                                       reg,
-                                       name,
-                                       scratch1,
-                                       scratch2);
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
+
       __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
       reg = holder_reg;  // from now the object is in holder_reg
       __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
@@ -965,7 +939,7 @@
       __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
       __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
       // Branch on the result of the map check.
-      __ j(not_equal, miss, not_taken);
+      __ j(not_equal, miss);
       // Check access rights to the global object.  This has to happen
       // after the map check so that we know that the object is
       // actually a global object.
@@ -985,7 +959,7 @@
       __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
              Immediate(Handle<Map>(current->map())));
       // Branch on the result of the map check.
-      __ j(not_equal, miss, not_taken);
+      __ j(not_equal, miss);
       // Check access rights to the global object.  This has to happen
       // after the map check so that we know that the object is
       // actually a global object.
@@ -1012,7 +986,7 @@
   // Check the holder map.
   __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
          Immediate(Handle<Map>(holder->map())));
-  __ j(not_equal, miss, not_taken);
+  __ j(not_equal, miss);
 
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1047,7 +1021,7 @@
                                      Label* miss) {
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ j(zero, miss);
 
   // Check the prototype chain.
   Register reg =
@@ -1072,7 +1046,7 @@
                                                 Label* miss) {
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ j(zero, miss);
 
   // Check that the maps haven't changed.
   Register reg =
@@ -1139,7 +1113,7 @@
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ j(zero, miss);
 
   // Check that the maps haven't changed.
   CheckPrototypes(object, receiver, holder,
@@ -1166,7 +1140,7 @@
 
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss, not_taken);
+  __ j(zero, miss);
 
   // So far the most popular follow ups for interceptor loads are FIELD
   // and CALLBACKS, so inline only them, other cases may be added
@@ -1295,7 +1269,7 @@
 void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
     __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
-    __ j(not_equal, miss, not_taken);
+    __ j(not_equal, miss);
   }
 }
 
@@ -1317,7 +1291,7 @@
   // the receiver cannot be a smi.
   if (object != holder) {
     __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, miss, not_taken);
+    __ j(zero, miss);
   }
 
   // Check that the maps haven't changed.
@@ -1344,17 +1318,17 @@
     // function can all use this call IC. Before we load through the
     // function, we have to verify that it still is a function.
     __ test(edi, Immediate(kSmiTagMask));
-    __ j(zero, miss, not_taken);
+    __ j(zero, miss);
     __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
-    __ j(not_equal, miss, not_taken);
+    __ j(not_equal, miss);
 
     // Check the shared function info. Make sure it hasn't changed.
     __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
            Immediate(Handle<SharedFunctionInfo>(function->shared())));
-    __ j(not_equal, miss, not_taken);
+    __ j(not_equal, miss);
   } else {
     __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
-    __ j(not_equal, miss, not_taken);
+    __ j(not_equal, miss);
   }
 }
 
@@ -1362,7 +1336,8 @@
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
   MaybeObject* maybe_obj =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
-                                                       kind_);
+                                               kind_,
+                                               extra_ic_state_);
   Object* obj;
   if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   __ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1392,7 +1367,7 @@
 
   // Check that the receiver isn't a smi.
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ j(zero, &miss);
 
   // Do the right check and compute the holder register.
   Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
@@ -1402,9 +1377,9 @@
 
   // Check that the function really is a function.
   __ test(edi, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ j(zero, &miss);
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Patch the receiver on the stack with the global proxy if
   // necessary.
@@ -1414,7 +1389,11 @@
   }
 
   // Invoke the function.
-  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -1689,7 +1668,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1773,7 +1754,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1896,7 +1879,11 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   __ bind(&miss);
   // ecx: function name.
@@ -1964,7 +1951,7 @@
 
   // Check if the argument is a heap number and load its value into xmm0.
   Label slow;
-  __ CheckMap(eax, factory()->heap_number_map(), &slow, true);
+  __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
   __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
 
   // Check if the argument is strictly positive. Note this also
@@ -2026,7 +2013,8 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // ecx: function name.
@@ -2108,7 +2096,7 @@
   // Check if the argument is a heap number and load its exponent and
   // sign into ebx.
   __ bind(&not_smi);
-  __ CheckMap(eax, factory()->heap_number_map(), &slow, true);
+  __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
   __ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
 
   // Check the sign of the argument. If the argument is positive,
@@ -2131,7 +2119,8 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // ecx: function name.
@@ -2155,6 +2144,7 @@
   // repatch it to global receiver.
   if (object->IsGlobalObject()) return heap()->undefined_value();
   if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
             JSObject::cast(object), holder);
   if (depth == kInvalidProtoDepth) return heap()->undefined_value();
@@ -2169,7 +2159,7 @@
 
   // Check that the receiver isn't a smi.
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss_before_stack_reserved, not_taken);
+  __ j(zero, &miss_before_stack_reserved);
 
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->call_const(), 1);
@@ -2204,11 +2194,12 @@
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
-                                                   CheckType check) {
+MaybeObject* CallStubCompiler::CompileCallConstant(
+    Object* object,
+    JSObject* holder,
+    JSFunction* function,
+    String* name,
+    CheckType check) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2237,7 +2228,7 @@
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
     __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, &miss, not_taken);
+    __ j(zero, &miss);
   }
 
   // Make sure that it's okay not to patch the on stack receiver
@@ -2269,7 +2260,7 @@
       } else {
         // Check that the object is a string or a symbol.
         __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
-        __ j(above_equal, &miss, not_taken);
+        __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
@@ -2287,9 +2278,9 @@
         Label fast;
         // Check that the object is a smi or a heap number.
         __ test(edx, Immediate(kSmiTagMask));
-        __ j(zero, &fast, taken);
+        __ j(zero, &fast);
         __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
-        __ j(not_equal, &miss, not_taken);
+        __ j(not_equal, &miss);
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
@@ -2309,9 +2300,9 @@
         Label fast;
         // Check that the object is a boolean.
         __ cmp(edx, factory()->true_value());
-        __ j(equal, &fast, taken);
+        __ j(equal, &fast);
         __ cmp(edx, factory()->false_value());
-        __ j(not_equal, &miss, not_taken);
+        __ j(not_equal, &miss);
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
@@ -2326,7 +2317,11 @@
       UNREACHABLE();
   }
 
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2361,7 +2356,7 @@
   // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), ecx);
+  CallInterceptorCompiler compiler(this, arguments(), ecx, extra_ic_state_);
   MaybeObject* result = compiler.Compile(masm(),
                                          object,
                                          holder,
@@ -2379,9 +2374,9 @@
 
   // Check that the function really is a function.
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ j(zero, &miss);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Patch the receiver on the stack with the global proxy if
   // necessary.
@@ -2392,7 +2387,11 @@
 
   // Invoke the function.
   __ mov(edi, eax);
-  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle load cache miss.
   __ bind(&miss);
@@ -2404,11 +2403,12 @@
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
+MaybeObject* CallStubCompiler::CompileCallGlobal(
+    JSObject* object,
+    GlobalObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2451,16 +2451,21 @@
   __ IncrementCounter(counters->call_global_inline(), 1);
   ASSERT(function->is_compiled());
   ParameterCount expected(function->shared()->formal_parameter_count());
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   if (V8::UseCrankshaft()) {
     // TODO(kasperl): For now, we always call indirectly through the
     // code field in the function to allow recompilation to take effect
     // without changing any of the call sites.
     __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-                  expected, arguments(), JUMP_FUNCTION);
+                  expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   } else {
     Handle<Code> code(function->code());
     __ InvokeCode(code, expected, arguments(),
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   }
 
   // Handle call cache miss.
@@ -2518,12 +2523,12 @@
 
   // Check that the object isn't a smi.
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ j(zero, &miss);
 
   // Check that the map of the object hasn't changed.
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
          Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2568,12 +2573,12 @@
 
   // Check that the object isn't a smi.
   __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ j(zero, &miss);
 
   // Check that the map of the object hasn't changed.
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
          Immediate(Handle<Map>(receiver->map())));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
@@ -2620,7 +2625,7 @@
   // Check that the map of the global has not changed.
   __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
          Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
 
   // Compute the cell operand to use.
@@ -2673,7 +2678,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   // Generate store field code.  Trashes the name register.
   GenerateStoreField(masm(),
@@ -2694,8 +2699,35 @@
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
-    JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
+    Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  MaybeObject* maybe_stub =
+      KeyedStoreFastElementStub(is_js_array).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(edx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -2703,51 +2735,22 @@
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
+  __ JumpIfSmi(edx, &miss);
 
-  // Check that the receiver isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
-
-  // Check that the map matches.
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(receiver->map())));
-  __ j(not_equal, &miss, not_taken);
-
-  // Check that the key is a smi.
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &miss, not_taken);
-
-  // Get the elements array and make sure it is a fast element array, not 'cow'.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
-         Immediate(factory()->fixed_array_map()));
-  __ j(not_equal, &miss, not_taken);
-
-  // Check that the key is within bounds.
-  if (receiver->IsJSArray()) {
-    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
-    __ j(above_equal, &miss, not_taken);
-  } else {
-    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // Compare smis.
-    __ j(above_equal, &miss, not_taken);
+  Register map_reg = ebx;
+  __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
+  int receiver_count = receiver_maps->length();
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    __ cmp(map_reg, map);
+    __ j(equal, Handle<Code>(handler_ics->at(current)));
   }
-
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ mov(edx, Operand(eax));
-  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
-  __ RecordWrite(edi, 0, edx, ecx);
-
-  // Done.
-  __ ret(0);
-
-  // Handle store cache miss.
   __ bind(&miss);
-  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
-  __ jmp(ic, RelocInfo::CODE_TARGET);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -2763,7 +2766,7 @@
 
   // Check that the receiver isn't a smi.
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  __ j(zero, &miss);
 
   ASSERT(last->IsGlobalObject() || last->HasFastProperties());
 
@@ -2916,7 +2919,7 @@
   // the receiver cannot be a smi.
   if (object != holder) {
     __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &miss, not_taken);
+    __ j(zero, &miss);
   }
 
   // Check that the maps haven't changed.
@@ -2933,7 +2936,7 @@
   // Check for deleted property if property can actually be deleted.
   if (!is_dont_delete) {
     __ cmp(ebx, factory()->the_hole_value());
-    __ j(equal, &miss, not_taken);
+    __ j(equal, &miss);
   } else if (FLAG_debug_code) {
     __ cmp(ebx, factory()->the_hole_value());
     __ Check(not_equal, "DontDelete cells can't contain the hole");
@@ -2969,7 +2972,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
 
@@ -2999,7 +3002,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
                                              ecx, edi, callback, name, &miss);
@@ -3034,7 +3037,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
                        value, name, &miss);
@@ -3062,7 +3065,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
@@ -3098,7 +3101,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   GenerateLoadArrayLength(masm(), edx, ecx, &miss);
   __ bind(&miss);
@@ -3123,7 +3126,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
   __ bind(&miss);
@@ -3148,7 +3151,7 @@
 
   // Check that the name has not changed.
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss);
 
   GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
   __ bind(&miss);
@@ -3160,48 +3163,52 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(edx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
+  __ JumpIfSmi(edx, &miss);
 
-  // Check that the receiver isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
-
-  // Check that the map matches.
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(receiver->map())));
-  __ j(not_equal, &miss, not_taken);
-
-  // Check that the key is a smi.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &miss, not_taken);
-
-  // Get the elements array.
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ AssertFastElements(ecx);
-
-  // Check that the key is within bounds.
-  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
-  __ j(above_equal, &miss, not_taken);
-
-  // Load the result and make sure it's not the hole.
-  __ mov(ebx, Operand(ecx, eax, times_2,
-                      FixedArray::kHeaderSize - kHeapObjectTag));
-  __ cmp(ebx, factory()->the_hole_value());
-  __ j(equal, &miss, not_taken);
-  __ mov(eax, ebx);
-  __ ret(0);
+  Register map_reg = ebx;
+  __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
+  int receiver_count = receiver_maps->length();
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    __ cmp(map_reg, map);
+    __ j(equal, Handle<Code>(handler_ics->at(current)));
+  }
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -3222,7 +3229,7 @@
   __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
   __ cmp(ebx, factory()->undefined_value());
-  __ j(not_equal, &generic_stub_call, not_taken);
+  __ j(not_equal, &generic_stub_call);
 #endif
 
   // Load the initial map and verify that it is in fact a map.
@@ -3344,36 +3351,82 @@
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
-    JSObject*receiver, ExternalArrayType array_type, Code::Flags flags) {
+MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
+    JSObject*receiver, ExternalArrayType array_type) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label slow, failed_allocation;
+  MaybeObject* maybe_stub =
+      KeyedLoadExternalArrayStub(array_type).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(edx,
+                 Handle<Map>(receiver->map()),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
-  // Check that the object isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
+}
+
+
+MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
+    JSObject* receiver, ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  MaybeObject* maybe_stub =
+      KeyedStoreExternalArrayStub(array_type).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(edx,
+                 Handle<Map>(receiver->map()),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  return GetCode();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, failed_allocation, slow;
+
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi.
   __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow, not_taken);
+  __ j(not_zero, &miss_force_generic);
 
-  // Check that the map matches.
-  __ CheckMap(edx, Handle<Map>(receiver->map()), &slow, false);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
-  // eax: key, known to be a smi.
-  // edx: receiver, known to be a JSObject.
-  // ebx: elements object, known to be an external array.
   // Check that the index is in range.
   __ mov(ecx, eax);
   __ SmiUntag(ecx);  // Untag the index.
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
   __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
-  __ j(above_equal, &slow);
+  __ j(above_equal, &miss_force_generic);
   __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
   // ebx: base pointer of external storage
   switch (array_type) {
@@ -3397,6 +3450,9 @@
     case kExternalFloatArray:
       __ fld_s(Operand(ebx, ecx, times_4, 0));
       break;
+    case kExternalDoubleArray:
+      __ fld_d(Operand(ebx, ecx, times_8, 0));
+      break;
     default:
       UNREACHABLE();
       break;
@@ -3454,7 +3510,8 @@
     __ mov(eax, ecx);
     __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
     __ ret(0);
-  } else if (array_type == kExternalFloatArray) {
+  } else if (array_type == kExternalFloatArray ||
+             array_type == kExternalDoubleArray) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
     __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
@@ -3476,47 +3533,48 @@
 
   // Slow case: Jump to runtime.
   __ bind(&slow);
-  Counters* counters = isolate()->counters();
+  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
+
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
 
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(eax);  // name
-  __ push(ebx);  // return address
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
 
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
-  // Return the generated code.
-  return GetCode(flags);
-}
-
-
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
-    JSObject* receiver, ExternalArrayType array_type, Code::Flags flags) {
   // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
+  //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label slow, check_heap_number;
 
-  // Check that the object isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  // Miss case: Jump to runtime.
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
 
-  // Check that the map matches.
-  __ CheckMap(edx, Handle<Map>(receiver->map()), &slow, false);
+
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+    MacroAssembler* masm,
+    ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic, slow, check_heap_number;
+
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi.
   __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow);
+  __ j(not_zero, &miss_force_generic);
 
   // Check that the index is in range.
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3547,9 +3605,9 @@
   switch (array_type) {
     case kExternalPixelArray:
       {  // Clamp the value to [0..255].
-        NearLabel done;
+        Label done;
         __ test(ecx, Immediate(0xFFFFFF00));
-        __ j(zero, &done);
+        __ j(zero, &done, Label::kNear);
         __ setcc(negative, ecx);  // 1 if negative, 0 if positive.
         __ dec_b(ecx);  // 0 if negative, 255 if positive.
         __ bind(&done);
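Note: the clamp sequence above (test against 0xFFFFFF00, then setcc(negative) and dec_b) forces an untagged integer into the [0..255] range that pixel arrays require. The standalone C++ sketch below mirrors that logic; ClampToPixel and the main() driver are illustrative names only, not part of V8 or of this patch.

#include <cstdint>
#include <cstdio>

static uint8_t ClampToPixel(int32_t value) {
  // In range: no bits above the low byte are set (the test/jz fast path).
  if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);
  // setcc(negative): 1 if the value is negative, 0 otherwise.
  uint8_t negative = value < 0 ? 1 : 0;
  // dec_b: 1 -> 0 (negative clamps to 0), 0 -> 255 (too large clamps to 255).
  return static_cast<uint8_t>(negative - 1);
}

int main() {
  std::printf("%d %d %d %d\n",
              ClampToPixel(-5), ClampToPixel(0),
              ClampToPixel(200), ClampToPixel(300));  // expect: 0 0 200 255
  return 0;
}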
@@ -3569,11 +3627,16 @@
       __ mov(Operand(edi, ebx, times_4, 0), ecx);
       break;
     case kExternalFloatArray:
+    case kExternalDoubleArray:
       // Need to perform int-to-float conversion.
       __ push(ecx);
       __ fild_s(Operand(esp, 0));
       __ pop(ecx);
-      __ fstp_s(Operand(edi, ebx, times_4, 0));
+      if (array_type == kExternalFloatArray) {
+        __ fstp_s(Operand(edi, ebx, times_4, 0));
+      } else {  // array_type == kExternalDoubleArray.
+        __ fstp_d(Operand(edi, ebx, times_8, 0));
+      }
       break;
     default:
       UNREACHABLE();
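Note: the kExternalDoubleArray cases added above write through the raw external pointer with an 8-byte stride (times_8, fstp_d) instead of the 4-byte float stride. The plain C++ sketch below shows the equivalent bounds-checked typed store; ExternalDoubleArray and StoreElement are illustrative stand-ins, not V8's heap layout or API.

#include <cstdio>

struct ExternalDoubleArray {
  int length;
  double* external_pointer;  // raw backing store, 8 bytes per element
};

static void StoreElement(ExternalDoubleArray* array, int index, double value) {
  if (index < 0 || index >= array->length) return;  // out of range: bail out, like the miss/slow path
  array->external_pointer[index] = value;           // fstp_d(Operand(edi, ebx, times_8, 0))
}

int main() {
  double backing[4] = {0.0, 0.0, 0.0, 0.0};
  ExternalDoubleArray array = {4, backing};
  StoreElement(&array, 2, 3.5);  // in range
  StoreElement(&array, 9, 7.0);  // out of range, ignored here
  std::printf("%g %g\n", backing[2], backing[3]);  // expect: 3.5 0
  return 0;
}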
@@ -3590,7 +3653,7 @@
     // edi: elements array
     // ebx: untagged index
     __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-           Immediate(factory()->heap_number_map()));
+           Immediate(masm->isolate()->factory()->heap_number_map()));
     __ j(not_equal, &slow);
 
     // The WebGL specification leaves the behavior of storing NaN and
@@ -3603,6 +3666,10 @@
       __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
       __ fstp_s(Operand(edi, ebx, times_4, 0));
       __ ret(0);
+    } else if (array_type == kExternalDoubleArray) {
+      __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+      __ fstp_d(Operand(edi, ebx, times_8, 0));
+      __ ret(0);
     } else {
       // Perform float-to-int conversion with truncation (round-to-zero)
       // behavior.
@@ -3621,9 +3688,9 @@
           switch (array_type) {
             case kExternalPixelArray:
               {  // Clamp the value to [0..255].
-                NearLabel done;
+                Label done;
                 __ test(ecx, Immediate(0xFFFFFF00));
-                __ j(zero, &done);
+                __ j(zero, &done, Label::kNear);
                 __ setcc(negative, ecx);  // 1 if negative, 0 if positive.
                 __ dec_b(ecx);  // 0 if negative, 255 if positive.
                 __ bind(&done);
@@ -3681,6 +3748,9 @@
 
   // Slow case: call runtime.
   __ bind(&slow);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->keyed_store_external_array_slow(), 1);
+
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -3688,19 +3758,109 @@
   //  -- esp[0] : return address
   // -----------------------------------
 
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(NONE)));   // PropertyAttributes
-  __ push(Immediate(Smi::FromInt(
-      Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
-  __ push(ebx);   // return address
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
 
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
 
-  return GetCode(flags);
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss_force_generic);
+
+  // Get the elements array.
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(ecx);
+
+  // Check that the key is within bounds.
+  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss_force_generic);
+
+  // Load the result and make sure it's not the hole.
+  __ mov(ebx, Operand(ecx, eax, times_2,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
+  __ j(equal, &miss_force_generic);
+  __ mov(eax, ebx);
+  __ ret(0);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss_force_generic);
+
+  // Get the elements array and make sure it is a fast element array, not 'cow'.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+         Immediate(masm->isolate()->factory()->fixed_array_map()));
+  __ j(not_equal, &miss_force_generic);
+
+  if (is_js_array) {
+    // Check that the key is within bounds.
+    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // smis.
+    __ j(above_equal, &miss_force_generic);
+  } else {
+    // Check that the key is within bounds.
+    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // smis.
+    __ j(above_equal, &miss_force_generic);
+  }
+
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register eax.
+  __ mov(edx, Operand(eax));
+  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
+  __ RecordWrite(edi, 0, edx, ecx);
+
+  // Done.
+  __ ret(0);
+
+  // Handle store cache miss, replacing the ic with the generic stub.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic_force_generic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
 }
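Note: the new CompileLoadMegamorphic/CompileStoreMegamorphic stubs in this file replace the single-map specialized stubs with a short dispatch: compare the receiver's map against each known map and tail-jump to the matching handler IC, otherwise fall through to the generic miss builtin. The plain C++ sketch below models that shape; Map, Handler and the handler functions are stand-ins, not V8 types.

#include <cstddef>
#include <cstdio>
#include <vector>

struct Map {};  // stand-in for a receiver map

typedef int (*Handler)(int key);

static int FastElementLoad(int key) { return key * 2; }     // a specialized handler IC
static int ExternalArrayLoad(int key) { return key + 100; }  // another handler IC
static int GenericMiss(int key) { (void)key; return -1; }    // KeyedLoadIC_Miss

struct MegamorphicStub {
  std::vector<const Map*> maps;
  std::vector<Handler> handlers;

  int Dispatch(const Map* receiver_map, int key) const {
    // Mirrors the emitted loop: cmp(map_reg, map); j(equal, handler_ic).
    for (std::size_t i = 0; i < maps.size(); ++i) {
      if (maps[i] == receiver_map) return handlers[i](key);
    }
    return GenericMiss(key);  // bind(&miss); jmp(miss_ic)
  }
};

int main() {
  Map fast_map, external_map, unknown_map;
  MegamorphicStub stub;
  stub.maps.push_back(&fast_map);
  stub.handlers.push_back(FastElementLoad);
  stub.maps.push_back(&external_map);
  stub.handlers.push_back(ExternalArrayLoad);
  std::printf("%d %d %d\n",
              stub.Dispatch(&fast_map, 3),      // 6
              stub.Dispatch(&external_map, 3),  // 103
              stub.Dispatch(&unknown_map, 3));  // -1 (miss)
  return 0;
}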
 
 
diff --git a/src/ic.cc b/src/ic.cc
index 99eb21f..0e87e51 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,7 +67,33 @@
     State new_state = StateFrom(new_target,
                                 HEAP->undefined_value(),
                                 HEAP->undefined_value());
-    PrintF("[%s (%c->%c)%s", type,
+    PrintF("[%s in ", type);
+    StackFrameIterator it;
+    while (it.frame()->fp() != this->fp()) it.Advance();
+    StackFrame* raw_frame = it.frame();
+    if (raw_frame->is_internal()) {
+      Isolate* isolate = new_target->GetIsolate();
+      Code* apply_builtin = isolate->builtins()->builtin(
+          Builtins::kFunctionApply);
+      if (raw_frame->unchecked_code() == apply_builtin) {
+        PrintF("apply from ");
+        it.Advance();
+        raw_frame = it.frame();
+      }
+    }
+    if (raw_frame->is_java_script()) {
+      JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+      Code* js_code = frame->unchecked_code();
+      // Find the function on the stack and both the active code for the
+      // function and the original code.
+      JSFunction* function = JSFunction::cast(frame->function());
+      function->PrintName();
+      int code_offset = address() - js_code->instruction_start();
+      PrintF("+%d", code_offset);
+    } else {
+      PrintF("<unknown>");
+    }
+    PrintF(" (%c->%c)%s",
            TransitionMarkFromState(old_state),
            TransitionMarkFromState(new_state),
            extra_info);
@@ -274,15 +300,14 @@
   switch (target->kind()) {
     case Code::LOAD_IC: return LoadIC::Clear(address, target);
     case Code::KEYED_LOAD_IC:
-    case Code::KEYED_EXTERNAL_ARRAY_LOAD_IC:
       return KeyedLoadIC::Clear(address, target);
     case Code::STORE_IC: return StoreIC::Clear(address, target);
     case Code::KEYED_STORE_IC:
-    case Code::KEYED_EXTERNAL_ARRAY_STORE_IC:
       return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC:  return KeyedCallIC::Clear(address, target);
-    case Code::TYPE_RECORDING_BINARY_OP_IC:
+    case Code::UNARY_OP_IC:
+    case Code::BINARY_OP_IC:
     case Code::COMPARE_IC:
       // Clearing these is tricky and does not
       // make any performance difference.
@@ -293,65 +318,36 @@
 
 
 void CallICBase::Clear(Address address, Code* target) {
+  bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
   State state = target->ic_state();
   if (state == UNINITIALIZED) return;
   Code* code =
       Isolate::Current()->stub_cache()->FindCallInitialize(
           target->arguments_count(),
           target->ic_in_loop(),
+          contextual ? RelocInfo::CODE_TARGET_CONTEXT : RelocInfo::CODE_TARGET,
           target->kind());
   SetTargetAtAddress(address, code);
 }
 
 
-void KeyedLoadIC::ClearInlinedVersion(Address address) {
-  // Insert null as the map to check for to make sure the map check fails
-  // sending control flow to the IC instead of the inlined version.
-  PatchInlinedLoad(address, HEAP->null_value());
-}
-
-
 void KeyedLoadIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
   // Make sure to also clear the map used in inline fast cases.  If we
   // do not clear these maps, cached code can keep objects alive
   // through the embedded maps.
-  ClearInlinedVersion(address);
   SetTargetAtAddress(address, initialize_stub());
 }
 
 
-void LoadIC::ClearInlinedVersion(Address address) {
-  // Reset the map check of the inlined inobject property load (if
-  // present) to guarantee failure by holding an invalid map (the null
-  // value).  The offset can be patched to anything.
-  Heap* heap = HEAP;
-  PatchInlinedLoad(address, heap->null_value(), 0);
-  PatchInlinedContextualLoad(address,
-                             heap->null_value(),
-                             heap->null_value(),
-                             true);
-}
-
-
 void LoadIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
-  ClearInlinedVersion(address);
   SetTargetAtAddress(address, initialize_stub());
 }
 
 
-void StoreIC::ClearInlinedVersion(Address address) {
-  // Reset the map check of the inlined inobject property store (if
-  // present) to guarantee failure by holding an invalid map (the null
-  // value).  The offset can be patched to anything.
-  PatchInlinedStore(address, HEAP->null_value(), 0);
-}
-
-
 void StoreIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
-  ClearInlinedVersion(address);
   SetTargetAtAddress(address,
       (target->extra_ic_state() == kStrictMode)
         ? initialize_stub_strict()
@@ -359,21 +355,6 @@
 }
 
 
-void KeyedStoreIC::ClearInlinedVersion(Address address) {
-  // Insert null as the elements map to check for.  This will make
-  // sure that the elements fast-case map check fails so that control
-  // flows to the IC instead of the inlined version.
-  PatchInlinedStore(address, HEAP->null_value());
-}
-
-
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {
-  // Restore the fast-case elements map check so that the inlined
-  // version can be used again.
-  PatchInlinedStore(address, HEAP->fixed_array_map());
-}
-
-
 void KeyedStoreIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
   SetTargetAtAddress(address,
@@ -595,7 +576,7 @@
         ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
         // If we're in the default (fastest) state and the index is
         // out of bounds, update the state to record this fact.
-        if (*extra_ic_state == DEFAULT_STRING_STUB &&
+        if (StringStubState::decode(*extra_ic_state) == DEFAULT_STRING_STUB &&
             argc >= 1 && args[1]->IsNumber()) {
           double index;
           if (args[1]->IsSmi()) {
@@ -605,7 +586,9 @@
             index = DoubleToInteger(HeapNumber::cast(args[1])->value());
           }
           if (index < 0 || index >= string->length()) {
-            *extra_ic_state = STRING_INDEX_OUT_OF_BOUNDS;
+            *extra_ic_state =
+                StringStubState::update(*extra_ic_state,
+                                        STRING_INDEX_OUT_OF_BOUNDS);
             return true;
           }
         }
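Note: the extra_ic_state changes in this file and in the stub compilers (Contextual::decode at the invoke sites, StringStubState::decode/update here) pack several small flags into one Code::ExtraICState integer using V8's BitField-style helpers. The standalone sketch below shows that packing with a simplified BitField exposing the same encode/decode/update interface; the bit layout and enum values here are illustrative assumptions, not the actual V8 definitions.

#include <cstdint>
#include <cstdio>

enum CallKind { CALL_AS_METHOD, CALL_AS_FUNCTION };
enum StringStubKind { DEFAULT_STRING_STUB, STRING_INDEX_OUT_OF_BOUNDS };

// Minimal BitField-like helper: 'size' bits starting at bit 'shift'.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t state) {
    return static_cast<T>((state & kMask) >> shift);
  }
  static uint32_t update(uint32_t state, T value) {
    return (state & ~kMask) | encode(value);
  }
};

// Hypothetical layout: one bit for the contextual-call flag, one for the
// string-stub state.
typedef BitField<bool, 0, 1> Contextual;
typedef BitField<StringStubKind, 1, 1> StringStubState;

int main() {
  uint32_t extra_ic_state =
      Contextual::encode(true) | StringStubState::encode(DEFAULT_STRING_STUB);
  // Same pattern as the call sites in the patch.
  CallKind call_kind =
      Contextual::decode(extra_ic_state) ? CALL_AS_FUNCTION : CALL_AS_METHOD;
  extra_ic_state =
      StringStubState::update(extra_ic_state, STRING_INDEX_OUT_OF_BOUNDS);
  std::printf("%d %d\n", call_kind == CALL_AS_FUNCTION,
              StringStubState::decode(extra_ic_state));  // expect: 1 1
  return 0;
}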
@@ -633,6 +616,7 @@
       maybe_code = isolate()->stub_cache()->ComputeCallField(argc,
                                                              in_loop,
                                                              kind_,
+                                                             extra_ic_state,
                                                              *name,
                                                              *object,
                                                              lookup->holder(),
@@ -668,6 +652,7 @@
         maybe_code = isolate()->stub_cache()->ComputeCallGlobal(argc,
                                                                 in_loop,
                                                                 kind_,
+                                                                extra_ic_state,
                                                                 *name,
                                                                 *receiver,
                                                                 global,
@@ -682,6 +667,7 @@
         maybe_code = isolate()->stub_cache()->ComputeCallNormal(argc,
                                                                 in_loop,
                                                                 kind_,
+                                                                extra_ic_state,
                                                                 *name,
                                                                 *receiver);
       }
@@ -692,6 +678,7 @@
       maybe_code = isolate()->stub_cache()->ComputeCallInterceptor(
           argc,
           kind_,
+          extra_ic_state,
           *name,
           *object,
           lookup->holder());
@@ -730,9 +717,11 @@
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    maybe_code = isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
-                                                                    in_loop,
-                                                                    kind_);
+    maybe_code =
+        isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
+                                                           in_loop,
+                                                           kind_,
+                                                           extra_ic_state);
   } else if (state == MONOMORPHIC) {
     if (kind_ == Code::CALL_IC &&
         TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
@@ -752,9 +741,11 @@
                                           object,
                                           name);
     } else {
-      maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(argc,
-                                                                   in_loop,
-                                                                   kind_);
+      maybe_code =
+          isolate()->stub_cache()->ComputeCallMegamorphic(argc,
+                                                          in_loop,
+                                                          kind_,
+                                                          extra_ic_state);
     }
   } else {
     maybe_code = ComputeMonomorphicStub(lookup,
@@ -812,7 +803,7 @@
     int argc = target()->arguments_count();
     InLoopFlag in_loop = target()->ic_in_loop();
     MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
-        argc, in_loop, Code::KEYED_CALL_IC);
+        argc, in_loop, Code::KEYED_CALL_IC, Code::kNoExtraICState);
     Object* code;
     if (maybe_code->ToObject(&code)) {
       set_target(Code::cast(code));
@@ -873,9 +864,6 @@
 #endif
       if (state == PREMONOMORPHIC) {
         if (object->IsString()) {
-          Map* map = HeapObject::cast(*object)->map();
-          const int offset = String::kLengthOffset;
-          PatchInlinedLoad(address(), map, offset);
           set_target(isolate()->builtins()->builtin(
               Builtins::kLoadIC_StringLength));
         } else {
@@ -903,9 +891,6 @@
       if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
 #endif
       if (state == PREMONOMORPHIC) {
-        Map* map = HeapObject::cast(*object)->map();
-        const int offset = JSArray::kLengthOffset;
-        PatchInlinedLoad(address(), map, offset);
         set_target(isolate()->builtins()->builtin(
             Builtins::kLoadIC_ArrayLength));
       } else {
@@ -948,70 +933,14 @@
     LOG(isolate(), SuspectReadEvent(*name, *object));
   }
 
-  bool can_be_inlined_precheck =
-      FLAG_use_ic &&
-      lookup.IsProperty() &&
-      lookup.IsCacheable() &&
-      lookup.holder() == *object &&
-      !object->IsAccessCheckNeeded();
-
-  bool can_be_inlined =
-      can_be_inlined_precheck &&
-      state == PREMONOMORPHIC &&
-      lookup.type() == FIELD;
-
-  bool can_be_inlined_contextual =
-      can_be_inlined_precheck &&
-      state == UNINITIALIZED &&
-      lookup.holder()->IsGlobalObject() &&
-      lookup.type() == NORMAL;
-
-  if (can_be_inlined) {
-    Map* map = lookup.holder()->map();
-    // Property's index in the properties array.  If negative we have
-    // an inobject property.
-    int index = lookup.GetFieldIndex() - map->inobject_properties();
-    if (index < 0) {
-      // Index is an offset from the end of the object.
-      int offset = map->instance_size() + (index * kPointerSize);
-      if (PatchInlinedLoad(address(), map, offset)) {
-        set_target(megamorphic_stub());
-        TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
-        return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
-      } else {
-        TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
-                       name);
-      }
-    } else {
-      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
-    }
-  } else if (can_be_inlined_contextual) {
-    Map* map = lookup.holder()->map();
-    JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
-        lookup.holder()->property_dictionary()->ValueAt(
-            lookup.GetDictionaryEntry()));
-    if (PatchInlinedContextualLoad(address(),
-                                   map,
-                                   cell,
-                                   lookup.IsDontDelete())) {
-      set_target(megamorphic_stub());
-      TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
-      ASSERT(cell->value() != isolate()->heap()->the_hole_value());
-      return cell->value();
-    }
-  } else {
-    if (FLAG_use_ic && state == PREMONOMORPHIC) {
-      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
-    }
-  }
-
   // Update inline cache and stub cache.
   if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, object, name);
   }
 
   PropertyAttributes attr;
-  if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
+  if (lookup.IsProperty() &&
+      (lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
     // Get the property.
     Object* result;
     { MaybeObject* maybe_result =
@@ -1139,9 +1068,49 @@
 }
 
 
+String* KeyedLoadIC::GetStubNameForCache(IC::State ic_state) {
+  if (ic_state == MONOMORPHIC) {
+    return isolate()->heap()->KeyedLoadSpecializedMonomorphic_symbol();
+  } else {
+    ASSERT(ic_state == MEGAMORPHIC);
+    return isolate()->heap()->KeyedLoadSpecializedPolymorphic_symbol();
+  }
+}
+
+
+MaybeObject* KeyedLoadIC::GetFastElementStubWithoutMapCheck(
+    bool is_js_array) {
+  return KeyedLoadFastElementStub().TryGetCode();
+}
+
+
+MaybeObject* KeyedLoadIC::GetExternalArrayStubWithoutMapCheck(
+    ExternalArrayType array_type) {
+  return KeyedLoadExternalArrayStub(array_type).TryGetCode();
+}
+
+
+MaybeObject* KeyedLoadIC::ConstructMegamorphicStub(
+    MapList* receiver_maps,
+    CodeList* targets,
+    StrictModeFlag strict_mode) {
+  Object* object;
+  KeyedLoadStubCompiler compiler;
+  MaybeObject* maybe_code = compiler.CompileLoadMegamorphic(receiver_maps,
+                                                            targets);
+  if (!maybe_code->ToObject(&object)) return maybe_code;
+  isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
+  PROFILE(isolate(), CodeCreateEvent(
+      Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG,
+      Code::cast(object), 0));
+  return object;
+}
+
+
 MaybeObject* KeyedLoadIC::Load(State state,
                                Handle<Object> object,
-                               Handle<Object> key) {
+                               Handle<Object> key,
+                               bool force_generic_stub) {
   // Check for values that can be converted into a symbol.
   // TODO(1295): Remove this code.
   HandleScope scope(isolate());
@@ -1267,47 +1236,32 @@
 
   if (use_ic) {
     Code* stub = generic_stub();
-    if (state == UNINITIALIZED) {
+    if (!force_generic_stub) {
       if (object->IsString() && key->IsNumber()) {
-        stub = string_stub();
+        if (state == UNINITIALIZED) {
+          stub = string_stub();
+        }
       } else if (object->IsJSObject()) {
-        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-        if (receiver->HasExternalArrayElements()) {
-          MaybeObject* probe =
-              isolate()->stub_cache()->ComputeKeyedLoadOrStoreExternalArray(
-                  *receiver, false, kNonStrictMode);
-          stub = probe->IsFailure() ?
-              NULL : Code::cast(probe->ToObjectUnchecked());
-        } else if (receiver->HasIndexedInterceptor()) {
+        JSObject* receiver = JSObject::cast(*object);
+        if (receiver->HasIndexedInterceptor()) {
           stub = indexed_interceptor_stub();
-        } else if (key->IsSmi() &&
-                   receiver->map()->has_fast_elements()) {
-          MaybeObject* probe =
-              isolate()->stub_cache()->ComputeKeyedLoadSpecialized(*receiver);
-          stub = probe->IsFailure() ?
-              NULL : Code::cast(probe->ToObjectUnchecked());
+        } else if (key->IsSmi()) {
+          MaybeObject* maybe_stub = ComputeStub(receiver,
+                                                false,
+                                                kNonStrictMode,
+                                                stub);
+          stub = maybe_stub->IsFailure() ?
+              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
         }
       }
     }
     if (stub != NULL) set_target(stub);
+  }
 
 #ifdef DEBUG
-    TraceIC("KeyedLoadIC", key, state, target());
+  TraceIC("KeyedLoadIC", key, state, target());
 #endif  // DEBUG
 
-    // For JSObjects with fast elements that are not value wrappers
-    // and that do not have indexed interceptors, we initialize the
-    // inlined fast case (if present) by patching the inlined map
-    // check.
-    if (object->IsJSObject() &&
-        !object->IsJSValue() &&
-        !JSObject::cast(*object)->HasIndexedInterceptor() &&
-        JSObject::cast(*object)->HasFastElements()) {
-      Map* map = JSObject::cast(*object)->map();
-      PatchInlinedLoad(address(), map);
-    }
-  }
-
   // Get the property.
   return Runtime::GetObjectProperty(isolate(), object, key);
 }
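
The rewritten Load above reduces to a small selection ladder plus the new force_generic_stub escape hatch used by the MissForceGeneric runtime entry. A standalone sketch of that ladder under hypothetical names (StubChoice, SelectLoadStub); the real code returns Code* objects and defers the Smi-key case to KeyedIC::ComputeStub:

#include <cassert>

enum StubChoice { STRING_STUB, INDEXED_INTERCEPTOR_STUB, COMPUTED_STUB, GENERIC_STUB };

// Mirrors the if-ladder in KeyedLoadIC::Load: force_generic pins the IC to the
// generic stub; otherwise strings, indexed interceptors and Smi-keyed JSObjects
// each get a specialized handler.
StubChoice SelectLoadStub(bool force_generic,
                          bool uninitialized,
                          bool string_receiver_number_key,
                          bool js_object_receiver,
                          bool has_indexed_interceptor,
                          bool smi_key) {
  if (force_generic) return GENERIC_STUB;
  if (string_receiver_number_key) {
    return uninitialized ? STRING_STUB : GENERIC_STUB;
  }
  if (js_object_receiver) {
    if (has_indexed_interceptor) return INDEXED_INTERCEPTOR_STUB;
    if (smi_key) return COMPUTED_STUB;  // ComputeStub picks MONO/MEGA/generic.
  }
  return GENERIC_STUB;
}

int main() {
  assert(SelectLoadStub(true,  false, false, true,  false, true)  == GENERIC_STUB);
  assert(SelectLoadStub(false, false, false, true,  false, true)  == COMPUTED_STUB);
  assert(SelectLoadStub(false, true,  true,  false, false, false) == STRING_STUB);
  return 0;
}
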
@@ -1471,57 +1425,7 @@
     LookupResult lookup;
 
     if (LookupForWrite(*receiver, *name, &lookup)) {
-      bool can_be_inlined =
-          state == UNINITIALIZED &&
-          lookup.IsProperty() &&
-          lookup.holder() == *receiver &&
-          lookup.type() == FIELD &&
-          !receiver->IsAccessCheckNeeded();
-
-      if (can_be_inlined) {
-        Map* map = lookup.holder()->map();
-        // Property's index in the properties array.  If negative we have
-        // an inobject property.
-        int index = lookup.GetFieldIndex() - map->inobject_properties();
-        if (index < 0) {
-          // Index is an offset from the end of the object.
-          int offset = map->instance_size() + (index * kPointerSize);
-          if (PatchInlinedStore(address(), map, offset)) {
-            set_target((strict_mode == kStrictMode)
-                         ? megamorphic_stub_strict()
-                         : megamorphic_stub());
-#ifdef DEBUG
-            if (FLAG_trace_ic) {
-              PrintF("[StoreIC : inline patch %s]\n", *name->ToCString());
-            }
-#endif
-            return receiver->SetProperty(*name, *value, NONE, strict_mode);
-#ifdef DEBUG
-
-          } else {
-            if (FLAG_trace_ic) {
-              PrintF("[StoreIC : no inline patch %s (patching failed)]\n",
-                     *name->ToCString());
-            }
-          }
-        } else {
-          if (FLAG_trace_ic) {
-            PrintF("[StoreIC : no inline patch %s (not inobject)]\n",
-                   *name->ToCString());
-          }
-        }
-      } else {
-        if (state == PREMONOMORPHIC) {
-          if (FLAG_trace_ic) {
-            PrintF("[StoreIC : no inline patch %s (not inlinable)]\n",
-                   *name->ToCString());
-#endif
-          }
-        }
-      }
-
-      // If no inlined store ic was patched, generate a stub for this
-      // store.
+      // Generate a stub for this store.
       UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
     } else {
       // Strict mode doesn't allow setting non-existent global property
@@ -1653,11 +1557,256 @@
 }
 
 
+static bool AddOneReceiverMapIfMissing(MapList* receiver_maps,
+                                       Map* new_receiver_map) {
+  for (int current = 0; current < receiver_maps->length(); ++current) {
+    if (receiver_maps->at(current) == new_receiver_map) {
+      return false;
+    }
+  }
+  receiver_maps->Add(new_receiver_map);
+  return true;
+}
+
+
+void KeyedIC::GetReceiverMapsForStub(Code* stub, MapList* result) {
+  ASSERT(stub->is_inline_cache_stub());
+  if (stub == string_stub()) {
+    return result->Add(isolate()->heap()->string_map());
+  } else if (stub->is_keyed_load_stub() || stub->is_keyed_store_stub()) {
+    if (stub->ic_state() == MONOMORPHIC) {
+      result->Add(Map::cast(stub->FindFirstMap()));
+    } else {
+      ASSERT(stub->ic_state() == MEGAMORPHIC);
+      AssertNoAllocation no_allocation;
+      int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+      for (RelocIterator it(stub, mask); !it.done(); it.next()) {
+        RelocInfo* info = it.rinfo();
+        Object* object = info->target_object();
+        ASSERT(object->IsMap());
+        result->Add(Map::cast(object));
+      }
+    }
+  }
+}
+
+
+MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
+                                  bool is_store,
+                                  StrictModeFlag strict_mode,
+                                  Code* generic_stub) {
+  State ic_state = target()->ic_state();
+  Code* monomorphic_stub;
+  // Always compute the MONOMORPHIC stub, even if the MEGAMORPHIC stub ends up
+  // being used. This is necessary because the megamorphic stub needs to have
+  // access to more information than what is stored in the receiver map in some
+  // cases (external arrays need the array type from the MONOMORPHIC stub).
+  MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
+                                                   is_store,
+                                                   strict_mode,
+                                                   generic_stub);
+  if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
+
+  if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+    return monomorphic_stub;
+  }
+  ASSERT(target() != generic_stub);
+
+  // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
+  // via megamorphic stubs, since they don't have a map in their relocation info
+  // and so the stubs can't be harvested for the object needed for a map check.
+  if (target()->type() != NORMAL) {
+    return generic_stub;
+  }
+
+  // Determine the list of receiver maps that this call site has seen,
+  // adding the map that was just encountered.
+  MapList target_receiver_maps;
+  GetReceiverMapsForStub(target(), &target_receiver_maps);
+  if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map())) {
+    // If the miss wasn't due to an unseen map, a MEGAMORPHIC stub
+    // won't help, use the generic stub.
+    return generic_stub;
+  }
+
+  // TODO(1385): Currently MEGAMORPHIC stubs are cached in the receiver map stub
+  // cache, but that can put receiver types together from unrelated call sites
+  // into the same stub--they always handle the union of all receiver maps seen
+  // at all call sites involving the receiver map. This is only an
+  // approximation: ideally, there would be a global cache that mapped sets of
+  // receiver maps to MEGAMORPHIC stubs. The complexity of the MEGAMORPHIC stub
+  // computation also leads to direct manipulation of the stub cache from the IC
+  // code, which the global cache solution would avoid.
+  Code::Kind kind = this->kind();
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         NOT_IN_LOOP,
+                                         MEGAMORPHIC,
+                                         strict_mode);
+  String* megamorphic_name = GetStubNameForCache(MEGAMORPHIC);
+  Object* maybe_cached_stub = receiver->map()->FindInCodeCache(megamorphic_name,
+                                                               flags);
+
+  // Create a set of all receiver maps that have been seen at the IC call site
+  // and those seen by the MEGAMORPHIC cached stub, if that's the stub that's
+  // been selected.
+  MapList receiver_maps;
+  if (!maybe_cached_stub->IsUndefined()) {
+    GetReceiverMapsForStub(Code::cast(maybe_cached_stub), &receiver_maps);
+  }
+  bool added_map = false;
+  for (int i = 0; i < target_receiver_maps.length(); ++i) {
+    if (AddOneReceiverMapIfMissing(&receiver_maps,
+                                   target_receiver_maps.at(i))) {
+      added_map = true;
+    }
+  }
+  ASSERT(receiver_maps.length() > 0);
+
+  // If the maximum number of receiver maps has been exceeded, use the Generic
+  // version of the IC.
+  if (receiver_maps.length() > KeyedIC::kMaxKeyedPolymorphism) {
+    return generic_stub;
+  }
+
+  // If no maps have been seen at the call site that aren't in the cached
+  // stub, then use it.
+  if (!added_map) {
+    ASSERT(!maybe_cached_stub->IsUndefined());
+    ASSERT(maybe_cached_stub->IsCode());
+    return Code::cast(maybe_cached_stub);
+  }
+
+  // Lookup all of the receiver maps in the cache, they should all already
+  // have MONOMORPHIC stubs.
+  CodeList handler_ics(KeyedIC::kMaxKeyedPolymorphism);
+  for (int current = 0; current < receiver_maps.length(); ++current) {
+    Map* receiver_map(receiver_maps.at(current));
+    MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
+        receiver_map,
+        strict_mode,
+        generic_stub);
+    Code* cached_stub;
+    if (!maybe_cached_stub->To(&cached_stub)) {
+      return maybe_cached_stub;
+    }
+    handler_ics.Add(cached_stub);
+  }
+
+  Code* stub;
+  // Build the MEGAMORPHIC stub.
+  maybe_stub = ConstructMegamorphicStub(&receiver_maps,
+                                        &handler_ics,
+                                        strict_mode);
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+
+  MaybeObject* maybe_update = receiver->UpdateMapCodeCache(
+      megamorphic_name,
+      stub);
+  if (maybe_update->IsFailure()) return maybe_update;
+  return stub;
+}
+
+
+MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
+    Map* receiver_map,
+    StrictModeFlag strict_mode,
+    Code* generic_stub) {
+  if ((receiver_map->instance_type() & kNotStringTag) == 0) {
+    ASSERT(string_stub() != NULL);
+    return string_stub();
+  } else if (receiver_map->has_external_array_elements()) {
+    // Determine the array type from the default MONOMORPHIC already generated
+    // stub. There is no other way to determine the type of the external array
+    // directly from the receiver type.
+    Code::Kind kind = this->kind();
+    Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
+                                                      NORMAL,
+                                                      strict_mode);
+    String* monomorphic_name = GetStubNameForCache(MONOMORPHIC);
+    Object* maybe_default_stub = receiver_map->FindInCodeCache(monomorphic_name,
+                                                               flags);
+    if (maybe_default_stub->IsUndefined()) {
+      return generic_stub;
+    }
+    Code* default_stub = Code::cast(maybe_default_stub);
+    return GetExternalArrayStubWithoutMapCheck(
+        default_stub->external_array_type());
+  } else if (receiver_map->has_fast_elements()) {
+    bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+    return GetFastElementStubWithoutMapCheck(is_js_array);
+  } else {
+    return generic_stub;
+  }
+}
+
+
+MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
+                                             bool is_store,
+                                             StrictModeFlag strict_mode,
+                                             Code* generic_stub) {
+  Code* result = NULL;
+  if (receiver->HasExternalArrayElements()) {
+    MaybeObject* maybe_stub =
+        isolate()->stub_cache()->ComputeKeyedLoadOrStoreExternalArray(
+            receiver, is_store, strict_mode);
+    if (!maybe_stub->To(&result)) return maybe_stub;
+  } else if (receiver->map()->has_fast_elements()) {
+    MaybeObject* maybe_stub =
+        isolate()->stub_cache()->ComputeKeyedLoadOrStoreFastElement(
+            receiver, is_store, strict_mode);
+    if (!maybe_stub->To(&result)) return maybe_stub;
+  } else {
+    result = generic_stub;
+  }
+  return result;
+}
+
+
+String* KeyedStoreIC::GetStubNameForCache(IC::State ic_state) {
+  if (ic_state == MONOMORPHIC) {
+    return isolate()->heap()->KeyedStoreSpecializedMonomorphic_symbol();
+  } else {
+    ASSERT(ic_state == MEGAMORPHIC);
+    return isolate()->heap()->KeyedStoreSpecializedPolymorphic_symbol();
+  }
+}
+
+
+MaybeObject* KeyedStoreIC::GetFastElementStubWithoutMapCheck(
+    bool is_js_array) {
+  return KeyedStoreFastElementStub(is_js_array).TryGetCode();
+}
+
+
+MaybeObject* KeyedStoreIC::GetExternalArrayStubWithoutMapCheck(
+    ExternalArrayType array_type) {
+  return KeyedStoreExternalArrayStub(array_type).TryGetCode();
+}
+
+
+MaybeObject* KeyedStoreIC::ConstructMegamorphicStub(
+    MapList* receiver_maps,
+    CodeList* targets,
+    StrictModeFlag strict_mode) {
+  Object* object;
+  KeyedStoreStubCompiler compiler(strict_mode);
+  MaybeObject* maybe_code = compiler.CompileStoreMegamorphic(receiver_maps,
+                                                             targets);
+  if (!maybe_code->ToObject(&object)) return maybe_code;
+  isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
+  PROFILE(isolate(), CodeCreateEvent(
+      Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG,
+      Code::cast(object), 0));
+  return object;
+}
+
+
 MaybeObject* KeyedStoreIC::Store(State state,
                                  StrictModeFlag strict_mode,
                                  Handle<Object> object,
                                  Handle<Object> key,
-                                 Handle<Object> value) {
+                                 Handle<Object> value,
+                                 bool force_generic) {
   if (key->IsSymbol()) {
     Handle<String> name = Handle<String>::cast(key);
 
@@ -1699,29 +1848,27 @@
   ASSERT(!(use_ic && object->IsJSGlobalProxy()));
 
   if (use_ic) {
-    Code* stub =
-        (strict_mode == kStrictMode) ? generic_stub_strict() : generic_stub();
-    if (state == UNINITIALIZED) {
-      if (object->IsJSObject()) {
-        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-        if (receiver->HasExternalArrayElements()) {
-          MaybeObject* probe =
-              isolate()->stub_cache()->ComputeKeyedLoadOrStoreExternalArray(
-                  *receiver, true, strict_mode);
-          stub = probe->IsFailure() ?
-              NULL : Code::cast(probe->ToObjectUnchecked());
-        } else if (key->IsSmi() && receiver->map()->has_fast_elements()) {
-          MaybeObject* probe =
-              isolate()->stub_cache()->ComputeKeyedStoreSpecialized(
-                  *receiver, strict_mode);
-          stub = probe->IsFailure() ?
-              NULL : Code::cast(probe->ToObjectUnchecked());
-        }
+    Code* stub = (strict_mode == kStrictMode)
+        ? generic_stub_strict()
+        : generic_stub();
+    if (!force_generic) {
+      if (object->IsJSObject() && key->IsSmi()) {
+        JSObject* receiver = JSObject::cast(*object);
+        MaybeObject* maybe_stub = ComputeStub(receiver,
+                                              true,
+                                              strict_mode,
+                                              stub);
+        stub = maybe_stub->IsFailure() ?
+            NULL : Code::cast(maybe_stub->ToObjectUnchecked());
       }
     }
     if (stub != NULL) set_target(stub);
   }
 
+#ifdef DEBUG
+  TraceIC("KeyedStoreIC", key, state, target());
+#endif
+
   // Set the property.
   return Runtime::SetObjectProperty(
       isolate(), object, key, value, NONE, strict_mode);
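
Both Load and Store now funnel Smi-keyed accesses through KeyedIC::ComputeStub. A simplified standalone model of that policy, with plain ints standing in for receiver maps and hypothetical names throughout (the real code additionally merges maps harvested from a cached MEGAMORPHIC stub):

#include <cassert>
#include <vector>

enum StubKind { MONOMORPHIC_STUB, MEGAMORPHIC_STUB, GENERIC_STUB };
static const int kMaxKeyedPolymorphism = 4;  // Same cap as KeyedIC above.

// Called on an IC miss with the receiver's map; *seen_maps is the per-call-site
// set of maps this IC has already specialized for.
StubKind OnMiss(std::vector<int>* seen_maps, int receiver_map) {
  if (seen_maps->empty()) {  // UNINITIALIZED / PREMONOMORPHIC: specialize once.
    seen_maps->push_back(receiver_map);
    return MONOMORPHIC_STUB;
  }
  for (size_t i = 0; i < seen_maps->size(); ++i) {
    // A miss on an already-seen map means more polymorphism won't help.
    if ((*seen_maps)[i] == receiver_map) return GENERIC_STUB;
  }
  seen_maps->push_back(receiver_map);
  if (static_cast<int>(seen_maps->size()) > kMaxKeyedPolymorphism) {
    return GENERIC_STUB;  // Too many shapes at this call site.
  }
  return MEGAMORPHIC_STUB;  // One MONOMORPHIC handler per map, map-checked dispatch.
}

int main() {
  std::vector<int> maps;
  assert(OnMiss(&maps, 1) == MONOMORPHIC_STUB);
  assert(OnMiss(&maps, 2) == MEGAMORPHIC_STUB);
  assert(OnMiss(&maps, 2) == GENERIC_STUB);
  return 0;
}
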
@@ -1890,7 +2037,16 @@
   ASSERT(args.length() == 2);
   KeyedLoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
-  return ic.Load(state, args.at<Object>(0), args.at<Object>(1));
+  return ic.Load(state, args.at<Object>(0), args.at<Object>(1), false);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 2);
+  KeyedLoadIC ic(isolate);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+  return ic.Load(state, args.at<Object>(0), args.at<Object>(1), true);
 }
 
 
@@ -1974,22 +2130,123 @@
                   static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
                   args.at<Object>(0),
                   args.at<Object>(1),
-                  args.at<Object>(2));
+                  args.at<Object>(2),
+                  false);
 }
 
 
-void TRBinaryOpIC::patch(Code* code) {
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 3);
+  KeyedStoreIC ic(isolate);
+  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+  Handle<Object> value = args.at<Object>(2);
+  StrictModeFlag strict_mode =
+      static_cast<StrictModeFlag>(extra_ic_state & kStrictMode);
+  return Runtime::SetObjectProperty(isolate,
+                                    object,
+                                    key,
+                                    value,
+                                    NONE,
+                                    strict_mode);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 3);
+  KeyedStoreIC ic(isolate);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+  return ic.Store(state,
+                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+                  args.at<Object>(0),
+                  args.at<Object>(1),
+                  args.at<Object>(2),
+                  true);
+}
+
+
+void UnaryOpIC::patch(Code* code) {
   set_target(code);
 }
 
 
-const char* TRBinaryOpIC::GetName(TypeInfo type_info) {
+const char* UnaryOpIC::GetName(TypeInfo type_info) {
+  switch (type_info) {
+    case UNINITIALIZED: return "Uninitialized";
+    case SMI: return "Smi";
+    case HEAP_NUMBER: return "HeapNumbers";
+    case GENERIC: return "Generic";
+    default: return "Invalid";
+  }
+}
+
+
+UnaryOpIC::State UnaryOpIC::ToState(TypeInfo type_info) {
+  switch (type_info) {
+    case UNINITIALIZED:
+      return ::v8::internal::UNINITIALIZED;
+    case SMI:
+    case HEAP_NUMBER:
+      return MONOMORPHIC;
+    case GENERIC:
+      return MEGAMORPHIC;
+  }
+  UNREACHABLE();
+  return ::v8::internal::UNINITIALIZED;
+}
+
+UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) {
+  ::v8::internal::TypeInfo operand_type =
+      ::v8::internal::TypeInfo::TypeFromValue(operand);
+  if (operand_type.IsSmi()) {
+    return SMI;
+  } else if (operand_type.IsNumber()) {
+    return HEAP_NUMBER;
+  } else {
+    return GENERIC;
+  }
+}
+
+
+UnaryOpIC::TypeInfo UnaryOpIC::ComputeNewType(
+    UnaryOpIC::TypeInfo current_type,
+    UnaryOpIC::TypeInfo previous_type) {
+  switch (previous_type) {
+    case UnaryOpIC::UNINITIALIZED:
+      return current_type;
+    case UnaryOpIC::SMI:
+      return (current_type == UnaryOpIC::GENERIC)
+          ? UnaryOpIC::GENERIC
+          : UnaryOpIC::HEAP_NUMBER;
+    case UnaryOpIC::HEAP_NUMBER:
+      return UnaryOpIC::GENERIC;
+    case UnaryOpIC::GENERIC:
+      // We should never do patching if we are in GENERIC state.
+      UNREACHABLE();
+      return UnaryOpIC::GENERIC;
+  }
+  UNREACHABLE();
+  return UnaryOpIC::GENERIC;
+}
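
ComputeNewType above only ever moves the unary IC towards GENERIC. A tiny standalone restatement (Next is a made-up name) with a worked transition:

#include <cassert>

enum UnaryType { UNINIT, SMI_T, HEAP_NUMBER_T, GENERIC_T };

// Same transitions as UnaryOpIC::ComputeNewType: a repeated SMI miss means the
// Smi fast path produced a non-Smi result, so the stub widens to HEAP_NUMBER;
// HEAP_NUMBER feedback widens all the way to GENERIC.
UnaryType Next(UnaryType previous, UnaryType current) {
  if (previous == UNINIT) return current;
  if (previous == SMI_T) return current == GENERIC_T ? GENERIC_T : HEAP_NUMBER_T;
  return GENERIC_T;  // HEAP_NUMBER previous state; GENERIC is never re-patched.
}

int main() {
  assert(Next(UNINIT, SMI_T) == SMI_T);         // First miss on a Smi operand.
  assert(Next(SMI_T, SMI_T) == HEAP_NUMBER_T);  // Smi path failed, e.g. -0.
  assert(Next(HEAP_NUMBER_T, SMI_T) == GENERIC_T);
  return 0;
}
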
+
+
+void BinaryOpIC::patch(Code* code) {
+  set_target(code);
+}
+
+
+const char* BinaryOpIC::GetName(TypeInfo type_info) {
   switch (type_info) {
     case UNINITIALIZED: return "Uninitialized";
     case SMI: return "SMI";
     case INT32: return "Int32s";
     case HEAP_NUMBER: return "HeapNumbers";
     case ODDBALL: return "Oddball";
+    case BOTH_STRING: return "BothStrings";
     case STRING: return "Strings";
     case GENERIC: return "Generic";
     default: return "Invalid";
@@ -1997,7 +2254,7 @@
 }
 
 
-TRBinaryOpIC::State TRBinaryOpIC::ToState(TypeInfo type_info) {
+BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
   switch (type_info) {
     case UNINITIALIZED:
       return ::v8::internal::UNINITIALIZED;
@@ -2005,6 +2262,7 @@
     case INT32:
     case HEAP_NUMBER:
     case ODDBALL:
+    case BOTH_STRING:
     case STRING:
       return MONOMORPHIC;
     case GENERIC:
@@ -2015,18 +2273,23 @@
 }
 
 
-TRBinaryOpIC::TypeInfo TRBinaryOpIC::JoinTypes(TRBinaryOpIC::TypeInfo x,
-                                               TRBinaryOpIC::TypeInfo y) {
+BinaryOpIC::TypeInfo BinaryOpIC::JoinTypes(BinaryOpIC::TypeInfo x,
+                                           BinaryOpIC::TypeInfo y) {
   if (x == UNINITIALIZED) return y;
   if (y == UNINITIALIZED) return x;
-  if (x == STRING && y == STRING) return STRING;
-  if (x == STRING || y == STRING) return GENERIC;
-  if (x >= y) return x;
+  if (x == y) return x;
+  if (x == BOTH_STRING && y == STRING) return STRING;
+  if (x == STRING && y == BOTH_STRING) return STRING;
+  if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
+    return GENERIC;
+  }
+  if (x > y) return x;
   return y;
 }
 
-TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
-                                                 Handle<Object> right) {
+
+BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Handle<Object> left,
+                                             Handle<Object> right) {
   ::v8::internal::TypeInfo left_type =
       ::v8::internal::TypeInfo::TypeFromValue(left);
   ::v8::internal::TypeInfo right_type =
@@ -2046,9 +2309,11 @@
     return HEAP_NUMBER;
   }
 
-  if (left_type.IsString() || right_type.IsString()) {
-    // Patching for fast string ADD makes sense even if only one of the
-    // arguments is a string.
+  // Patching for fast string ADD makes sense even if only one of the
+  // arguments is a string.
+  if (left_type.IsString())  {
+    return right_type.IsString() ? BOTH_STRING : STRING;
+  } else if (right_type.IsString()) {
     return STRING;
   }
 
@@ -2062,12 +2327,67 @@
 
 // defined in code-stubs-<arch>.cc
 // Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-                                          TRBinaryOpIC::TypeInfo type_info,
-                                          TRBinaryOpIC::TypeInfo result_type);
+Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info);
 
 
-RUNTIME_FUNCTION(MaybeObject*, TypeRecordingBinaryOp_Patch) {
+RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
+  ASSERT(args.length() == 4);
+
+  HandleScope scope(isolate);
+  Handle<Object> operand = args.at<Object>(0);
+  int key = Smi::cast(args[1])->value();
+  Token::Value op = static_cast<Token::Value>(Smi::cast(args[2])->value());
+  UnaryOpIC::TypeInfo previous_type =
+      static_cast<UnaryOpIC::TypeInfo>(Smi::cast(args[3])->value());
+
+  UnaryOpIC::TypeInfo type = UnaryOpIC::GetTypeInfo(operand);
+  type = UnaryOpIC::ComputeNewType(type, previous_type);
+
+  Handle<Code> code = GetUnaryOpStub(key, type);
+  if (!code.is_null()) {
+    if (FLAG_trace_ic) {
+      PrintF("[UnaryOpIC (%s->%s)#%s]\n",
+             UnaryOpIC::GetName(previous_type),
+             UnaryOpIC::GetName(type),
+             Token::Name(op));
+    }
+    UnaryOpIC ic(isolate);
+    ic.patch(*code);
+  }
+
+  Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
+      isolate->thread_local_top()->context_->builtins(), isolate);
+  Object* builtin = NULL;  // Initialization calms down the compiler.
+  switch (op) {
+    case Token::SUB:
+      builtin = builtins->javascript_builtin(Builtins::UNARY_MINUS);
+      break;
+    case Token::BIT_NOT:
+      builtin = builtins->javascript_builtin(Builtins::BIT_NOT);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
+
+  bool caught_exception;
+  Handle<Object> result = Execution::Call(builtin_function, operand, 0, NULL,
+                                          &caught_exception);
+  if (caught_exception) {
+    return Failure::Exception();
+  }
+  return *result;
+}
+
+// defined in code-stubs-<arch>.cc
+// Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
+Handle<Code> GetBinaryOpStub(int key,
+                             BinaryOpIC::TypeInfo type_info,
+                             BinaryOpIC::TypeInfo result_type);
+
+
+RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
   ASSERT(args.length() == 5);
 
   HandleScope scope(isolate);
@@ -2075,48 +2395,50 @@
   Handle<Object> right = args.at<Object>(1);
   int key = Smi::cast(args[2])->value();
   Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
-  TRBinaryOpIC::TypeInfo previous_type =
-      static_cast<TRBinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
+  BinaryOpIC::TypeInfo previous_type =
+      static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
 
-  TRBinaryOpIC::TypeInfo type = TRBinaryOpIC::GetTypeInfo(left, right);
-  type = TRBinaryOpIC::JoinTypes(type, previous_type);
-  TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED;
-  if (type == TRBinaryOpIC::STRING && op != Token::ADD) {
-    type = TRBinaryOpIC::GENERIC;
+  BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right);
+  type = BinaryOpIC::JoinTypes(type, previous_type);
+  BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
+  if ((type == BinaryOpIC::STRING || type == BinaryOpIC::BOTH_STRING) &&
+      op != Token::ADD) {
+    type = BinaryOpIC::GENERIC;
   }
-  if (type == TRBinaryOpIC::SMI &&
-      previous_type == TRBinaryOpIC::SMI) {
-    if (op == Token::DIV || op == Token::MUL || kSmiValueSize == 32) {
+  if (type == BinaryOpIC::SMI && previous_type == BinaryOpIC::SMI) {
+    if (op == Token::DIV ||
+        op == Token::MUL ||
+        op == Token::SHR ||
+        kSmiValueSize == 32) {
       // Arithmetic on two Smi inputs has yielded a heap number.
       // That is the only way to get here from the Smi stub.
       // With 32-bit Smis, all overflows give heap numbers, but with
       // 31-bit Smis, most operations overflow to int32 results.
-      result_type = TRBinaryOpIC::HEAP_NUMBER;
+      result_type = BinaryOpIC::HEAP_NUMBER;
     } else {
       // Other operations on SMIs that overflow yield int32s.
-      result_type = TRBinaryOpIC::INT32;
+      result_type = BinaryOpIC::INT32;
     }
   }
-  if (type == TRBinaryOpIC::INT32 &&
-      previous_type == TRBinaryOpIC::INT32) {
+  if (type == BinaryOpIC::INT32 && previous_type == BinaryOpIC::INT32) {
     // We must be here because an operation on two INT32 types overflowed.
-    result_type = TRBinaryOpIC::HEAP_NUMBER;
+    result_type = BinaryOpIC::HEAP_NUMBER;
   }
 
-  Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
+  Handle<Code> code = GetBinaryOpStub(key, type, result_type);
   if (!code.is_null()) {
     if (FLAG_trace_ic) {
-      PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
-             TRBinaryOpIC::GetName(previous_type),
-             TRBinaryOpIC::GetName(type),
-             TRBinaryOpIC::GetName(result_type),
+      PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n",
+             BinaryOpIC::GetName(previous_type),
+             BinaryOpIC::GetName(type),
+             BinaryOpIC::GetName(result_type),
              Token::Name(op));
     }
-    TRBinaryOpIC ic(isolate);
+    BinaryOpIC ic(isolate);
     ic.patch(*code);
 
     // Activate inlined smi code.
-    if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
+    if (previous_type == BinaryOpIC::UNINITIALIZED) {
       PatchInlinedSmiCode(ic.address());
     }
   }
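
The join rules behind these transitions are small enough to state on their own. Below is a standalone transcription of BinaryOpIC::JoinTypes from the hunk above (only the names differ). The new SHR case in the SMI branch exists because an unsigned shift of a negative Smi can produce a value above the int32 range, so the overflow result is a heap number rather than an int32:

#include <cassert>

enum BinType { UNINIT, SMI_T, INT32_T, HEAP_NUMBER_T, ODDBALL_T,
               BOTH_STRING_T, STRING_T, GENERIC_T };

BinType Join(BinType x, BinType y) {
  if (x == UNINIT) return y;
  if (y == UNINIT) return x;
  if (x == y) return x;
  // A site that has seen both all-string and mixed string/non-string operand
  // pairs settles on STRING (at least one string operand)...
  if (x == BOTH_STRING_T && y == STRING_T) return STRING_T;
  if (x == STRING_T && y == BOTH_STRING_T) return STRING_T;
  // ...and string feedback mixed with non-string feedback goes GENERIC.
  if (x == STRING_T || x == BOTH_STRING_T || y == STRING_T || y == BOTH_STRING_T) {
    return GENERIC_T;
  }
  return x > y ? x : y;  // Otherwise the less specific (larger) state wins.
}

int main() {
  assert(Join(BOTH_STRING_T, STRING_T) == STRING_T);
  assert(Join(SMI_T, INT32_T) == INT32_T);
  assert(Join(STRING_T, HEAP_NUMBER_T) == GENERIC_T);
  return 0;
}
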
@@ -2198,6 +2520,8 @@
     case SMIS: return "SMIS";
     case HEAP_NUMBERS: return "HEAP_NUMBERS";
     case OBJECTS: return "OBJECTS";
+    case SYMBOLS: return "SYMBOLS";
+    case STRINGS: return "STRINGS";
     case GENERIC: return "GENERIC";
     default:
       UNREACHABLE();
@@ -2210,12 +2534,18 @@
                                         bool has_inlined_smi_code,
                                         Handle<Object> x,
                                         Handle<Object> y) {
-  if (!has_inlined_smi_code && state != UNINITIALIZED) return GENERIC;
+  if (!has_inlined_smi_code && state != UNINITIALIZED && state != SYMBOLS) {
+    return GENERIC;
+  }
   if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
   if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
       x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
   if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
   if (state == UNINITIALIZED &&
+      x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
+  if ((state == UNINITIALIZED || state == SYMBOLS) &&
+      x->IsString() && y->IsString()) return STRINGS;
+  if (state == UNINITIALIZED &&
       x->IsJSObject() && y->IsJSObject()) return OBJECTS;
   return GENERIC;
 }
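
For the compare IC, the two new states give symbol-only and string-only equality comparisons their own fast paths before falling back to GENERIC. A standalone restatement of TargetState's decision order (illustrative names only; the has-inlined-smi-code and operator checks keep the same order as above):

#include <cassert>

enum CmpState { UNINIT_S, SMIS_S, HEAP_NUMBERS_S, SYMBOLS_S, STRINGS_S,
                OBJECTS_S, GENERIC_S };

CmpState Target(CmpState state, bool inlined_smi, bool equality_op,
                bool both_smi, bool both_number, bool both_symbol,
                bool both_string, bool both_object) {
  if (!inlined_smi && state != UNINIT_S && state != SYMBOLS_S) return GENERIC_S;
  if (state == UNINIT_S && both_smi) return SMIS_S;
  if ((state == UNINIT_S || (state == SMIS_S && inlined_smi)) && both_number) {
    return HEAP_NUMBERS_S;
  }
  if (!equality_op) return GENERIC_S;  // Only ==/=== get the non-numeric states.
  if (state == UNINIT_S && both_symbol) return SYMBOLS_S;
  if ((state == UNINIT_S || state == SYMBOLS_S) && both_string) return STRINGS_S;
  if (state == UNINIT_S && both_object) return OBJECTS_S;
  return GENERIC_S;
}

int main() {
  // Two symbol (interned string) operands specialize to SYMBOLS first; a later
  // non-symbol string pair widens the same site to STRINGS.
  assert(Target(UNINIT_S,  false, true, false, false, true,  true, false) == SYMBOLS_S);
  assert(Target(SYMBOLS_S, false, true, false, false, false, true, false) == STRINGS_S);
  return 0;
}
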
diff --git a/src/ic.h b/src/ic.h
index 911cbd8..25d0986 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_IC_H_
 
 #include "macro-assembler.h"
+#include "type-info.h"
 
 namespace v8 {
 namespace internal {
@@ -39,12 +40,15 @@
 #define IC_UTIL_LIST(ICU)                             \
   ICU(LoadIC_Miss)                                    \
   ICU(KeyedLoadIC_Miss)                               \
+  ICU(KeyedLoadIC_MissForceGeneric)                   \
   ICU(CallIC_Miss)                                    \
   ICU(KeyedCallIC_Miss)                               \
   ICU(StoreIC_Miss)                                   \
   ICU(StoreIC_ArrayLength)                            \
   ICU(SharedStoreIC_ExtendStorage)                    \
   ICU(KeyedStoreIC_Miss)                              \
+  ICU(KeyedStoreIC_MissForceGeneric)                  \
+  ICU(KeyedStoreIC_Slow)                              \
   /* Utilities for IC stubs. */                       \
   ICU(LoadCallbackProperty)                           \
   ICU(StoreCallbackProperty)                          \
@@ -53,7 +57,8 @@
   ICU(LoadPropertyWithInterceptorForCall)             \
   ICU(KeyedLoadPropertyWithInterceptor)               \
   ICU(StoreInterceptorProperty)                       \
-  ICU(TypeRecordingBinaryOp_Patch)                    \
+  ICU(UnaryOp_Patch)                                  \
+  ICU(BinaryOp_Patch)                                 \
   ICU(CompareIC_Miss)
 //
 // IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
@@ -141,11 +146,11 @@
   void set_target(Code* code) { SetTargetAtAddress(address(), code); }
 
 #ifdef DEBUG
-  static void TraceIC(const char* type,
-                      Handle<Object> name,
-                      State old_state,
-                      Code* new_target,
-                      const char* extra_info = "");
+  void TraceIC(const char* type,
+               Handle<Object> name,
+               State old_state,
+               Code* new_target,
+               const char* extra_info = "");
 #endif
 
   Failure* TypeError(const char* type,
@@ -190,6 +195,10 @@
 
 
 class CallICBase: public IC {
+ public:
+  class Contextual: public BitField<bool, 0, 1> {};
+  class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
+
  protected:
   CallICBase(Code::Kind kind, Isolate* isolate)
       : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
@@ -230,6 +239,7 @@
   void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
 
   static void Clear(Address address, Code* target);
+
   friend class IC;
 };
 
@@ -241,11 +251,17 @@
   }
 
   // Code generator routines.
-  static void GenerateInitialize(MacroAssembler* masm, int argc) {
-    GenerateMiss(masm, argc);
+  static void GenerateInitialize(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
+    GenerateMiss(masm, argc, extra_ic_state);
   }
-  static void GenerateMiss(MacroAssembler* masm, int argc);
-  static void GenerateMegamorphic(MacroAssembler* masm, int argc);
+  static void GenerateMiss(MacroAssembler* masm,
+                           int argc,
+                           Code::ExtraICState extra_ic_state);
+  static void GenerateMegamorphic(MacroAssembler* masm,
+                                  int argc,
+                                  Code::ExtraICState extra_ic_state);
   static void GenerateNormal(MacroAssembler* masm, int argc);
 };
 
@@ -296,14 +312,6 @@
                                    bool support_wrappers);
   static void GenerateFunctionPrototype(MacroAssembler* masm);
 
-  // Clear the use of the inlined version.
-  static void ClearInlinedVersion(Address address);
-
-  // The offset from the inlined patch site to the start of the
-  // inlined load instruction.  It is architecture-dependent, and not
-  // used on ARM.
-  static const int kOffsetToLoadInstruction;
-
  private:
   // Update the inline cache and the global stub cache based on the
   // lookup result.
@@ -328,42 +336,82 @@
 
   static void Clear(Address address, Code* target);
 
-  static bool PatchInlinedLoad(Address address, Object* map, int index);
-
-  static bool PatchInlinedContextualLoad(Address address,
-                                         Object* map,
-                                         Object* cell,
-                                         bool is_dont_delete);
-
   friend class IC;
 };
 
 
-class KeyedLoadIC: public IC {
+class KeyedIC: public IC {
  public:
-  explicit KeyedLoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
+  explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
+  virtual ~KeyedIC() {}
+
+  static const int kMaxKeyedPolymorphism = 4;
+
+  virtual MaybeObject* GetFastElementStubWithoutMapCheck(
+      bool is_js_array) = 0;
+
+  virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
+      ExternalArrayType array_type) = 0;
+
+ protected:
+  virtual Code* string_stub() {
+    return NULL;
+  }
+
+  virtual Code::Kind kind() const = 0;
+
+  virtual String* GetStubNameForCache(IC::State ic_state) = 0;
+
+  MaybeObject* ComputeStub(JSObject* receiver,
+                           bool is_store,
+                           StrictModeFlag strict_mode,
+                           Code* default_stub);
+
+  virtual MaybeObject* ConstructMegamorphicStub(
+      MapList* receiver_maps,
+      CodeList* targets,
+      StrictModeFlag strict_mode) = 0;
+
+ private:
+  void GetReceiverMapsForStub(Code* stub, MapList* result);
+
+  MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
+      Map* receiver_map,
+      StrictModeFlag strict_mode,
+      Code* generic_stub);
+
+  MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
+                                      bool is_store,
+                                      StrictModeFlag strict_mode,
+                                      Code* default_stub);
+};

+
+
+class KeyedLoadIC: public KeyedIC {
+ public:
+  explicit KeyedLoadIC(Isolate* isolate) : KeyedIC(isolate) {
     ASSERT(target()->is_keyed_load_stub());
   }
 
   MUST_USE_RESULT MaybeObject* Load(State state,
                                     Handle<Object> object,
-                                    Handle<Object> key);
+                                    Handle<Object> key,
+                                    bool force_generic_stub);
 
   // Code generator routines.
-  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateMiss(MacroAssembler* masm, bool force_generic);
   static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+  static void GenerateInitialize(MacroAssembler* masm) {
+    GenerateMiss(masm, false);
+  }
   static void GeneratePreMonomorphic(MacroAssembler* masm) {
-    GenerateMiss(masm);
+    GenerateMiss(masm, false);
   }
   static void GenerateGeneric(MacroAssembler* masm);
   static void GenerateString(MacroAssembler* masm);
 
   static void GenerateIndexedInterceptor(MacroAssembler* masm);
 
-  // Clear the use of the inlined version.
-  static void ClearInlinedVersion(Address address);
-
   // Bit mask to be tested against bit field for the cases when
   // generic stub should go into slow case.
   // Access check is necessary explicitly since generic stub does not perform
@@ -371,6 +419,27 @@
   static const int kSlowCaseBitFieldMask =
       (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
 
+  virtual MaybeObject* GetFastElementStubWithoutMapCheck(
+      bool is_js_array);
+
+  virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
+      ExternalArrayType array_type);
+
+ protected:
+  virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
+
+  virtual String* GetStubNameForCache(IC::State ic_state);
+
+  virtual MaybeObject* ConstructMegamorphicStub(
+      MapList* receiver_maps,
+      CodeList* targets,
+      StrictModeFlag strict_mode);
+
+  virtual Code* string_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedLoadIC_String);
+  }
+
  private:
   // Update the inline cache.
   void UpdateCaches(LookupResult* lookup,
@@ -395,11 +464,6 @@
     return isolate()->builtins()->builtin(
         Builtins::kKeyedLoadIC_PreMonomorphic);
   }
-  Code* string_stub() {
-    return isolate()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_String);
-  }
-
   Code* indexed_interceptor_stub() {
     return isolate()->builtins()->builtin(
         Builtins::kKeyedLoadIC_IndexedInterceptor);
@@ -407,10 +471,6 @@
 
   static void Clear(Address address, Code* target);
 
-  // Support for patching the map that is checked in an inlined
-  // version of keyed load.
-  static bool PatchInlinedLoad(Address address, Object* map);
-
   friend class IC;
 };
 
@@ -437,13 +497,6 @@
   static void GenerateGlobalProxy(MacroAssembler* masm,
                                   StrictModeFlag strict_mode);
 
-  // Clear the use of an inlined version.
-  static void ClearInlinedVersion(Address address);
-
-  // The offset from the inlined patch site to the start of the
-  // inlined store instruction.
-  static const int kOffsetToStoreInstruction;
-
  private:
   // Update the inline cache and the global stub cache based on the
   // lookup result.
@@ -489,38 +542,50 @@
 
   static void Clear(Address address, Code* target);
 
-  // Support for patching the index and the map that is checked in an
-  // inlined version of the named store.
-  static bool PatchInlinedStore(Address address, Object* map, int index);
-
   friend class IC;
 };
 
 
-class KeyedStoreIC: public IC {
+class KeyedStoreIC: public KeyedIC {
  public:
-  explicit KeyedStoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+  explicit KeyedStoreIC(Isolate* isolate) : KeyedIC(isolate) {
+    ASSERT(target()->is_keyed_store_stub());
+  }
 
   MUST_USE_RESULT MaybeObject* Store(State state,
-                                     StrictModeFlag strict_mode,
+                                   StrictModeFlag strict_mode,
                                      Handle<Object> object,
                                      Handle<Object> name,
-                                     Handle<Object> value);
+                                     Handle<Object> value,
+                                     bool force_generic);
 
   // Code generators for stub routines.  Only called once at startup.
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
-  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm) {
+    GenerateMiss(masm, false);
+  }
+  static void GenerateMiss(MacroAssembler* masm, bool force_generic);
+  static void GenerateSlow(MacroAssembler* masm);
   static void GenerateRuntimeSetProperty(MacroAssembler* masm,
                                          StrictModeFlag strict_mode);
   static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
 
-  // Clear the inlined version so the IC is always hit.
-  static void ClearInlinedVersion(Address address);
+  virtual MaybeObject* GetFastElementStubWithoutMapCheck(
+      bool is_js_array);
 
-  // Restore the inlined version so the fast case can get hit.
-  static void RestoreInlinedVersion(Address address);
+  virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
+      ExternalArrayType array_type);
 
- private:
+ protected:
+  virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
+
+  virtual String* GetStubNameForCache(IC::State ic_state);
+
+  virtual MaybeObject* ConstructMegamorphicStub(
+      MapList* receiver_maps,
+      CodeList* targets,
+      StrictModeFlag strict_mode);
+
+ private:
   // Update the inline cache.
   void UpdateCaches(LookupResult* lookup,
                     State state,
@@ -564,20 +629,38 @@
 
   static void Clear(Address address, Code* target);
 
-  // Support for patching the map that is checked in an inlined
-  // version of keyed store.
-  // The address is the patch point for the IC call
-  // (Assembler::kCallTargetAddressOffset before the end of
-  // the call/return address).
-  // The map is the new map that the inlined code should check against.
-  static bool PatchInlinedStore(Address address, Object* map);
-
   friend class IC;
 };
 
 
+class UnaryOpIC: public IC {
+ public:
+
+  // Sorted from most specific to least specific (ignoring UNINITIALIZED).
+  // TODO(svenpanne) Using enums+switch is an antipattern, use a class instead.
+  enum TypeInfo {
+    UNINITIALIZED,
+    SMI,
+    HEAP_NUMBER,
+    GENERIC
+  };
+
+  explicit UnaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+
+  void patch(Code* code);
+
+  static const char* GetName(TypeInfo type_info);
+
+  static State ToState(TypeInfo type_info);
+
+  static TypeInfo GetTypeInfo(Handle<Object> operand);
+
+  static TypeInfo ComputeNewType(TypeInfo type, TypeInfo previous);
+};
+
+
 // Type Recording BinaryOpIC, that records the types of the inputs and outputs.
-class TRBinaryOpIC: public IC {
+class BinaryOpIC: public IC {
  public:
 
   enum TypeInfo {
@@ -586,11 +669,12 @@
     INT32,
     HEAP_NUMBER,
     ODDBALL,
+    BOTH_STRING,  // Only used for addition operation.
     STRING,  // Only used for addition operation.  At least one string operand.
     GENERIC
   };
 
-  explicit TRBinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+  explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
 
   void patch(Code* code);
 
@@ -610,6 +694,8 @@
     UNINITIALIZED,
     SMIS,
     HEAP_NUMBERS,
+    SYMBOLS,
+    STRINGS,
     OBJECTS,
     GENERIC
   };
@@ -642,7 +728,7 @@
   Token::Value op_;
 };
 
-// Helper for TRBinaryOpIC and CompareIC.
+// Helper for BinaryOpIC and CompareIC.
 void PatchInlinedSmiCode(Address address);
 
 } }  // namespace v8::internal
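
A design note on the ic.h changes above: KeyedIC hoists the whole mono/poly/generic decision (ComputeStub and its helpers) into a shared base class, while KeyedLoadIC and KeyedStoreIC only override the leaf operations (kind(), the cache-name symbol, the stub constructors). A minimal standalone sketch of that template-method shape, with generic names that are not V8 API:

#include <cstdio>

class KeyedBase {
 public:
  virtual ~KeyedBase() {}
  // The shared algorithm lives in the base class...
  const char* ComputeStub(int maps_seen) {
    if (maps_seen <= 1) return MonomorphicStubName();
    if (maps_seen <= 4) return MegamorphicStubName();
    return "generic";
  }
 protected:
  // ...and each concrete IC supplies only its own building blocks.
  virtual const char* MonomorphicStubName() = 0;
  virtual const char* MegamorphicStubName() = 0;
};

class KeyedLoadLike : public KeyedBase {
 protected:
  virtual const char* MonomorphicStubName() { return "KeyedLoadSpecializedMonomorphic"; }
  virtual const char* MegamorphicStubName() { return "KeyedLoadSpecializedPolymorphic"; }
};

int main() {
  KeyedLoadLike ic;
  std::printf("%s\n", ic.ComputeStub(3));  // KeyedLoadSpecializedPolymorphic
  return 0;
}
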
diff --git a/src/frame-element.cc b/src/isolate-inl.h
similarity index 82%
copy from src/frame-element.cc
copy to src/isolate-inl.h
index f629900..aa6b537 100644
--- a/src/frame-element.cc
+++ b/src/isolate-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,26 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#ifndef V8_ISOLATE_INL_H_
+#define V8_ISOLATE_INL_H_
 
-#include "frame-element.h"
-#include "zone-inl.h"
+#include "isolate.h"
+
+#include "debug.h"
 
 namespace v8 {
 namespace internal {
 
 
+bool Isolate::DebuggerHasBreakPoints() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  return debug()->has_break_points();
+#else
+  return false;
+#endif
+}
+
+
 } }  // namespace v8::internal
+
+#endif  // V8_ISOLATE_INL_H_
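
The new isolate-inl.h keeps the debugger query inline so hot callers can guard debug-only work without including debug.h everywhere; with ENABLE_DEBUGGER_SUPPORT off the predicate folds to false and the guarded path compiles away. A small sketch of the same pattern outside V8 (QueryRealDebugger is hypothetical and only referenced when the macro is defined):

#include <cstdio>

// Define ENABLE_DEBUGGER_SUPPORT to pull in the debug-only branch; the
// hypothetical QueryRealDebugger() would then have to exist somewhere.

inline bool DebuggerHasBreakPoints() {
#ifdef ENABLE_DEBUGGER_SUPPORT
  return QueryRealDebugger();  // hypothetical debugger hook
#else
  return false;                // constant-folds; callers pay nothing
#endif
}

int main() {
  if (DebuggerHasBreakPoints()) {
    std::printf("take the slow, fully observable path\n");
  } else {
    std::printf("take the fast path\n");
  }
  return 0;
}
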
diff --git a/src/isolate.cc b/src/isolate.cc
index 5b3438f..a7bf7d9 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -40,6 +40,7 @@
 #include "isolate.h"
 #include "lithium-allocator.h"
 #include "log.h"
+#include "messages.h"
 #include "regexp-stack.h"
 #include "runtime-profiler.h"
 #include "scanner.h"
@@ -49,6 +50,7 @@
 #include "spaces.h"
 #include "stub-cache.h"
 #include "version.h"
+#include "vm-state-inl.h"
 
 
 namespace v8 {
@@ -61,6 +63,7 @@
   return new_id;
 }
 
+
 int ThreadId::GetCurrentThreadId() {
   int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
   if (thread_id == 0) {
@@ -71,6 +74,52 @@
 }
 
 
+ThreadLocalTop::ThreadLocalTop() {
+  InitializeInternal();
+}
+
+
+void ThreadLocalTop::InitializeInternal() {
+  c_entry_fp_ = 0;
+  handler_ = 0;
+#ifdef USE_SIMULATOR
+  simulator_ = NULL;
+#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  js_entry_sp_ = NULL;
+  external_callback_ = NULL;
+#endif
+#ifdef ENABLE_VMSTATE_TRACKING
+  current_vm_state_ = EXTERNAL;
+#endif
+  try_catch_handler_address_ = NULL;
+  context_ = NULL;
+  thread_id_ = ThreadId::Invalid();
+  external_caught_exception_ = false;
+  failed_access_check_callback_ = NULL;
+  save_context_ = NULL;
+  catcher_ = NULL;
+}
+
+
+void ThreadLocalTop::Initialize() {
+  InitializeInternal();
+#ifdef USE_SIMULATOR
+#ifdef V8_TARGET_ARCH_ARM
+  simulator_ = Simulator::current(isolate_);
+#elif V8_TARGET_ARCH_MIPS
+  simulator_ = Simulator::current(isolate_);
+#endif
+#endif
+  thread_id_ = ThreadId::Current();
+}
+
+
+v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
+  return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
+}
+
+
 // Create a dummy thread that will wait forever on a semaphore. The only
 // purpose for this thread is to have some stack area to save essential data
 // into for use by a stacks-only core dump (aka minidump).
@@ -312,6 +361,17 @@
 }
 
 
+Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
+  ThreadId thread_id = ThreadId::Current();
+  PerIsolateThreadData* per_thread = NULL;
+  {
+    ScopedLock lock(process_wide_mutex_);
+    per_thread = thread_data_table_->Lookup(this, thread_id);
+  }
+  return per_thread;
+}
+
+
 void Isolate::EnsureDefaultIsolate() {
   ScopedLock lock(process_wide_mutex_);
   if (default_isolate_ == NULL) {
@@ -323,14 +383,19 @@
   }
   // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
   // because a non-null thread data may already be set.
-  Thread::SetThreadLocal(isolate_key_, default_isolate_);
+  if (Thread::GetThreadLocal(isolate_key_) == NULL) {
+    Thread::SetThreadLocal(isolate_key_, default_isolate_);
+  }
+  CHECK(default_isolate_->PreInit());
 }
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
 Debugger* Isolate::GetDefaultIsolateDebugger() {
   EnsureDefaultIsolate();
   return default_isolate_->debugger();
 }
+#endif
 
 
 StackGuard* Isolate::GetDefaultIsolateStackGuard() {
@@ -357,6 +422,892 @@
 }
 
 
+Address Isolate::get_address_from_id(Isolate::AddressId id) {
+  return isolate_addresses_[id];
+}
+
+
+char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
+  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
+  Iterate(v, thread);
+  return thread_storage + sizeof(ThreadLocalTop);
+}
+
+
+void Isolate::IterateThread(ThreadVisitor* v) {
+  v->VisitThread(this, thread_local_top());
+}
+
+
+void Isolate::IterateThread(ThreadVisitor* v, char* t) {
+  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
+  v->VisitThread(this, thread);
+}
+
+
+void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
+  // Visit the roots from the top for a given thread.
+  Object* pending;
+  // The pending exception can sometimes be a failure.  We can't show
+  // that to the GC, which only understands objects.
+  if (thread->pending_exception_->ToObject(&pending)) {
+    v->VisitPointer(&pending);
+    thread->pending_exception_ = pending;  // In case GC updated it.
+  }
+  v->VisitPointer(&(thread->pending_message_obj_));
+  v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
+  v->VisitPointer(BitCast<Object**>(&(thread->context_)));
+  Object* scheduled;
+  if (thread->scheduled_exception_->ToObject(&scheduled)) {
+    v->VisitPointer(&scheduled);
+    thread->scheduled_exception_ = scheduled;
+  }
+
+  for (v8::TryCatch* block = thread->TryCatchHandler();
+       block != NULL;
+       block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
+    v->VisitPointer(BitCast<Object**>(&(block->exception_)));
+    v->VisitPointer(BitCast<Object**>(&(block->message_)));
+  }
+
+  // Iterate over pointers on native execution stack.
+  for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
+    it.frame()->Iterate(v);
+  }
+}
+
+
+void Isolate::Iterate(ObjectVisitor* v) {
+  ThreadLocalTop* current_t = thread_local_top();
+  Iterate(v, current_t);
+}
+
+
+void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
+  // The ARM simulator has a separate JS stack.  We therefore register
+  // the C++ try catch handler with the simulator and get back an
+  // address that can be used for comparisons with addresses into the
+  // JS stack.  When running without the simulator, the address
+  // returned will be the address of the C++ try catch handler itself.
+  Address address = reinterpret_cast<Address>(
+      SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
+  thread_local_top()->set_try_catch_handler_address(address);
+}
+
+
+void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
+  ASSERT(thread_local_top()->TryCatchHandler() == that);
+  thread_local_top()->set_try_catch_handler_address(
+      reinterpret_cast<Address>(that->next_));
+  thread_local_top()->catcher_ = NULL;
+  SimulatorStack::UnregisterCTryCatch();
+}
+
+
+Handle<String> Isolate::StackTraceString() {
+  if (stack_trace_nesting_level_ == 0) {
+    stack_trace_nesting_level_++;
+    HeapStringAllocator allocator;
+    StringStream::ClearMentionedObjectCache();
+    StringStream accumulator(&allocator);
+    incomplete_message_ = &accumulator;
+    PrintStack(&accumulator);
+    Handle<String> stack_trace = accumulator.ToString();
+    incomplete_message_ = NULL;
+    stack_trace_nesting_level_ = 0;
+    return stack_trace;
+  } else if (stack_trace_nesting_level_ == 1) {
+    stack_trace_nesting_level_++;
+    OS::PrintError(
+      "\n\nAttempt to print stack while printing stack (double fault)\n");
+    OS::PrintError(
+      "If you are lucky you may find a partial stack dump on stdout.\n\n");
+    incomplete_message_->OutputToStdOut();
+    return factory()->empty_symbol();
+  } else {
+    OS::Abort();
+    // Unreachable
+    return factory()->empty_symbol();
+  }
+}
+
+
+Handle<JSArray> Isolate::CaptureCurrentStackTrace(
+    int frame_limit, StackTrace::StackTraceOptions options) {
+  // Ensure no negative values.
+  int limit = Max(frame_limit, 0);
+  Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
+
+  Handle<String> column_key = factory()->LookupAsciiSymbol("column");
+  Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
+  Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
+  Handle<String> name_or_source_url_key =
+      factory()->LookupAsciiSymbol("nameOrSourceURL");
+  Handle<String> script_name_or_source_url_key =
+      factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
+  Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
+  Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
+  Handle<String> constructor_key =
+      factory()->LookupAsciiSymbol("isConstructor");
+
+  StackTraceFrameIterator it(this);
+  int frames_seen = 0;
+  while (!it.done() && (frames_seen < limit)) {
+    JavaScriptFrame* frame = it.frame();
+    // Set initial size to the maximum inlining level + 1 for the outermost
+    // function.
+    List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
+    frame->Summarize(&frames);
+    for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+      // Create a JSObject to hold the information for the StackFrame.
+      Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
+
+      Handle<JSFunction> fun = frames[i].function();
+      Handle<Script> script(Script::cast(fun->shared()->script()));
+
+      if (options & StackTrace::kLineNumber) {
+        int script_line_offset = script->line_offset()->value();
+        int position = frames[i].code()->SourcePosition(frames[i].pc());
+        int line_number = GetScriptLineNumber(script, position);
+        // line_number is already shifted by the script_line_offset.
+        int relative_line_number = line_number - script_line_offset;
+        if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
+          Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+          int start = (relative_line_number == 0) ? 0 :
+              Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
+          int column_offset = position - start;
+          if (relative_line_number == 0) {
+            // For the case where the code is on the same line as the script
+            // tag.
+            column_offset += script->column_offset()->value();
+          }
+          SetLocalPropertyNoThrow(stackFrame, column_key,
+                                  Handle<Smi>(Smi::FromInt(column_offset + 1)));
+        }
+        SetLocalPropertyNoThrow(stackFrame, line_key,
+                                Handle<Smi>(Smi::FromInt(line_number + 1)));
+      }
+
+      if (options & StackTrace::kScriptName) {
+        Handle<Object> script_name(script->name(), this);
+        SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
+      }
+
+      if (options & StackTrace::kScriptNameOrSourceURL) {
+        Handle<Object> script_name(script->name(), this);
+        Handle<JSValue> script_wrapper = GetScriptWrapper(script);
+        Handle<Object> property = GetProperty(script_wrapper,
+                                              name_or_source_url_key);
+        ASSERT(property->IsJSFunction());
+        Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+        bool caught_exception;
+        Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
+                                                   NULL, &caught_exception);
+        if (caught_exception) {
+          result = factory()->undefined_value();
+        }
+        SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
+                                result);
+      }
+
+      if (options & StackTrace::kFunctionName) {
+        Handle<Object> fun_name(fun->shared()->name(), this);
+        if (fun_name->ToBoolean()->IsFalse()) {
+          fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
+        }
+        SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
+      }
+
+      if (options & StackTrace::kIsEval) {
+        int type = Smi::cast(script->compilation_type())->value();
+        Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
+            factory()->true_value() : factory()->false_value();
+        SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
+      }
+
+      if (options & StackTrace::kIsConstructor) {
+        Handle<Object> is_constructor = (frames[i].is_constructor()) ?
+            factory()->true_value() : factory()->false_value();
+        SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
+      }
+
+      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
+      frames_seen++;
+    }
+    it.Advance();
+  }
+
+  stack_trace->set_length(Smi::FromInt(frames_seen));
+  return stack_trace;
+}
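
CaptureCurrentStackTrace is what backs the public v8::StackTrace API. A hedged embedder-side sketch, to be called while JavaScript is on the stack; the calls below (StackTrace::CurrentStackTrace and the StackFrame accessors) are assumed to match the v8.h shipping with this version, so treat the exact signatures as assumptions rather than a verified example:

#include <cstdio>
#include <v8.h>

void DumpJsStack() {
  v8::HandleScope scope;
  // Up to 10 frames with all optional fields (line, column, names) filled in.
  v8::Local<v8::StackTrace> trace =
      v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
  for (int i = 0; i < trace->GetFrameCount(); i++) {
    v8::Local<v8::StackFrame> frame = trace->GetFrame(i);
    v8::String::Utf8Value name(frame->GetFunctionName());
    std::printf("#%d %s:%d:%d\n", i,
                *name ? *name : "<anonymous>",
                frame->GetLineNumber(), frame->GetColumn());
  }
}
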
+
+
+void Isolate::PrintStack() {
+  if (stack_trace_nesting_level_ == 0) {
+    stack_trace_nesting_level_++;
+
+    StringAllocator* allocator;
+    if (preallocated_message_space_ == NULL) {
+      allocator = new HeapStringAllocator();
+    } else {
+      allocator = preallocated_message_space_;
+    }
+
+    StringStream::ClearMentionedObjectCache();
+    StringStream accumulator(allocator);
+    incomplete_message_ = &accumulator;
+    PrintStack(&accumulator);
+    accumulator.OutputToStdOut();
+    accumulator.Log();
+    incomplete_message_ = NULL;
+    stack_trace_nesting_level_ = 0;
+    if (preallocated_message_space_ == NULL) {
+      // Remove the HeapStringAllocator created above.
+      delete allocator;
+    }
+  } else if (stack_trace_nesting_level_ == 1) {
+    stack_trace_nesting_level_++;
+    OS::PrintError(
+      "\n\nAttempt to print stack while printing stack (double fault)\n");
+    OS::PrintError(
+      "If you are lucky you may find a partial stack dump on stdout.\n\n");
+    incomplete_message_->OutputToStdOut();
+  }
+}
+
+
+static void PrintFrames(StringStream* accumulator,
+                        StackFrame::PrintMode mode) {
+  StackFrameIterator it;
+  for (int i = 0; !it.done(); it.Advance()) {
+    it.frame()->Print(accumulator, mode, i++);
+  }
+}
+
+
+void Isolate::PrintStack(StringStream* accumulator) {
+  if (!IsInitialized()) {
+    accumulator->Add(
+        "\n==== Stack trace is not available ==========================\n\n");
+    accumulator->Add(
+        "\n==== Isolate for the thread is not initialized =============\n\n");
+    return;
+  }
+  // The MentionedObjectCache is not GC-proof at the moment.
+  AssertNoAllocation nogc;
+  ASSERT(StringStream::IsMentionedObjectCacheClear());
+
+  // Avoid printing anything if there are no frames.
+  if (c_entry_fp(thread_local_top()) == 0) return;
+
+  accumulator->Add(
+      "\n==== Stack trace ============================================\n\n");
+  PrintFrames(accumulator, StackFrame::OVERVIEW);
+
+  accumulator->Add(
+      "\n==== Details ================================================\n\n");
+  PrintFrames(accumulator, StackFrame::DETAILS);
+
+  accumulator->PrintMentionedObjectCache();
+  accumulator->Add("=====================\n\n");
+}
+
+
+void Isolate::SetFailedAccessCheckCallback(
+    v8::FailedAccessCheckCallback callback) {
+  thread_local_top()->failed_access_check_callback_ = callback;
+}
+
+
+void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
+  if (!thread_local_top()->failed_access_check_callback_) return;
+
+  ASSERT(receiver->IsAccessCheckNeeded());
+  ASSERT(context());
+
+  // Get the data object from access check info.
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  if (!constructor->shared()->IsApiFunction()) return;
+  Object* data_obj =
+      constructor->shared()->get_api_func_data()->access_check_info();
+  if (data_obj == heap_.undefined_value()) return;
+
+  HandleScope scope;
+  Handle<JSObject> receiver_handle(receiver);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+  thread_local_top()->failed_access_check_callback_(
+    v8::Utils::ToLocal(receiver_handle),
+    type,
+    v8::Utils::ToLocal(data));
+}
+
+
+enum MayAccessDecision {
+  YES, NO, UNKNOWN
+};
+
+
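+// Fast-path access check: returns YES or NO when the decision can be made
+// without invoking the embedder's security callback, and UNKNOWN otherwise.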
+static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
+                                           JSObject* receiver,
+                                           v8::AccessType type) {
+  // During bootstrapping, callback functions are not enabled yet.
+  if (isolate->bootstrapper()->IsActive()) return YES;
+
+  if (receiver->IsJSGlobalProxy()) {
+    Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
+    if (!receiver_context->IsContext()) return NO;
+
+    // Get the global context of the current top context.  Avoid using
+    // Isolate::global_context() because it returns a Handle.
+    Context* global_context = isolate->context()->global()->global_context();
+    if (receiver_context == global_context) return YES;
+
+    if (Context::cast(receiver_context)->security_token() ==
+        global_context->security_token())
+      return YES;
+  }
+
+  return UNKNOWN;
+}
+
+
+bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
+                             v8::AccessType type) {
+  ASSERT(receiver->IsAccessCheckNeeded());
+
+  // The callers of this method are not expecting a GC.
+  AssertNoAllocation no_gc;
+
+  // Skip checks for hidden properties access.  Note, we do not
+  // require existence of a context in this case.
+  if (key == heap_.hidden_symbol()) return true;
+
+  // Check for compatibility between the security tokens in the
+  // current lexical context and the accessed object.
+  ASSERT(context());
+
+  MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
+  if (decision != UNKNOWN) return decision == YES;
+
+  // Get named access check callback
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  if (!constructor->shared()->IsApiFunction()) return false;
+
+  Object* data_obj =
+     constructor->shared()->get_api_func_data()->access_check_info();
+  if (data_obj == heap_.undefined_value()) return false;
+
+  Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
+  v8::NamedSecurityCallback callback =
+      v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
+
+  if (!callback) return false;
+
+  HandleScope scope(this);
+  Handle<JSObject> receiver_handle(receiver, this);
+  Handle<Object> key_handle(key, this);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+  LOG(this, ApiNamedSecurityCheck(key));
+  bool result = false;
+  {
+    // Leaving JavaScript.
+    VMState state(this, EXTERNAL);
+    result = callback(v8::Utils::ToLocal(receiver_handle),
+                      v8::Utils::ToLocal(key_handle),
+                      type,
+                      v8::Utils::ToLocal(data));
+  }
+  return result;
+}
+
+
+bool Isolate::MayIndexedAccess(JSObject* receiver,
+                               uint32_t index,
+                               v8::AccessType type) {
+  ASSERT(receiver->IsAccessCheckNeeded());
+  // Check for compatibility between the security tokens in the
+  // current lexical context and the accessed object.
+  ASSERT(context());
+
+  MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
+  if (decision != UNKNOWN) return decision == YES;
+
+  // Get indexed access check callback
+  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+  if (!constructor->shared()->IsApiFunction()) return false;
+
+  Object* data_obj =
+      constructor->shared()->get_api_func_data()->access_check_info();
+  if (data_obj == heap_.undefined_value()) return false;
+
+  Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
+  v8::IndexedSecurityCallback callback =
+      v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
+
+  if (!callback) return false;
+
+  HandleScope scope(this);
+  Handle<JSObject> receiver_handle(receiver, this);
+  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
+  LOG(this, ApiIndexedSecurityCheck(index));
+  bool result = false;
+  {
+    // Leaving JavaScript.
+    VMState state(this, EXTERNAL);
+    result = callback(v8::Utils::ToLocal(receiver_handle),
+                      index,
+                      type,
+                      v8::Utils::ToLocal(data));
+  }
+  return result;
+}
+
+
+const char* const Isolate::kStackOverflowMessage =
+  "Uncaught RangeError: Maximum call stack size exceeded";
+
+
+Failure* Isolate::StackOverflow() {
+  HandleScope scope;
+  Handle<String> key = factory()->stack_overflow_symbol();
+  Handle<JSObject> boilerplate =
+      Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
+  Handle<Object> exception = Copy(boilerplate);
+  // TODO(1240995): To avoid having to call JavaScript code to compute
+  // the message for stack overflow exceptions which is very likely to
+  // double fault with another stack overflow exception, we use a
+  // precomputed message.
+  DoThrow(*exception, NULL);
+  return Failure::Exception();
+}
+
+
+Failure* Isolate::TerminateExecution() {
+  DoThrow(heap_.termination_exception(), NULL);
+  return Failure::Exception();
+}
+
+
+Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
+  DoThrow(exception, location);
+  return Failure::Exception();
+}
+
+
+Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
+  bool can_be_caught_externally = false;
+  bool catchable_by_javascript = is_catchable_by_javascript(exception);
+  ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
+
+  thread_local_top()->catcher_ = can_be_caught_externally ?
+      try_catch_handler() : NULL;
+
+  // Set the exception being re-thrown.
+  set_pending_exception(exception);
+  if (exception->IsFailure()) return exception->ToFailureUnchecked();
+  return Failure::Exception();
+}
+
+
+Failure* Isolate::ThrowIllegalOperation() {
+  return Throw(heap_.illegal_access_symbol());
+}
+
+
+void Isolate::ScheduleThrow(Object* exception) {
+  // When scheduling a throw we first throw the exception to get the
+  // error reporting if it is uncaught before rescheduling it.
+  Throw(exception);
+  thread_local_top()->scheduled_exception_ = pending_exception();
+  thread_local_top()->external_caught_exception_ = false;
+  clear_pending_exception();
+}
+
+
+Failure* Isolate::PromoteScheduledException() {
+  MaybeObject* thrown = scheduled_exception();
+  clear_scheduled_exception();
+  // Re-throw the exception to avoid getting repeated error reporting.
+  return ReThrow(thrown);
+}
+
+
+void Isolate::PrintCurrentStackTrace(FILE* out) {
+  StackTraceFrameIterator it(this);
+  while (!it.done()) {
+    HandleScope scope;
+    // Find code position if recorded in relocation info.
+    JavaScriptFrame* frame = it.frame();
+    int pos = frame->LookupCode()->SourcePosition(frame->pc());
+    Handle<Object> pos_obj(Smi::FromInt(pos));
+    // Fetch function and receiver.
+    Handle<JSFunction> fun(JSFunction::cast(frame->function()));
+    Handle<Object> recv(frame->receiver());
+    // Advance to the next JavaScript frame and determine if the
+    // current frame is the top-level frame.
+    it.Advance();
+    Handle<Object> is_top_level = it.done()
+        ? factory()->true_value()
+        : factory()->false_value();
+    // Generate and print stack trace line.
+    Handle<String> line =
+        Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
+    if (line->length() > 0) {
+      line->PrintOn(out);
+      fprintf(out, "\n");
+    }
+  }
+}
+
+
+void Isolate::ComputeLocation(MessageLocation* target) {
+  *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
+  StackTraceFrameIterator it(this);
+  if (!it.done()) {
+    JavaScriptFrame* frame = it.frame();
+    JSFunction* fun = JSFunction::cast(frame->function());
+    Object* script = fun->shared()->script();
+    if (script->IsScript() &&
+        !(Script::cast(script)->source()->IsUndefined())) {
+      int pos = frame->LookupCode()->SourcePosition(frame->pc());
+      // Compute the location from the function and the reloc info.
+      Handle<Script> casted_script(Script::cast(script));
+      *target = MessageLocation(casted_script, pos, pos + 1);
+    }
+  }
+}
+
+
+bool Isolate::ShouldReportException(bool* can_be_caught_externally,
+                                    bool catchable_by_javascript) {
+  // Find the top-most try-catch handler.
+  StackHandler* handler =
+      StackHandler::FromAddress(Isolate::handler(thread_local_top()));
+  while (handler != NULL && !handler->is_try_catch()) {
+    handler = handler->next();
+  }
+
+  // Get the address of the external handler so we can compare the address to
+  // determine which one is closer to the top of the stack.
+  Address external_handler_address =
+      thread_local_top()->try_catch_handler_address();
+
+  // The exception has been externally caught if and only if there is
+  // an external handler which is on top of the top-most try-catch
+  // handler.
+  *can_be_caught_externally = external_handler_address != NULL &&
+      (handler == NULL || handler->address() > external_handler_address ||
+       !catchable_by_javascript);
+
+  if (*can_be_caught_externally) {
+    // Only report the exception if the external handler is verbose.
+    return try_catch_handler()->is_verbose_;
+  } else {
+    // Report the exception if it isn't caught by JavaScript code.
+    return handler == NULL;
+  }
+}
+
+
+void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
+  ASSERT(!has_pending_exception());
+
+  HandleScope scope;
+  Object* exception_object = Smi::FromInt(0);
+  bool is_object = exception->ToObject(&exception_object);
+  Handle<Object> exception_handle(exception_object);
+
+  // Determine reporting and whether the exception is caught externally.
+  bool catchable_by_javascript = is_catchable_by_javascript(exception);
+  // Only real objects can be caught by JS.
+  ASSERT(!catchable_by_javascript || is_object);
+  bool can_be_caught_externally = false;
+  bool should_report_exception =
+      ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
+  bool report_exception = catchable_by_javascript && should_report_exception;
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Notify debugger of exception.
+  if (catchable_by_javascript) {
+    debugger_->OnException(exception_handle, report_exception);
+  }
+#endif
+
+  // Generate the message.
+  Handle<Object> message_obj;
+  MessageLocation potential_computed_location;
+  bool try_catch_needs_message =
+      can_be_caught_externally &&
+      try_catch_handler()->capture_message_;
+  if (report_exception || try_catch_needs_message) {
+    if (location == NULL) {
+      // If no location was specified, we use a computed one instead.
+      ComputeLocation(&potential_computed_location);
+      location = &potential_computed_location;
+    }
+    if (!bootstrapper()->IsActive()) {
+      // It's not safe to try to make message objects or collect stack
+      // traces while the bootstrapper is active since the infrastructure
+      // may not have been properly initialized.
+      Handle<String> stack_trace;
+      if (FLAG_trace_exception) stack_trace = StackTraceString();
+      Handle<JSArray> stack_trace_object;
+      if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
+          stack_trace_object = CaptureCurrentStackTrace(
+              stack_trace_for_uncaught_exceptions_frame_limit_,
+              stack_trace_for_uncaught_exceptions_options_);
+      }
+      ASSERT(is_object);  // Can't use the handle unless there's a real object.
+      message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
+          location, HandleVector<Object>(&exception_handle, 1), stack_trace,
+          stack_trace_object);
+    }
+  }
+
+  // Save the message for reporting if the exception remains uncaught.
+  thread_local_top()->has_pending_message_ = report_exception;
+  if (!message_obj.is_null()) {
+    thread_local_top()->pending_message_obj_ = *message_obj;
+    if (location != NULL) {
+      thread_local_top()->pending_message_script_ = *location->script();
+      thread_local_top()->pending_message_start_pos_ = location->start_pos();
+      thread_local_top()->pending_message_end_pos_ = location->end_pos();
+    }
+  }
+
+  // Do not forget to clean catcher_ if currently thrown exception cannot
+  // be caught.  If necessary, ReThrow will update the catcher.
+  thread_local_top()->catcher_ = can_be_caught_externally ?
+      try_catch_handler() : NULL;
+
+  // NOTE: Notifying the debugger or generating the message
+  // may have caused new exceptions. For now, we just ignore
+  // that and set the pending exception to the original one.
+  if (is_object) {
+    set_pending_exception(*exception_handle);
+  } else {
+    // Failures are not on the heap so they neither need nor work with handles.
+    ASSERT(exception_handle->IsFailure());
+    set_pending_exception(exception);
+  }
+}
+
+
+bool Isolate::IsExternallyCaught() {
+  ASSERT(has_pending_exception());
+
+  if ((thread_local_top()->catcher_ == NULL) ||
+      (try_catch_handler() != thread_local_top()->catcher_)) {
+    // When throwing the exception, we found no v8::TryCatch
+    // which should care about this exception.
+    return false;
+  }
+
+  if (!is_catchable_by_javascript(pending_exception())) {
+    return true;
+  }
+
+  // Get the address of the external handler so we can compare the address to
+  // determine which one is closer to the top of the stack.
+  Address external_handler_address =
+      thread_local_top()->try_catch_handler_address();
+  ASSERT(external_handler_address != NULL);
+
+  // The exception has been externally caught if and only if there is
+  // an external handler which is on top of the top-most try-finally
+  // handler.
+  // There should be no try-catch blocks as they would prohibit us from
+  // finding the external catcher in the first place (see the catcher_ check
+  // above).
+  //
+  // Note that a finally clause would rethrow the exception unless it is
+  // aborted by a jump in control flow (return, break, etc.), in which case
+  // we will get another chance to set a proper v8::TryCatch.
+  StackHandler* handler =
+      StackHandler::FromAddress(Isolate::handler(thread_local_top()));
+  while (handler != NULL && handler->address() < external_handler_address) {
+    ASSERT(!handler->is_try_catch());
+    if (handler->is_try_finally()) return false;
+
+    handler = handler->next();
+  }
+
+  return true;
+}
+
+
+void Isolate::ReportPendingMessages() {
+  ASSERT(has_pending_exception());
+  PropagatePendingExceptionToExternalTryCatch();
+
+  // If the pending exception is OutOfMemoryException set out_of_memory in
+  // the global context.  Note: We have to mark the global context here
+  // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
+  // set it.
+  HandleScope scope;
+  if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+    context()->mark_out_of_memory();
+  } else if (thread_local_top_.pending_exception_ ==
+             heap()->termination_exception()) {
+    // Do nothing: if needed, the exception has been already propagated to
+    // v8::TryCatch.
+  } else {
+    if (thread_local_top_.has_pending_message_) {
+      thread_local_top_.has_pending_message_ = false;
+      if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
+        HandleScope scope;
+        Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
+        if (thread_local_top_.pending_message_script_ != NULL) {
+          Handle<Script> script(thread_local_top_.pending_message_script_);
+          int start_pos = thread_local_top_.pending_message_start_pos_;
+          int end_pos = thread_local_top_.pending_message_end_pos_;
+          MessageLocation location(script, start_pos, end_pos);
+          MessageHandler::ReportMessage(this, &location, message_obj);
+        } else {
+          MessageHandler::ReportMessage(this, NULL, message_obj);
+        }
+      }
+    }
+  }
+  clear_pending_message();
+}
+
+
+void Isolate::TraceException(bool flag) {
+  FLAG_trace_exception = flag;  // TODO(isolates): This is an unfortunate use.
+}
+
+
+bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
+  ASSERT(has_pending_exception());
+  PropagatePendingExceptionToExternalTryCatch();
+
+  // Always reschedule out-of-memory exceptions.
+  if (!is_out_of_memory()) {
+    bool is_termination_exception =
+        pending_exception() == heap_.termination_exception();
+
+    // Do not reschedule the exception if this is the bottom call.
+    bool clear_exception = is_bottom_call;
+
+    if (is_termination_exception) {
+      if (is_bottom_call) {
+        thread_local_top()->external_caught_exception_ = false;
+        clear_pending_exception();
+        return false;
+      }
+    } else if (thread_local_top()->external_caught_exception_) {
+      // If the exception is externally caught, clear it if there are no
+      // JavaScript frames on the way to the C++ frame that has the
+      // external handler.
+      ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
+      Address external_handler_address =
+          thread_local_top()->try_catch_handler_address();
+      JavaScriptFrameIterator it;
+      if (it.done() || (it.frame()->sp() > external_handler_address)) {
+        clear_exception = true;
+      }
+    }
+
+    // Clear the exception if needed.
+    if (clear_exception) {
+      thread_local_top()->external_caught_exception_ = false;
+      clear_pending_exception();
+      return false;
+    }
+  }
+
+  // Reschedule the exception.
+  thread_local_top()->scheduled_exception_ = pending_exception();
+  clear_pending_exception();
+  return true;
+}
+
+
+void Isolate::SetCaptureStackTraceForUncaughtExceptions(
+      bool capture,
+      int frame_limit,
+      StackTrace::StackTraceOptions options) {
+  capture_stack_trace_for_uncaught_exceptions_ = capture;
+  stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
+  stack_trace_for_uncaught_exceptions_options_ = options;
+}
+
+
+bool Isolate::is_out_of_memory() {
+  if (has_pending_exception()) {
+    MaybeObject* e = pending_exception();
+    if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
+      return true;
+    }
+  }
+  if (has_scheduled_exception()) {
+    MaybeObject* e = scheduled_exception();
+    if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+Handle<Context> Isolate::global_context() {
+  GlobalObject* global = thread_local_top()->context_->global();
+  return Handle<Context>(global->global_context());
+}
+
+
+Handle<Context> Isolate::GetCallingGlobalContext() {
+  JavaScriptFrameIterator it;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  if (debug_->InDebugger()) {
+    while (!it.done()) {
+      JavaScriptFrame* frame = it.frame();
+      Context* context = Context::cast(frame->context());
+      if (context->global_context() == *debug_->debug_context()) {
+        it.Advance();
+      } else {
+        break;
+      }
+    }
+  }
+#endif  // ENABLE_DEBUGGER_SUPPORT
+  if (it.done()) return Handle<Context>::null();
+  JavaScriptFrame* frame = it.frame();
+  Context* context = Context::cast(frame->context());
+  return Handle<Context>(context->global_context());
+}
+
+
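+// Copies this thread's ThreadLocalTop into the archive buffer at 'to' and
+// reinitializes the live copy for the next thread entering the isolate.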
+char* Isolate::ArchiveThread(char* to) {
+  if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
+    RuntimeProfiler::IsolateExitedJS(this);
+  }
+  memcpy(to, reinterpret_cast<char*>(thread_local_top()),
+         sizeof(ThreadLocalTop));
+  InitializeThreadLocal();
+  return to + sizeof(ThreadLocalTop);
+}
+
+
+char* Isolate::RestoreThread(char* from) {
+  memcpy(reinterpret_cast<char*>(thread_local_top()), from,
+         sizeof(ThreadLocalTop));
+  // This might be just paranoia, but it seems to be needed in case a
+  // thread_local_top_ is restored on a separate OS thread.
+#ifdef USE_SIMULATOR
+#ifdef V8_TARGET_ARCH_ARM
+  thread_local_top()->simulator_ = Simulator::current(this);
+#elif V8_TARGET_ARCH_MIPS
+  thread_local_top()->simulator_ = Simulator::current(this);
+#endif
+#endif
+  if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
+    RuntimeProfiler::IsolateEnteredJS(this);
+  }
+  return from + sizeof(ThreadLocalTop);
+}
+
+
 Isolate::ThreadDataTable::ThreadDataTable()
     : list_(NULL) {
 }
@@ -417,15 +1368,11 @@
       bootstrapper_(NULL),
       runtime_profiler_(NULL),
       compilation_cache_(NULL),
-      counters_(NULL),
+      counters_(new Counters()),
       code_range_(NULL),
-      // Must be initialized early to allow v8::SetResourceConstraints calls.
       break_access_(OS::CreateMutex()),
-      debugger_initialized_(false),
-      // Must be initialized early to allow v8::Debug calls.
-      debugger_access_(OS::CreateMutex()),
-      logger_(NULL),
-      stats_table_(NULL),
+      logger_(new Logger()),
+      stats_table_(new StatsTable()),
       stub_cache_(NULL),
       deoptimizer_data_(NULL),
       capture_stack_trace_for_uncaught_exceptions_(false),
@@ -450,7 +1397,8 @@
       string_tracker_(NULL),
       regexp_stack_(NULL),
       frame_element_constant_list_(0),
-      result_constant_list_(0) {
+      result_constant_list_(0),
+      embedder_data_(NULL) {
   TRACE_ISOLATE(constructor);
 
   memset(isolate_addresses_, 0,
@@ -460,6 +1408,11 @@
   zone_.isolate_ = this;
   stack_guard_.isolate_ = this;
 
+  // ThreadManager is initialized early to support locking an isolate
+  // before it is entered.
+  thread_manager_ = new ThreadManager();
+  thread_manager_->isolate_ = this;
+
 #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
     defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
   simulator_initialized_ = false;
@@ -467,9 +1420,6 @@
   simulator_redirection_ = NULL;
 #endif
 
-  thread_manager_ = new ThreadManager();
-  thread_manager_->isolate_ = this;
-
 #ifdef DEBUG
   // heap_histograms_ initializes itself.
   memset(&js_spill_information_, 0, sizeof(js_spill_information_));
@@ -555,7 +1505,7 @@
     logger_->TearDown();
 
     // The default isolate is re-initializable due to legacy API.
-    state_ = UNINITIALIZED;
+    state_ = PREINITIALIZED;
   }
 }
 
@@ -642,7 +1592,67 @@
 }
 
 
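+// Allocates the isolate's subcomponents without setting up the heap and
+// leaves the isolate in the PREINITIALIZED state.  Called from Init() and
+// when a thread enters an isolate that has not been initialized yet.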
+bool Isolate::PreInit() {
+  if (state_ != UNINITIALIZED) return true;
+
+  TRACE_ISOLATE(preinit);
+
+  ASSERT(Isolate::Current() == this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  debug_ = new Debug(this);
+  debugger_ = new Debugger();
+  debugger_->isolate_ = this;
+#endif
+
+  memory_allocator_ = new MemoryAllocator();
+  memory_allocator_->isolate_ = this;
+  code_range_ = new CodeRange();
+  code_range_->isolate_ = this;
+
+  // Safe after setting Heap::isolate_, initializing StackGuard and
+  // ensuring that Isolate::Current() == this.
+  heap_.SetStackLimits();
+
+#ifdef DEBUG
+  DisallowAllocationFailure disallow_allocation_failure;
+#endif
+
+#define C(name) isolate_addresses_[Isolate::k_##name] =                        \
+    reinterpret_cast<Address>(name());
+  ISOLATE_ADDRESS_LIST(C)
+  ISOLATE_ADDRESS_LIST_PROF(C)
+#undef C
+
+  string_tracker_ = new StringTracker();
+  string_tracker_->isolate_ = this;
+  compilation_cache_ = new CompilationCache(this);
+  transcendental_cache_ = new TranscendentalCache();
+  keyed_lookup_cache_ = new KeyedLookupCache();
+  context_slot_cache_ = new ContextSlotCache();
+  descriptor_lookup_cache_ = new DescriptorLookupCache();
+  unicode_cache_ = new UnicodeCache();
+  pc_to_code_cache_ = new PcToCodeCache(this);
+  write_input_buffer_ = new StringInputBuffer();
+  global_handles_ = new GlobalHandles(this);
+  bootstrapper_ = new Bootstrapper();
+  handle_scope_implementer_ = new HandleScopeImplementer(this);
+  stub_cache_ = new StubCache(this);
+  ast_sentinels_ = new AstSentinels();
+  regexp_stack_ = new RegExpStack();
+  regexp_stack_->isolate_ = this;
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  producer_heap_profile_ = new ProducerHeapProfile();
+  producer_heap_profile_->isolate_ = this;
+#endif
+
+  state_ = PREINITIALIZED;
+  return true;
+}
+
+
 void Isolate::InitializeThreadLocal() {
+  thread_local_top_.isolate_ = this;
   thread_local_top_.Initialize();
   clear_pending_exception();
   clear_pending_message();
@@ -677,77 +1687,19 @@
 }
 
 
-void Isolate::InitializeLoggingAndCounters() {
-  if (logger_ == NULL) {
-    logger_ = new Logger;
-  }
-  if (counters_ == NULL) {
-    counters_ = new Counters;
-  }
-}
-
-
-void Isolate::InitializeDebugger() {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  ScopedLock lock(debugger_access_);
-  if (NoBarrier_Load(&debugger_initialized_)) return;
-  InitializeLoggingAndCounters();
-  debug_ = new Debug(this);
-  debugger_ = new Debugger(this);
-  Release_Store(&debugger_initialized_, true);
-#endif
-}
-
-
 bool Isolate::Init(Deserializer* des) {
   ASSERT(state_ != INITIALIZED);
-  ASSERT(Isolate::Current() == this);
+
   TRACE_ISOLATE(init);
 
+  bool create_heap_objects = des == NULL;
+
 #ifdef DEBUG
   // The initialization process does not handle memory exhaustion.
   DisallowAllocationFailure disallow_allocation_failure;
 #endif
 
-  InitializeLoggingAndCounters();
-
-  InitializeDebugger();
-
-  memory_allocator_ = new MemoryAllocator(this);
-  code_range_ = new CodeRange(this);
-
-  // Safe after setting Heap::isolate_, initializing StackGuard and
-  // ensuring that Isolate::Current() == this.
-  heap_.SetStackLimits();
-
-#define C(name) isolate_addresses_[Isolate::k_##name] =                        \
-    reinterpret_cast<Address>(name());
-  ISOLATE_ADDRESS_LIST(C)
-  ISOLATE_ADDRESS_LIST_PROF(C)
-#undef C
-
-  string_tracker_ = new StringTracker();
-  string_tracker_->isolate_ = this;
-  compilation_cache_ = new CompilationCache(this);
-  transcendental_cache_ = new TranscendentalCache();
-  keyed_lookup_cache_ = new KeyedLookupCache();
-  context_slot_cache_ = new ContextSlotCache();
-  descriptor_lookup_cache_ = new DescriptorLookupCache();
-  unicode_cache_ = new UnicodeCache();
-  pc_to_code_cache_ = new PcToCodeCache(this);
-  write_input_buffer_ = new StringInputBuffer();
-  global_handles_ = new GlobalHandles(this);
-  bootstrapper_ = new Bootstrapper();
-  handle_scope_implementer_ = new HandleScopeImplementer();
-  stub_cache_ = new StubCache(this);
-  ast_sentinels_ = new AstSentinels();
-  regexp_stack_ = new RegExpStack();
-  regexp_stack_->isolate_ = this;
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  producer_heap_profile_ = new ProducerHeapProfile();
-  producer_heap_profile_->isolate_ = this;
-#endif
+  if (state_ == UNINITIALIZED && !PreInit()) return false;
 
   // Enable logging before setting up the heap
   logger_->Setup();
@@ -758,7 +1710,7 @@
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
 #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
-  Simulator::Initialize();
+  Simulator::Initialize(this);
 #endif
 #endif
 
@@ -770,8 +1722,7 @@
     stack_guard_.InitThread(lock);
   }
 
-  // Setup the object heap.
-  const bool create_heap_objects = (des == NULL);
+  // Setup the object heap
   ASSERT(!heap_.HasBeenSetup());
   if (!heap_.Setup(create_heap_objects)) {
     V8::SetFatalError();
@@ -820,7 +1771,7 @@
 
   // If we are deserializing, log non-function code objects and compiled
   // functions found in the snapshot.
-  if (des != NULL && FLAG_log_code) {
+  if (des != NULL && (FLAG_log_code || FLAG_ll_prof)) {
     HandleScope scope;
     LOG(this, LogCodeObjects());
     LOG(this, LogCompiledFunctions());
@@ -831,16 +1782,6 @@
 }
 
 
-// Initialized lazily to allow early
-// v8::V8::SetAddHistogramSampleFunction calls.
-StatsTable* Isolate::stats_table() {
-  if (stats_table_ == NULL) {
-    stats_table_ = new StatsTable;
-  }
-  return stats_table_;
-}
-
-
 void Isolate::Enter() {
   Isolate* current_isolate = NULL;
   PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
@@ -880,6 +1821,8 @@
 
   SetIsolateThreadLocals(this, data);
 
+  CHECK(PreInit());
+
   // In case it's the first time some thread enters the isolate.
   set_thread_id(data->thread_id());
 }
diff --git a/src/isolate.h b/src/isolate.h
index 167c8ef..0d36b3f 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -224,6 +224,7 @@
     ASSERT(try_catch_handler_address_ == NULL);
   }
 
+  Isolate* isolate_;
   // The context where the current execution method is created and for variable
   // lookups.
   Context* context_;
@@ -267,9 +268,6 @@
   // Call back function to report unsafe JS accesses.
   v8::FailedAccessCheckCallback failed_access_check_callback_;
 
-  // Whether out of memory exceptions should be ignored.
-  bool ignore_out_of_memory_;
-
  private:
   void InitializeInternal();
 
@@ -297,7 +295,6 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 #define ISOLATE_DEBUGGER_INIT_LIST(V)                                          \
-  V(uint64_t, enabled_cpu_features, 0)                                         \
   V(v8::Debug::EventCallback, debug_event_callback, NULL)                      \
   V(DebuggerAgent*, debugger_agent_instance, NULL)
 #else
@@ -349,6 +346,7 @@
   /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */    \
   V(byte*, assembler_spare_buffer, NULL)                                       \
   V(FatalErrorCallback, exception_behavior, NULL)                              \
+  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL)     \
   V(v8::Debug::MessageHandler, message_handler, NULL)                          \
   /* To distinguish the function templates, so that we can find them in the */ \
   /* function cache of the global context. */                                  \
@@ -373,6 +371,7 @@
   V(unsigned, ast_node_count, 0)                                               \
   /* SafeStackFrameIterator activations count. */                              \
   V(int, safe_stack_iterator_counter, 0)                                       \
+  V(uint64_t, enabled_cpu_features, 0)                                         \
   ISOLATE_PLATFORM_INIT_LIST(V)                                                \
   ISOLATE_LOGGING_INIT_LIST(V)                                                 \
   ISOLATE_DEBUGGER_INIT_LIST(V)
@@ -469,13 +468,6 @@
     return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
   }
 
-  // Usually called by Init(), but can be called early e.g. to allow
-  // testing components that require logging but not the whole
-  // isolate.
-  //
-  // Safe to call more than once.
-  void InitializeLoggingAndCounters();
-
   bool Init(Deserializer* des);
 
   bool IsInitialized() { return state_ == INITIALIZED; }
@@ -496,9 +488,15 @@
   // Safe to call multiple times.
   static void EnsureDefaultIsolate();
 
+  // Find the PerIsolateThreadData for this particular (isolate, thread)
+  // combination.  If one does not yet exist, return NULL.
+  PerIsolateThreadData* FindPerThreadDataForThisThread();
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
   // Get the debugger from the default isolate. Preinitializes the
   // default isolate if needed.
   static Debugger* GetDefaultIsolateDebugger();
+#endif
 
   // Get the stack guard from the default isolate. Preinitializes the
   // default isolate if needed.
@@ -522,12 +520,10 @@
   // switched to non-legacy behavior).
   static void EnterDefaultIsolate();
 
+  // Debug.
   // Mutex for serializing access to break control structures.
   Mutex* break_access() { return break_access_; }
 
-  // Mutex for serializing access to debugger.
-  Mutex* debugger_access() { return debugger_access_; }
-
   Address get_address_from_id(AddressId id);
 
   // Access to top context (where the current function object was created).
@@ -688,12 +684,6 @@
   // Tells whether the current context has experienced an out of memory
   // exception.
   bool is_out_of_memory();
-  bool ignore_out_of_memory() {
-    return thread_local_top_.ignore_out_of_memory_;
-  }
-  void set_ignore_out_of_memory(bool value) {
-    thread_local_top_.ignore_out_of_memory_ = value;
-  }
 
   void PrintCurrentStackTrace(FILE* out);
   void PrintStackTrace(FILE* out, char* thread_data);
@@ -802,24 +792,14 @@
 #undef GLOBAL_CONTEXT_FIELD_ACCESSOR
 
   Bootstrapper* bootstrapper() { return bootstrapper_; }
-  Counters* counters() {
-    // Call InitializeLoggingAndCounters() if logging is needed before
-    // the isolate is fully initialized.
-    ASSERT(counters_ != NULL);
-    return counters_;
-  }
+  Counters* counters() { return counters_; }
   CodeRange* code_range() { return code_range_; }
   RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
   CompilationCache* compilation_cache() { return compilation_cache_; }
-  Logger* logger() {
-    // Call InitializeLoggingAndCounters() if logging is needed before
-    // the isolate is fully initialized.
-    ASSERT(logger_ != NULL);
-    return logger_;
-  }
+  Logger* logger() { return logger_; }
   StackGuard* stack_guard() { return &stack_guard_; }
   Heap* heap() { return &heap_; }
-  StatsTable* stats_table();
+  StatsTable* stats_table() { return stats_table_; }
   StubCache* stub_cache() { return stub_cache_; }
   DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
   ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
@@ -897,14 +877,6 @@
 
   RuntimeState* runtime_state() { return &runtime_state_; }
 
-  StringInputBuffer* liveedit_compare_substrings_buf1() {
-    return &liveedit_compare_substrings_buf1_;
-  }
-
-  StringInputBuffer* liveedit_compare_substrings_buf2() {
-    return &liveedit_compare_substrings_buf2_;
-  }
-
   StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
     return &compiler_safe_string_input_buffer_;
   }
@@ -936,16 +908,12 @@
   void PreallocatedStorageInit(size_t size);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  Debugger* debugger() {
-    if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
-    return debugger_;
-  }
-  Debug* debug() {
-    if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
-    return debug_;
-  }
+  Debugger* debugger() { return debugger_; }
+  Debug* debug() { return debug_; }
 #endif
 
+  inline bool DebuggerHasBreakPoints();
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   ProducerHeapProfile* producer_heap_profile() {
     return producer_heap_profile_;
@@ -1026,6 +994,9 @@
 
   void ResetEagerOptimizingData();
 
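+  // Accessors for an opaque, embedder-owned data pointer stored on the
+  // isolate.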
+  void SetData(void* data) { embedder_data_ = data; }
+  void* GetData() { return embedder_data_; }
+
  private:
   Isolate();
 
@@ -1079,6 +1050,8 @@
   static Isolate* default_isolate_;
   static ThreadDataTable* thread_data_table_;
 
+  bool PreInit();
+
   void Deinit();
 
   static void SetIsolateThreadLocals(Isolate* isolate,
@@ -1086,6 +1059,7 @@
 
   enum State {
     UNINITIALIZED,    // Some components may not have been allocated.
+    PREINITIALIZED,   // Components have been allocated but not initialized.
     INITIALIZED       // All components are fully initialized.
   };
 
@@ -1100,7 +1074,7 @@
   // If one does not yet exist, allocate a new one.
   PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
 
-  // PreInits and returns a default isolate. Needed when a new thread tries
+// PreInits and returns a default isolate. Needed when a new thread tries
   // to create a Locker for the first time (the lock itself is in the isolate).
   static Isolate* GetDefaultIsolateForLocking();
 
@@ -1129,8 +1103,6 @@
 
   void PropagatePendingExceptionToExternalTryCatch();
 
-  void InitializeDebugger();
-
   int stack_trace_nesting_level_;
   StringStream* incomplete_message_;
   // The preallocated memory thread singleton.
@@ -1144,8 +1116,6 @@
   Counters* counters_;
   CodeRange* code_range_;
   Mutex* break_access_;
-  Atomic32 debugger_initialized_;
-  Mutex* debugger_access_;
   Heap heap_;
   Logger* logger_;
   StackGuard stack_guard_;
@@ -1175,8 +1145,6 @@
   ThreadManager* thread_manager_;
   AstSentinels* ast_sentinels_;
   RuntimeState runtime_state_;
-  StringInputBuffer liveedit_compare_substrings_buf1_;
-  StringInputBuffer liveedit_compare_substrings_buf2_;
   StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
   Builtins builtins_;
   StringTracker* string_tracker_;
@@ -1191,6 +1159,7 @@
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
   ZoneObjectList frame_element_constant_list_;
   ZoneObjectList result_constant_list_;
+  void* embedder_data_;
 
 #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
     defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
@@ -1238,10 +1207,13 @@
 
   friend class ExecutionAccess;
   friend class IsolateInitializer;
+  friend class ThreadManager;
+  friend class Simulator;
+  friend class StackGuard;
   friend class ThreadId;
-  friend class TestMemoryAllocatorScope;
   friend class v8::Isolate;
   friend class v8::Locker;
+  friend class v8::Unlocker;
 
   DISALLOW_COPY_AND_ASSIGN(Isolate);
 };
@@ -1399,20 +1371,6 @@
 }
 
 
-// Temporary macro to be used to flag definitions that are indeed static
-// and not per-isolate. (It would be great to be able to grep for [static]!)
-#define RLYSTC static
-
-
-// Temporary macro to be used to flag classes that should be static.
-#define STATIC_CLASS class
-
-
-// Temporary macro to be used to flag classes that are completely converted
-// to be isolate-friendly. Their mix of static/nonstatic methods/fields is
-// correct.
-#define ISOLATED_CLASS class
-
 } }  // namespace v8::internal
 
 // TODO(isolates): Get rid of these -inl.h includes and place them only where
diff --git a/src/json-parser.cc b/src/json-parser.cc
new file mode 100644
index 0000000..b7f57c2
--- /dev/null
+++ b/src/json-parser.cc
@@ -0,0 +1,513 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "char-predicates-inl.h"
+#include "conversions.h"
+#include "json-parser.h"
+#include "messages.h"
+#include "spaces.h"
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Object> JsonParser::ParseJson(Handle<String> source) {
+  isolate_ = source->map()->isolate();
+  source_ = Handle<String>(source->TryFlattenGetString());
+  source_length_ = source_->length() - 1;
+
+  // Optimized fast case where we only have ascii characters.
+  if (source_->IsSeqAsciiString()) {
+    is_sequential_ascii_ = true;
+    seq_source_ = Handle<SeqAsciiString>::cast(source_);
+  } else {
+    is_sequential_ascii_ = false;
+  }
+
+  // Set initial position right before the string.
+  position_ = -1;
+  // Advance to the first character (possibly EOS).
+  Advance();
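+  // Prime the look-ahead token; ParseJsonValue() consumes it via Next().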
+  Next();
+  Handle<Object> result = ParseJsonValue();
+  if (result.is_null() || Next() != Token::EOS) {
+    // Parse failed. Scanner's current token is the unexpected token.
+    Token::Value token = current_.token;
+
+    const char* message;
+    const char* name_opt = NULL;
+
+    switch (token) {
+      case Token::EOS:
+        message = "unexpected_eos";
+        break;
+      case Token::NUMBER:
+        message = "unexpected_token_number";
+        break;
+      case Token::STRING:
+        message = "unexpected_token_string";
+        break;
+      case Token::IDENTIFIER:
+      case Token::FUTURE_RESERVED_WORD:
+        message = "unexpected_token_identifier";
+        break;
+      default:
+        message = "unexpected_token";
+        name_opt = Token::String(token);
+        ASSERT(name_opt != NULL);
+        break;
+    }
+
+    Factory* factory = isolate()->factory();
+    MessageLocation location(factory->NewScript(source),
+                             current_.beg_pos,
+                             current_.end_pos);
+    Handle<JSArray> array;
+    if (name_opt == NULL) {
+      array = factory->NewJSArray(0);
+    } else {
+      Handle<String> name = factory->NewStringFromUtf8(CStrVector(name_opt));
+      Handle<FixedArray> element = factory->NewFixedArray(1);
+      element->set(0, *name);
+      array = factory->NewJSArrayWithElements(element);
+    }
+    Handle<Object> result = factory->NewSyntaxError(message, array);
+    isolate()->Throw(*result, &location);
+    return Handle<Object>::null();
+  }
+  return result;
+}
+
+
+// Parse any JSON value.
+Handle<Object> JsonParser::ParseJsonValue() {
+  Token::Value token = Next();
+  switch (token) {
+    case Token::STRING:
+      return GetString(false);
+    case Token::NUMBER:
+      return isolate()->factory()->NewNumber(number_);
+    case Token::FALSE_LITERAL:
+      return isolate()->factory()->false_value();
+    case Token::TRUE_LITERAL:
+      return isolate()->factory()->true_value();
+    case Token::NULL_LITERAL:
+      return isolate()->factory()->null_value();
+    case Token::LBRACE:
+      return ParseJsonObject();
+    case Token::LBRACK:
+      return ParseJsonArray();
+    default:
+      return ReportUnexpectedToken();
+  }
+}
+
+
+// Parse a JSON object. Scanner must be right after '{' token.
+Handle<Object> JsonParser::ParseJsonObject() {
+  Handle<JSFunction> object_constructor(
+      isolate()->global_context()->object_function());
+  Handle<JSObject> json_object =
+      isolate()->factory()->NewJSObject(object_constructor);
+
+  if (Peek() == Token::RBRACE) {
+    Next();
+  } else {
+    do {
+      if (Next() != Token::STRING) {
+        return ReportUnexpectedToken();
+      }
+      Handle<String> key = GetString(true);
+      if (Next() != Token::COLON) {
+        return ReportUnexpectedToken();
+      }
+
+      Handle<Object> value = ParseJsonValue();
+      if (value.is_null()) return Handle<Object>::null();
+
+      uint32_t index;
+      if (key->AsArrayIndex(&index)) {
+        SetOwnElement(json_object, index, value, kNonStrictMode);
+      } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
+        SetPrototype(json_object, value);
+      } else {
+        SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
+      }
+    } while (Next() == Token::COMMA);
+    if (current_.token != Token::RBRACE) {
+      return ReportUnexpectedToken();
+    }
+  }
+  return json_object;
+}
+
+// Parse a JSON array. Scanner must be right after '[' token.
+Handle<Object> JsonParser::ParseJsonArray() {
+  ZoneScope zone_scope(isolate(), DELETE_ON_EXIT);
+  ZoneList<Handle<Object> > elements(4);
+
+  Token::Value token = Peek();
+  if (token == Token::RBRACK) {
+    Next();
+  } else {
+    do {
+      Handle<Object> element = ParseJsonValue();
+      if (element.is_null()) return Handle<Object>::null();
+      elements.Add(element);
+      token = Next();
+    } while (token == Token::COMMA);
+    if (token != Token::RBRACK) {
+      return ReportUnexpectedToken();
+    }
+  }
+
+  // Allocate a fixed array with all the elements.
+  Handle<FixedArray> fast_elements =
+      isolate()->factory()->NewFixedArray(elements.length());
+
+  for (int i = 0, n = elements.length(); i < n; i++) {
+    fast_elements->set(i, *elements[i]);
+  }
+
+  return isolate()->factory()->NewJSArrayWithElements(fast_elements);
+}
+
+
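+// Makes the previously scanned look-ahead token current, scans a new
+// look-ahead, and returns the now-current token.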
+Token::Value JsonParser::Next() {
+  current_ = next_;
+  ScanJson();
+  return current_.token;
+}
+
+void JsonParser::ScanJson() {
+  if (source_->IsSeqAsciiString()) {
+    is_sequential_ascii_ = true;
+  } else {
+    is_sequential_ascii_ = false;
+  }
+
+  Token::Value token;
+  do {
+    // Remember the position of the next token
+    next_.beg_pos = position_;
+    switch (c0_) {
+      case '\t':
+      case '\r':
+      case '\n':
+      case ' ':
+        Advance();
+        token = Token::WHITESPACE;
+        break;
+      case '{':
+        Advance();
+        token = Token::LBRACE;
+        break;
+      case '}':
+        Advance();
+        token = Token::RBRACE;
+        break;
+      case '[':
+        Advance();
+        token = Token::LBRACK;
+        break;
+      case ']':
+        Advance();
+        token = Token::RBRACK;
+        break;
+      case ':':
+        Advance();
+        token = Token::COLON;
+        break;
+      case ',':
+        Advance();
+        token = Token::COMMA;
+        break;
+      case '"':
+        token = ScanJsonString();
+        break;
+      case '-':
+      case '0':
+      case '1':
+      case '2':
+      case '3':
+      case '4':
+      case '5':
+      case '6':
+      case '7':
+      case '8':
+      case '9':
+        token = ScanJsonNumber();
+        break;
+      case 't':
+        token = ScanJsonIdentifier("true", Token::TRUE_LITERAL);
+        break;
+      case 'f':
+        token = ScanJsonIdentifier("false", Token::FALSE_LITERAL);
+        break;
+      case 'n':
+        token = ScanJsonIdentifier("null", Token::NULL_LITERAL);
+        break;
+      default:
+        if (c0_ < 0) {
+          Advance();
+          token = Token::EOS;
+        } else {
+          Advance();
+          token = Token::ILLEGAL;
+        }
+    }
+  } while (token == Token::WHITESPACE);
+
+  next_.end_pos = position_;
+  next_.token = token;
+}
+
+
+Token::Value JsonParser::ScanJsonIdentifier(const char* text,
+                                            Token::Value token) {
+  while (*text != '\0') {
+    if (c0_ != *text) return Token::ILLEGAL;
+    Advance();
+    text++;
+  }
+  return token;
+}
+
+
+Token::Value JsonParser::ScanJsonNumber() {
+  bool negative = false;
+
+  if (c0_ == '-') {
+    Advance();
+    negative = true;
+  }
+  if (c0_ == '0') {
+    Advance();
+    // Prefix zero is only allowed if it's the only digit before
+    // a decimal point or exponent.
+    if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
+  } else {
+    int i = 0;
+    int digits = 0;
+    if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
+    do {
+      i = i * 10 + c0_ - '0';
+      digits++;
+      Advance();
+    } while (c0_ >= '0' && c0_ <= '9');
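+    // Fast case: an integer of fewer than 10 digits fits in an int without
+    // overflow, so no string-to-double conversion is needed.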
+    if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
+      number_ = (negative ? -i : i);
+      return Token::NUMBER;
+    }
+  }
+  if (c0_ == '.') {
+    Advance();
+    if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
+    do {
+      Advance();
+    } while (c0_ >= '0' && c0_ <= '9');
+  }
+  if (AsciiAlphaToLower(c0_) == 'e') {
+    Advance();
+    if (c0_ == '-' || c0_ == '+') Advance();
+    if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
+    do {
+      Advance();
+    } while (c0_ >= '0' && c0_ <= '9');
+  }
+  if (is_sequential_ascii_) {
+    Vector<const char> chars(seq_source_->GetChars() +  next_.beg_pos,
+                             position_ - next_.beg_pos);
+    number_ = StringToDouble(isolate()->unicode_cache(),
+                             chars,
+                             NO_FLAGS,  // Hex, octal or trailing junk.
+                             OS::nan_value());
+  } else {
+    Vector<char> buffer = Vector<char>::New(position_ - next_.beg_pos);
+    String::WriteToFlat(*source_, buffer.start(), next_.beg_pos, position_);
+    Vector<const char> result =
+        Vector<const char>(reinterpret_cast<const char*>(buffer.start()),
+        position_ - next_.beg_pos);
+    number_ = StringToDouble(isolate()->unicode_cache(),
+                             result,
+                             NO_FLAGS,  // Hex, octal or trailing junk.
+                             0.0);
+    buffer.Dispose();
+  }
+  return Token::NUMBER;
+}
+
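+// Continues scanning a string that contains an escape or a non-ASCII
+// character.  The ASCII prefix scanned so far and the remaining characters,
+// accumulated into a two-byte string, are joined into a ConsString.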
+Token::Value JsonParser::SlowScanJsonString() {
+  // The currently scanned ascii characters.
+  Handle<String> ascii(isolate()->factory()->NewSubString(source_,
+                                                          next_.beg_pos + 1,
+                                                          position_));
+  Handle<String> two_byte =
+      isolate()->factory()->NewRawTwoByteString(kInitialSpecialStringSize,
+                                                NOT_TENURED);
+  Handle<SeqTwoByteString> seq_two_byte =
+      Handle<SeqTwoByteString>::cast(two_byte);
+
+  int allocation_count = 1;
+  int count = 0;
+
+  while (c0_ != '"') {
+    // Grow the two-byte backing string when the current allocation is full.
+    if (count >= kInitialSpecialStringSize * allocation_count) {
+      allocation_count = allocation_count * 2;
+      int new_size = allocation_count * kInitialSpecialStringSize;
+      Handle<String> new_two_byte =
+          isolate()->factory()->NewRawTwoByteString(new_size,
+                                                    NOT_TENURED);
+      uc16* char_start =
+          Handle<SeqTwoByteString>::cast(new_two_byte)->GetChars();
+      String::WriteToFlat(*seq_two_byte, char_start, 0, count);
+      seq_two_byte = Handle<SeqTwoByteString>::cast(new_two_byte);
+    }
+
+    // Check for control character (0x00-0x1f) or unterminated string (<0).
+    if (c0_ < 0x20) return Token::ILLEGAL;
+    if (c0_ != '\\') {
+      seq_two_byte->SeqTwoByteStringSet(count++, c0_);
+      Advance();
+    } else {
+      Advance();
+      switch (c0_) {
+        case '"':
+        case '\\':
+        case '/':
+          seq_two_byte->SeqTwoByteStringSet(count++, c0_);
+          break;
+        case 'b':
+          seq_two_byte->SeqTwoByteStringSet(count++, '\x08');
+          break;
+        case 'f':
+          seq_two_byte->SeqTwoByteStringSet(count++, '\x0c');
+          break;
+        case 'n':
+          seq_two_byte->SeqTwoByteStringSet(count++, '\x0a');
+          break;
+        case 'r':
+          seq_two_byte->SeqTwoByteStringSet(count++, '\x0d');
+          break;
+        case 't':
+          seq_two_byte->SeqTwoByteStringSet(count++, '\x09');
+          break;
+        case 'u': {
+          uc32 value = 0;
+          for (int i = 0; i < 4; i++) {
+            Advance();
+            int digit = HexValue(c0_);
+            if (digit < 0) {
+              return Token::ILLEGAL;
+            }
+            value = value * 16 + digit;
+          }
+          seq_two_byte->SeqTwoByteStringSet(count++, value);
+          break;
+        }
+        default:
+          return Token::ILLEGAL;
+      }
+      Advance();
+    }
+  }
+  // Advance past the last '"'.
+  ASSERT_EQ('"', c0_);
+  Advance();
+
+  // Shrink the string to the actual length.
+  if (isolate()->heap()->InNewSpace(*seq_two_byte)) {
+    isolate()->heap()->new_space()->
+          ShrinkStringAtAllocationBoundary<SeqTwoByteString>(*seq_two_byte,
+                                                             count);
+  } else {
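+    // Outside new space we cannot shrink at the allocation boundary; set the
+    // length and fill the unused tail with a filler object so the heap stays
+    // iterable.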
+    int string_size = SeqTwoByteString::SizeFor(count);
+    int allocated_string_size =
+        SeqTwoByteString::SizeFor(kInitialSpecialStringSize * allocation_count);
+    int delta = allocated_string_size - string_size;
+    Address start_filler_object = seq_two_byte->address() + string_size;
+    seq_two_byte->set_length(count);
+    isolate()->heap()->CreateFillerObjectAt(start_filler_object, delta);
+  }
+  string_val_ = isolate()->factory()->NewConsString(ascii, seq_two_byte);
+  return Token::STRING;
+}
+
+
+Token::Value JsonParser::ScanJsonString() {
+  ASSERT_EQ('"', c0_);
+  // Set string_val to null. If string_val is not set we assume an ASCII
+  // string spanning next_.beg_pos + 1 to next_.end_pos - 1.
+  string_val_ = Handle<String>::null();
+  Advance();
+  // Fast case for ascii only without escape characters.
+  while (c0_ != '"') {
+    // Check for control character (0x00-0x1f) or unterminated string (<0).
+    if (c0_ < 0x20) return Token::ILLEGAL;
+    if (c0_ != '\\' && c0_ < kMaxAsciiCharCode) {
+      Advance();
+    } else {
+      return SlowScanJsonString();
+    }
+  }
+  ASSERT_EQ('"', c0_);
+  // Advance past the last '"'.
+  Advance();
+  return Token::STRING;
+}
+
+Handle<String> JsonParser::GetString() {
+  return GetString(false);
+}
+
+Handle<String> JsonParser::GetSymbol() {
+  Handle<String> result = GetString(true);
+  if (result->IsSymbol()) return result;
+  return isolate()->factory()->LookupSymbol(result);
+}
+
+Handle<String> JsonParser::GetString(bool hint_symbol) {
+  // We have a non-ASCII or escaped string; return that.
+  if (!string_val_.is_null()) return string_val_;
+
+  if (is_sequential_ascii_ && hint_symbol) {
+    Handle<SeqAsciiString> seq = Handle<SeqAsciiString>::cast(source_);
+    // The current token includes the '"' in both ends.
+    int length = current_.end_pos - current_.beg_pos - 2;
+    return isolate()->factory()->LookupAsciiSymbol(seq_source_,
+                                                   current_.beg_pos + 1,
+                                                   length);
+  }
+  // The current token includes the '"' in both ends.
+  return isolate()->factory()->NewSubString(
+      source_, current_.beg_pos + 1, current_.end_pos - 1);
+}
+
+} }  // namespace v8::internal
diff --git a/src/json-parser.h b/src/json-parser.h
new file mode 100644
index 0000000..5903d21
--- /dev/null
+++ b/src/json-parser.h
@@ -0,0 +1,161 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JSON_PARSER_H_
+#define V8_JSON_PARSER_H_
+
+#include "token.h"
+
+namespace v8 {
+namespace internal {
+
+// A simple JSON parser.
+class JsonParser BASE_EMBEDDED {
+ public:
+  static Handle<Object> Parse(Handle<String> source) {
+    return JsonParser().ParseJson(source);
+  }
+
+  static const int kEndOfString = -1;
+
+ private:
+  // Parse a string containing a single JSON value.
+  Handle<Object> ParseJson(Handle<String> source);
+
+  inline void Advance() {
+    if (position_ >= source_length_) {
+      position_++;
+      c0_ = kEndOfString;
+    } else if (is_sequential_ascii_) {
+      position_++;
+      c0_ = seq_source_->SeqAsciiStringGet(position_);
+    } else {
+      position_++;
+      c0_ = source_->Get(position_);
+    }
+  }
+
+  inline Isolate* isolate() { return isolate_; }
+
+  // Get the string for the current string token.
+  Handle<String> GetString(bool hint_symbol);
+  Handle<String> GetString();
+  Handle<String> GetSymbol();
+
+  // Scan a single JSON token. The JSON lexical grammar is specified in the
+  // ECMAScript 5 standard, section 15.12.1.1.
+  // Recognizes all of the single-character tokens directly, or calls a function
+  // to scan a number, string or identifier literal.
+  // The only allowed whitespace characters between tokens are tab,
+  // carriage-return, newline and space.
+  void ScanJson();
+
+  // A JSON string (production JSONString) is a subset of valid JavaScript
+  // string literals. The string must be double-quoted (not single-quoted), and
+  // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
+  // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
+  Token::Value ScanJsonString();
+  // Slow version with unicode support; the ascii prefix scanned so far
+  // becomes the first part of a ConsString.
+  Token::Value SlowScanJsonString();
+
+  // A JSON number (production JSONNumber) is a subset of the valid JavaScript
+  // decimal number literals.
+  // It includes an optional minus sign, must have at least one
+  // digit before and after a decimal point, may not have prefixed zeros (unless
+  // the integer part is zero), and may include an exponent part (e.g., "e-10").
+  // Hexadecimal and octal numbers are not allowed.
+  Token::Value ScanJsonNumber();
+
+  // Used to recognize one of the literals "true", "false", or "null". These
+  // are the only valid JSON identifiers (productions JSONBooleanLiteral,
+  // JSONNullLiteral).
+  Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
+
+  // Parse a single JSON value from input (grammar production JSONValue).
+  // A JSON value is either a (double-quoted) string literal, a number literal,
+  // one of "true", "false", or "null", or an object or array literal.
+  Handle<Object> ParseJsonValue();
+
+  // Parse a JSON object literal (grammar production JSONObject).
+  // An object literal is a squiggly-braced and comma separated sequence
+  // (possibly empty) of key/value pairs, where the key is a JSON string
+  // literal, the value is a JSON value, and the two are separated by a colon.
+  // Unlike a JavaScript object literal, a JSON object doesn't allow numbers
+  // or identifiers as keys.
+  Handle<Object> ParseJsonObject();
+
+  // Parses a JSON array literal (grammar production JSONArray). An array
+  // literal is a square-bracketed and comma separated sequence (possibly empty)
+  // of JSON values.
+  // A JSON array doesn't allow leaving out values from the sequence, nor does
+  // it allow a terminal comma, like a JavaScript array does.
+  Handle<Object> ParseJsonArray();
+
+
+  // Mark that a parsing error has happened at the current token, and
+  // return a null handle. Primarily for readability.
+  Handle<Object> ReportUnexpectedToken() { return Handle<Object>::null(); }
+
+  // Peek at the next token.
+  Token::Value Peek() { return next_.token; }
+  // Scan the next token and return the token scanned on the last call.
+  Token::Value Next();
+
+  struct TokenInfo {
+    TokenInfo() : token(Token::ILLEGAL),
+                  beg_pos(0),
+                  end_pos(0) { }
+    Token::Value token;
+    int beg_pos;
+    int end_pos;
+  };
+
+  static const int kInitialSpecialStringSize = 1024;
+
+
+ private:
+  Handle<String> source_;
+  int source_length_;
+  Handle<SeqAsciiString> seq_source_;
+
+  bool is_sequential_ascii_;
+  // Current and next token
+  TokenInfo current_;
+  TokenInfo next_;
+  Isolate* isolate_;
+  uc32 c0_;
+  int position_;
+
+
+  Handle<String> string_val_;
+  double number_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_JSON_PARSER_H_
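Editorial aside, not part of the patch: the new header above exposes a single static entry point, JsonParser::Parse(). A minimal usage sketch from inside v8::internal, assuming a HandleScope and an entered isolate (the local `isolate` pointer and the literal input are illustrative only):

  // Build a JSON source string via the factory (illustrative input).
  Handle<String> json_source = isolate->factory()->NewStringFromAscii(
      CStrVector("{\"answer\": 42}"));
  // Parse it; a null handle signals a parse error (ReportUnexpectedToken()
  // above returns a null handle on failure).
  Handle<Object> parsed = JsonParser::Parse(json_source);
  if (parsed.is_null()) {
    // Parsing failed; bail out.
  }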
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 66b6332..e7aa860 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -127,7 +127,7 @@
     return re;
   }
   pattern = FlattenGetString(pattern);
-  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  CompilationZoneScope zone_scope(isolate, DELETE_ON_EXIT);
   PostponeInterruptsScope postpone(isolate);
   RegExpCompileData parse_result;
   FlatStringReader reader(isolate, pattern);
@@ -295,23 +295,62 @@
 #else  // V8_INTERPRETED_REGEXP (RegExp native code)
   if (compiled_code->IsCode()) return true;
 #endif
+  // The code may have been marked as flushable; if it has not actually been
+  // flushed yet, a saved copy is still kept at the saved code index.
+  Object* saved_code = re->DataAt(JSRegExp::saved_code_index(is_ascii));
+  if (saved_code->IsCode()) {
+    // Reinstate the code in the original place.
+    re->SetDataAt(JSRegExp::code_index(is_ascii), saved_code);
+    ASSERT(compiled_code->IsSmi());
+    return true;
+  }
   return CompileIrregexp(re, is_ascii);
 }
 
 
+static bool CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
+                                            bool is_ascii,
+                                            Handle<String> error_message,
+                                            Isolate* isolate) {
+  Factory* factory = isolate->factory();
+  Handle<FixedArray> elements = factory->NewFixedArray(2);
+  elements->set(0, re->Pattern());
+  elements->set(1, *error_message);
+  Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+  Handle<Object> regexp_err =
+      factory->NewSyntaxError("malformed_regexp", array);
+  isolate->Throw(*regexp_err);
+  return false;
+}
+
+
 bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
   // Compile the RegExp.
   Isolate* isolate = re->GetIsolate();
-  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  CompilationZoneScope zone_scope(isolate, DELETE_ON_EXIT);
   PostponeInterruptsScope postpone(isolate);
+  // If the previous compilation attempt failed, the error message is stored
+  // at the saved code index.
   Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
-  if (entry->IsJSObject()) {
-    // If it's a JSObject, a previous compilation failed and threw this object.
-    // Re-throw the object without trying again.
-    isolate->Throw(entry);
+  // When we arrive here, entry can only be a Smi representing an uncompiled
+  // regexp, a previous compilation error, or code that has been flushed.
+  ASSERT(entry->IsSmi());
+  int entry_value = Smi::cast(entry)->value();
+  ASSERT(entry_value == JSRegExp::kUninitializedValue ||
+         entry_value == JSRegExp::kCompilationErrorValue ||
+         (entry_value < JSRegExp::kCodeAgeMask && entry_value >= 0));
+
+  if (entry_value == JSRegExp::kCompilationErrorValue) {
+    // A previous compilation failed and threw an error which we store in
+    // the saved code index (we store the error message, not the actual
+    // error). Recreate the error object and throw it.
+    Object* error_string = re->DataAt(JSRegExp::saved_code_index(is_ascii));
+    ASSERT(error_string->IsString());
+    Handle<String> error_message(String::cast(error_string));
+    CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
     return false;
   }
-  ASSERT(entry->IsTheHole());
 
   JSRegExp::Flags flags = re->GetFlags();
 
@@ -340,17 +379,9 @@
                             is_ascii);
   if (result.error_message != NULL) {
     // Unable to compile regexp.
-    Factory* factory = isolate->factory();
-    Handle<FixedArray> elements = factory->NewFixedArray(2);
-    elements->set(0, *pattern);
     Handle<String> error_message =
-        factory->NewStringFromUtf8(CStrVector(result.error_message));
-    elements->set(1, *error_message);
-    Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
-    Handle<Object> regexp_err =
-        factory->NewSyntaxError("malformed_regexp", array);
-    isolate->Throw(*regexp_err);
-    re->SetDataAt(JSRegExp::code_index(is_ascii), *regexp_err);
+        isolate->factory()->NewStringFromUtf8(CStrVector(result.error_message));
+    CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
     return false;
   }
 
@@ -460,6 +491,7 @@
   ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
   do {
     bool is_ascii = subject->IsAsciiRepresentation();
+    EnsureCompiledIrregexp(regexp, is_ascii);
     Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
     NativeRegExpMacroAssembler::Result res =
         NativeRegExpMacroAssembler::Match(code,
@@ -810,6 +842,11 @@
   inline bool ignore_case() { return ignore_case_; }
   inline bool ascii() { return ascii_; }
 
+  int current_expansion_factor() { return current_expansion_factor_; }
+  void set_current_expansion_factor(int value) {
+    current_expansion_factor_ = value;
+  }
+
   static const int kNoRegister = -1;
  private:
   EndNode* accept_;
@@ -820,6 +857,7 @@
   bool ignore_case_;
   bool ascii_;
   bool reg_exp_too_big_;
+  int current_expansion_factor_;
 };
 
 
@@ -847,7 +885,8 @@
       recursion_depth_(0),
       ignore_case_(ignore_case),
       ascii_(ascii),
-      reg_exp_too_big_(false) {
+      reg_exp_too_big_(false),
+      current_expansion_factor_(1) {
   accept_ = new EndNode(EndNode::ACCEPT);
   ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
 }
@@ -3727,6 +3766,44 @@
 }
 
 
+// Scoped object to keep track of how much we unroll quantifier loops in the
+// regexp graph generator.
+class RegExpExpansionLimiter {
+ public:
+  static const int kMaxExpansionFactor = 6;
+  RegExpExpansionLimiter(RegExpCompiler* compiler, int factor)
+      : compiler_(compiler),
+        saved_expansion_factor_(compiler->current_expansion_factor()),
+        ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) {
+    ASSERT(factor > 0);
+    if (ok_to_expand_) {
+      if (factor > kMaxExpansionFactor) {
+        // Avoid integer overflow of the current expansion factor.
+        ok_to_expand_ = false;
+        compiler->set_current_expansion_factor(kMaxExpansionFactor + 1);
+      } else {
+        int new_factor = saved_expansion_factor_ * factor;
+        ok_to_expand_ = (new_factor <= kMaxExpansionFactor);
+        compiler->set_current_expansion_factor(new_factor);
+      }
+    }
+  }
+
+  ~RegExpExpansionLimiter() {
+    compiler_->set_current_expansion_factor(saved_expansion_factor_);
+  }
+
+  bool ok_to_expand() { return ok_to_expand_; }
+
+ private:
+  RegExpCompiler* compiler_;
+  int saved_expansion_factor_;
+  bool ok_to_expand_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpExpansionLimiter);
+};
+
+
 RegExpNode* RegExpQuantifier::ToNode(int min,
                                      int max,
                                      bool is_greedy,
@@ -3766,38 +3843,46 @@
   } else if (FLAG_regexp_optimization && !needs_capture_clearing) {
     // Only unroll if there are no captures and the body can't be
     // empty.
-    if (min > 0 && min <= kMaxUnrolledMinMatches) {
-      int new_max = (max == kInfinity) ? max : max - min;
-      // Recurse once to get the loop or optional matches after the fixed ones.
-      RegExpNode* answer = ToNode(
-          0, new_max, is_greedy, body, compiler, on_success, true);
-      // Unroll the forced matches from 0 to min.  This can cause chains of
-      // TextNodes (which the parser does not generate).  These should be
-      // combined if it turns out they hinder good code generation.
-      for (int i = 0; i < min; i++) {
-        answer = body->ToNode(compiler, answer);
-      }
-      return answer;
-    }
-    if (max <= kMaxUnrolledMaxMatches) {
-      ASSERT(min == 0);
-      // Unroll the optional matches up to max.
-      RegExpNode* answer = on_success;
-      for (int i = 0; i < max; i++) {
-        ChoiceNode* alternation = new ChoiceNode(2);
-        if (is_greedy) {
-          alternation->AddAlternative(GuardedAlternative(body->ToNode(compiler,
-                                                                      answer)));
-          alternation->AddAlternative(GuardedAlternative(on_success));
-        } else {
-          alternation->AddAlternative(GuardedAlternative(on_success));
-          alternation->AddAlternative(GuardedAlternative(body->ToNode(compiler,
-                                                                      answer)));
+    {
+      RegExpExpansionLimiter limiter(
+          compiler, min + ((max != min) ? 1 : 0));
+      if (min > 0 && min <= kMaxUnrolledMinMatches && limiter.ok_to_expand()) {
+        int new_max = (max == kInfinity) ? max : max - min;
+        // Recurse once to get the loop or optional matches after the fixed
+        // ones.
+        RegExpNode* answer = ToNode(
+            0, new_max, is_greedy, body, compiler, on_success, true);
+        // Unroll the forced matches from 0 to min.  This can cause chains of
+        // TextNodes (which the parser does not generate).  These should be
+        // combined if it turns out they hinder good code generation.
+        for (int i = 0; i < min; i++) {
+          answer = body->ToNode(compiler, answer);
         }
-        answer = alternation;
-        if (not_at_start) alternation->set_not_at_start();
+        return answer;
       }
-      return answer;
+    }
+    if (max <= kMaxUnrolledMaxMatches && min == 0) {
+      ASSERT(max > 0);  // Due to the 'if' above.
+      RegExpExpansionLimiter limiter(compiler, max);
+      if (limiter.ok_to_expand()) {
+        // Unroll the optional matches up to max.
+        RegExpNode* answer = on_success;
+        for (int i = 0; i < max; i++) {
+          ChoiceNode* alternation = new ChoiceNode(2);
+          if (is_greedy) {
+            alternation->AddAlternative(
+                GuardedAlternative(body->ToNode(compiler, answer)));
+            alternation->AddAlternative(GuardedAlternative(on_success));
+          } else {
+            alternation->AddAlternative(GuardedAlternative(on_success));
+            alternation->AddAlternative(
+                GuardedAlternative(body->ToNode(compiler, answer)));
+          }
+          answer = alternation;
+          if (not_at_start) alternation->set_not_at_start();
+        }
+        return answer;
+      }
     }
   }
   bool has_min = min > 0;
@@ -4123,12 +4208,6 @@
 }
 
 
-static void AddUncanonicals(Isolate* isolate,
-                            ZoneList<CharacterRange>* ranges,
-                            int bottom,
-                            int top);
-
-
 void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
                                         bool is_ascii) {
   Isolate* isolate = Isolate::Current();
@@ -4289,101 +4368,6 @@
 }
 
 
-static void AddUncanonicals(Isolate* isolate,
-                            ZoneList<CharacterRange>* ranges,
-                            int bottom,
-                            int top) {
-  unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
-  // Zones with no case mappings.  There is a DEBUG-mode loop to assert that
-  // this table is correct.
-  // 0x0600 - 0x0fff
-  // 0x1100 - 0x1cff
-  // 0x2000 - 0x20ff
-  // 0x2200 - 0x23ff
-  // 0x2500 - 0x2bff
-  // 0x2e00 - 0xa5ff
-  // 0xa800 - 0xfaff
-  // 0xfc00 - 0xfeff
-  const int boundary_count = 18;
-  int boundaries[] = {
-      0x600, 0x1000, 0x1100, 0x1d00, 0x2000, 0x2100, 0x2200, 0x2400, 0x2500,
-      0x2c00, 0x2e00, 0xa600, 0xa800, 0xfb00, 0xfc00, 0xff00};
-
-  // Special ASCII rule from spec can save us some work here.
-  if (bottom == 0x80 && top == 0xffff) return;
-
-  if (top <= boundaries[0]) {
-    CharacterRange range(bottom, top);
-    range.AddCaseEquivalents(ranges, false);
-    return;
-  }
-
-  // Split up very large ranges.  This helps remove ranges where there are no
-  // case mappings.
-  for (int i = 0; i < boundary_count; i++) {
-    if (bottom < boundaries[i] && top >= boundaries[i]) {
-      AddUncanonicals(isolate, ranges, bottom, boundaries[i] - 1);
-      AddUncanonicals(isolate, ranges, boundaries[i], top);
-      return;
-    }
-  }
-
-  // If we are completely in a zone with no case mappings then we are done.
-  for (int i = 0; i < boundary_count; i += 2) {
-    if (bottom >= boundaries[i] && top < boundaries[i + 1]) {
-#ifdef DEBUG
-      for (int j = bottom; j <= top; j++) {
-        unsigned current_char = j;
-        int length = isolate->jsregexp_uncanonicalize()->get(current_char,
-                                                             '\0', chars);
-        for (int k = 0; k < length; k++) {
-          ASSERT(chars[k] == current_char);
-        }
-      }
-#endif
-      return;
-    }
-  }
-
-  // Step through the range finding equivalent characters.
-  ZoneList<unibrow::uchar> *characters = new ZoneList<unibrow::uchar>(100);
-  for (int i = bottom; i <= top; i++) {
-    int length = isolate->jsregexp_uncanonicalize()->get(i, '\0', chars);
-    for (int j = 0; j < length; j++) {
-      uc32 chr = chars[j];
-      if (chr != i && (chr < bottom || chr > top)) {
-        characters->Add(chr);
-      }
-    }
-  }
-
-  // Step through the equivalent characters finding simple ranges and
-  // adding ranges to the character class.
-  if (characters->length() > 0) {
-    int new_from = characters->at(0);
-    int new_to = new_from;
-    for (int i = 1; i < characters->length(); i++) {
-      int chr = characters->at(i);
-      if (chr == new_to + 1) {
-        new_to++;
-      } else {
-        if (new_to == new_from) {
-          ranges->Add(CharacterRange::Singleton(new_from));
-        } else {
-          ranges->Add(CharacterRange(new_from, new_to));
-        }
-        new_from = new_to = chr;
-      }
-    }
-    if (new_to == new_from) {
-      ranges->Add(CharacterRange::Singleton(new_from));
-    } else {
-      ranges->Add(CharacterRange(new_from, new_to));
-    }
-  }
-}
-
-
 ZoneList<CharacterRange>* CharacterSet::ranges() {
   if (ranges_ == NULL) {
     ranges_ = new ZoneList<CharacterRange>(2);
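Editorial aside on the RegExpExpansionLimiter added above, not part of the patch: the limiter tracks the product of unrolling factors along the current path through the regexp graph, and quantifier unrolling is only attempted while that product stays at or below kMaxExpansionFactor. A worked example of the arithmetic, assuming kMaxExpansionFactor == 6 as defined above:

  int factor = 1;   // RegExpCompiler starts with current_expansion_factor_ = 1
  factor *= 3;      // outer {3}: 3 <= 6, ok_to_expand() is true, unroll
  factor *= 2;      // nested {2}: 6 <= 6, still unrolled
  // A further nested {2} would give 12 > 6, so ok_to_expand() becomes false
  // and the generic (non-unrolled) loop construction is emitted instead.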
diff --git a/src/jsregexp.h b/src/jsregexp.h
index d56b650..58958d8 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -28,6 +28,7 @@
 #ifndef V8_JSREGEXP_H_
 #define V8_JSREGEXP_H_
 
+#include "allocation.h"
 #include "macro-assembler.h"
 #include "zone-inl.h"
 
diff --git a/src/list-inl.h b/src/list-inl.h
index eeaea65..8ef7514 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -46,10 +46,16 @@
 
 template<typename T, class P>
 void List<T, P>::AddAll(const List<T, P>& other) {
-  int result_length = length_ + other.length_;
+  AddAll(other.ToVector());
+}
+
+
+template<typename T, class P>
+void List<T, P>::AddAll(const Vector<T>& other) {
+  int result_length = length_ + other.length();
   if (capacity_ < result_length) Resize(result_length);
-  for (int i = 0; i < other.length_; i++) {
-    data_[length_ + i] = other.data_[i];
+  for (int i = 0; i < other.length(); i++) {
+    data_[length_ + i] = other.at(i);
   }
   length_ = result_length;
 }
diff --git a/src/list.h b/src/list.h
index 9a2e698..ca2b7bc 100644
--- a/src/list.h
+++ b/src/list.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,8 @@
 #ifndef V8_LIST_H_
 #define V8_LIST_H_
 
+#include "utils.h"
+
 namespace v8 {
 namespace internal {
 
@@ -80,7 +82,7 @@
   INLINE(int length() const) { return length_; }
   INLINE(int capacity() const) { return capacity_; }
 
-  Vector<T> ToVector() { return Vector<T>(data_, length_); }
+  Vector<T> ToVector() const { return Vector<T>(data_, length_); }
 
   Vector<const T> ToConstVector() { return Vector<const T>(data_, length_); }
 
@@ -91,6 +93,9 @@
   // Add all the elements from the argument list to this list.
   void AddAll(const List<T, P>& other);
 
+  // Add all the elements from the vector to this list.
+  void AddAll(const Vector<T>& other);
+
   // Inserts the element at the specific index.
   void InsertAt(int index, const T& element);
 
@@ -159,6 +164,11 @@
   DISALLOW_COPY_AND_ASSIGN(List);
 };
 
+class Map;
+class Code;
+typedef List<Map*> MapList;
+typedef List<Code*> CodeList;
+
 } }  // namespace v8::internal
 
 #endif  // V8_LIST_H_
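Editorial aside, not part of the patch: with the new AddAll(const Vector<T>&) overload above, AddAll(const List&) now simply forwards through ToVector(), and callers can append raw array slices directly. A minimal sketch with hypothetical values:

  int raw[] = { 1, 2, 3 };
  List<int> accumulated(4);                 // initial capacity of 4, illustrative
  accumulated.AddAll(Vector<int>(raw, 3));  // copies 1, 2, 3 into the list
  ASSERT(accumulated.length() == 3);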
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index f62a7db..50ed122 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -1029,6 +1029,22 @@
       chunk_->AddGapMove(cur_block->last_instruction_index() - 1,
                          operand,
                          phi_operand);
+
+      // We are going to insert a move before the branch instruction.
+      // Some branch instructions (e.g. loops' back edges)
+      // can potentially cause a GC so they have a pointer map.
+      // By inserting a move we essentially create a copy of a
+      // value which is invisible to PopulatePointerMaps(), because we store
+      // it into a location different from the operand of a live range
+      // covering a branch instruction.
+      // Thus we need to manually record a pointer.
+      if (phi->representation().IsTagged()) {
+        LInstruction* branch =
+            InstructionAt(cur_block->last_instruction_index());
+        if (branch->HasPointerMap()) {
+          branch->pointer_map()->RecordPointer(phi_operand);
+        }
+      }
     }
 
     LiveRange* live_range = LiveRangeFor(phi->id());
@@ -1116,7 +1132,7 @@
         // We are going to insert a move before the branch instruction.
         // Some branch instructions (e.g. loops' back edges)
         // can potentially cause a GC so they have a pointer map.
-        // By insterting a move we essentially create a copy of a
+        // By inserting a move we essentially create a copy of a
         // value which is invisible to PopulatePointerMaps(), because we store
         // it into a location different from the operand of a live range
         // covering a branch instruction.
@@ -2013,12 +2029,12 @@
   // We have no choice
   if (start_instr == end_instr) return end;
 
-  HBasicBlock* end_block = GetBlock(start);
-  HBasicBlock* start_block = GetBlock(end);
+  HBasicBlock* start_block = GetBlock(start);
+  HBasicBlock* end_block = GetBlock(end);
 
   if (end_block == start_block) {
-    // The interval is split in the same basic block. Split at latest possible
-    // position.
+    // The interval is split in the same basic block. Split at the latest
+    // possible position.
     return end;
   }
 
@@ -2029,7 +2045,9 @@
     block = block->parent_loop_header();
   }
 
-  if (block == end_block) return end;
+  // We did not find any suitable outer loop. Split at the latest possible
+  // position unless end_block is a loop header itself.
+  if (block == end_block && !end_block->IsLoopHeader()) return end;
 
   return LifetimePosition::FromInstructionIndex(
       block->first_instruction_index());
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index f109c45..d456558 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 
 #include "v8.h"
 
-#include "data-flow.h"
+#include "allocation.h"
 #include "lithium.h"
 #include "zone.h"
 
diff --git a/src/lithium.cc b/src/lithium.cc
index aeac2db..62b263b 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -166,4 +166,25 @@
 }
 
 
+int ExternalArrayTypeToShiftSize(ExternalArrayType type) {
+  switch (type) {
+    case kExternalByteArray:
+    case kExternalUnsignedByteArray:
+    case kExternalPixelArray:
+      return 0;
+    case kExternalShortArray:
+    case kExternalUnsignedShortArray:
+      return 1;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+    case kExternalFloatArray:
+      return 2;
+    case kExternalDoubleArray:
+      return 3;
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
 } }  // namespace v8::internal
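Editorial aside, not part of the patch: the shift size returned by ExternalArrayTypeToShiftSize() above is log2 of the element width in bytes, so code generators can compute element byte offsets with a shift rather than a multiply. For example:

  // Byte offset of element i in an external int32 array (element size 4):
  int shift = ExternalArrayTypeToShiftSize(kExternalIntArray);  // == 2
  int byte_offset = 7 << shift;                                 // element 7 -> 28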
diff --git a/src/lithium.h b/src/lithium.h
index 280da47..ffc236d 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -28,6 +28,7 @@
 #ifndef V8_LITHIUM_H_
 #define V8_LITHIUM_H_
 
+#include "allocation.h"
 #include "hydrogen.h"
 #include "safepoint-table.h"
 
@@ -588,6 +589,10 @@
   ShallowIterator current_iterator_;
 };
 
+
+int ExternalArrayTypeToShiftSize(ExternalArrayType type);
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_LITHIUM_H_
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 1466766..e89cae3 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,8 +30,8 @@
 
 #include "liveedit.h"
 
-#include "compiler.h"
 #include "compilation-cache.h"
+#include "compiler.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "global-handles.h"
@@ -268,17 +268,10 @@
 }
 
 
-static bool CompareSubstrings(Isolate* isolate, Handle<String> s1, int pos1,
+static bool CompareSubstrings(Handle<String> s1, int pos1,
                               Handle<String> s2, int pos2, int len) {
-  StringInputBuffer& buf1 = *isolate->liveedit_compare_substrings_buf1();
-  StringInputBuffer& buf2 = *isolate->liveedit_compare_substrings_buf2();
-  buf1.Reset(*s1);
-  buf1.Seek(pos1);
-  buf2.Reset(*s2);
-  buf2.Seek(pos2);
   for (int i = 0; i < len; i++) {
-    ASSERT(buf1.has_more() && buf2.has_more());
-    if (buf1.GetNext() != buf2.GetNext()) {
+    if (s1->Get(i + pos1) != s2->Get(i + pos2)) {
       return false;
     }
   }
@@ -410,9 +403,9 @@
 // Represents 2 strings as 2 arrays of lines.
 class LineArrayCompareInput : public Comparator::Input {
  public:
-  LineArrayCompareInput(Isolate* isolate, Handle<String> s1, Handle<String> s2,
+  LineArrayCompareInput(Handle<String> s1, Handle<String> s2,
                         LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
-      : isolate_(isolate), s1_(s1), s2_(s2), line_ends1_(line_ends1),
+      : s1_(s1), s2_(s2), line_ends1_(line_ends1),
         line_ends2_(line_ends2) {
   }
   int getLength1() {
@@ -431,12 +424,11 @@
     if (len1 != len2) {
       return false;
     }
-    return CompareSubstrings(isolate_, s1_, line_start1, s2_, line_start2,
+    return CompareSubstrings(s1_, line_start1, s2_, line_start2,
                              len1);
   }
 
  private:
-  Isolate* isolate_;
   Handle<String> s1_;
   Handle<String> s2_;
   LineEndsWrapper line_ends1_;
@@ -492,11 +484,13 @@
 
 Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
                                          Handle<String> s2) {
+  s1 = FlattenGetString(s1);
+  s2 = FlattenGetString(s2);
+
   LineEndsWrapper line_ends1(s1);
   LineEndsWrapper line_ends2(s2);
 
-  LineArrayCompareInput
-      input(Isolate::Current(), s1, s2, line_ends1, line_ends2);
+  LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
   TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
 
   Comparator::CalculateDifference(&input, &output);
@@ -533,12 +527,12 @@
 
 // Wraps any object into a OpaqueReference, that will hide the object
 // from JavaScript.
-static Handle<JSValue> WrapInJSValue(Object* object) {
+static Handle<JSValue> WrapInJSValue(Handle<Object> object) {
   Handle<JSFunction> constructor =
       Isolate::Current()->opaque_reference_function();
   Handle<JSValue> result =
       Handle<JSValue>::cast(FACTORY->NewJSObject(constructor));
-  result->set_value(object);
+  result->set_value(*object);
   return result;
 }
 
@@ -605,17 +599,17 @@
   }
   void SetFunctionCode(Handle<Code> function_code,
       Handle<Object> code_scope_info) {
-    Handle<JSValue> code_wrapper = WrapInJSValue(*function_code);
+    Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
     this->SetField(kCodeOffset_, code_wrapper);
 
-    Handle<JSValue> scope_wrapper = WrapInJSValue(*code_scope_info);
+    Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
     this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
   }
   void SetOuterScopeInfo(Handle<Object> scope_info_array) {
     this->SetField(kOuterScopeInfoOffset_, scope_info_array);
   }
   void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
-    Handle<JSValue> info_holder = WrapInJSValue(*info);
+    Handle<JSValue> info_holder = WrapInJSValue(info);
     this->SetField(kSharedFunctionInfoOffset_, info_holder);
   }
   int GetParentIndex() {
@@ -672,7 +666,7 @@
                      Handle<SharedFunctionInfo> info) {
     HandleScope scope;
     this->SetField(kFunctionNameOffset_, name);
-    Handle<JSValue> info_holder = WrapInJSValue(*info);
+    Handle<JSValue> info_holder = WrapInJSValue(info);
     this->SetField(kSharedInfoOffset_, info_holder);
     this->SetSmiValueField(kStartPositionOffset_, start_position);
     this->SetSmiValueField(kEndPositionOffset_, end_position);
@@ -820,7 +814,7 @@
 JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
                                      Handle<String> source) {
   Isolate* isolate = Isolate::Current();
-  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  CompilationZoneScope zone_scope(isolate, DELETE_ON_EXIT);
 
   FunctionInfoListener listener;
   Handle<Object> original_source = Handle<Object>(script->source());
@@ -914,7 +908,7 @@
   AssertNoAllocation no_allocations_please;
 
   // A zone scope for ReferenceCollectorVisitor.
-  ZoneScope scope(DELETE_ON_EXIT);
+  ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
 
   ReferenceCollectorVisitor visitor(original);
 
@@ -1411,6 +1405,9 @@
           Builtins::kFrameDropper_LiveEdit)) {
     // OK, we can drop our own code.
     *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
+  } else if (pre_top_frame_code ==
+      isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
+    *mode = Debug::FRAME_DROPPED_IN_RETURN_CALL;
   } else if (pre_top_frame_code->kind() == Code::STUB &&
       pre_top_frame_code->major_key()) {
     // Entry from our unit tests, it's fine, we support this case.
@@ -1461,8 +1458,9 @@
 // removing all listed function if possible and if do_drop is true.
 static const char* DropActivationsInActiveThread(
     Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
-  Debug* debug = Isolate::Current()->debug();
-  ZoneScope scope(DELETE_ON_EXIT);
+  Isolate* isolate = Isolate::Current();
+  Debug* debug = isolate->debug();
+  ZoneScope scope(isolate, DELETE_ON_EXIT);
   Vector<StackFrame*> frames = CreateStackMap();
 
   int array_len = Smi::cast(shared_info_array->length())->value();
@@ -1682,7 +1680,7 @@
 }
 
 
-bool LiveEditFunctionTracker::IsActive() {
+bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
   return false;
 }
 
diff --git a/src/liveedit.h b/src/liveedit.h
index 36c2c76..60e6238 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -49,6 +49,7 @@
 // instantiate newly compiled functions.
 
 
+#include "allocation.h"
 #include "compiler.h"
 
 namespace v8 {
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
index 5795a6b..29a9b01 100644
--- a/src/liveobjectlist.cc
+++ b/src/liveobjectlist.cc
@@ -118,7 +118,7 @@
   v(Code, "meta: Code") \
   v(Map, "meta: Map") \
   v(Oddball, "Oddball") \
-  v(Proxy, "meta: Proxy") \
+  v(Foreign, "meta: Foreign") \
   v(SharedFunctionInfo, "meta: SharedFunctionInfo") \
   v(Struct, "meta: Struct") \
   \
diff --git a/src/log-utils.cc b/src/log-utils.cc
index a854ade..1bba7cd 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -126,7 +126,7 @@
   : write_to_file_(false),
     is_stopped_(false),
     output_handle_(NULL),
-    output_code_handle_(NULL),
+    ll_output_handle_(NULL),
     output_buffer_(NULL),
     mutex_(NULL),
     message_buffer_(NULL),
@@ -168,7 +168,7 @@
 
   bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
       || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
-      || FLAG_log_regexp || FLAG_log_state_changes;
+      || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
 
   bool open_log_file = start_logging || FLAG_prof_lazy;
 
@@ -233,7 +233,12 @@
 }
 
 
-static const char kCodeLogExt[] = ".code";
+// Extension added to V8 log file name to get the low-level log name.
+static const char kLowLevelLogExt[] = ".ll";
+
+// File buffer size of the low-level log. We don't use the default to
+// minimize the associated overhead.
+static const int kLowLevelLogBufferSize = 2 * MB;
 
 
 void Log::OpenFile(const char* name) {
@@ -241,14 +246,13 @@
   output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
   write_to_file_ = true;
   if (FLAG_ll_prof) {
-    // Open a file for logging the contents of code objects so that
-    // they can be disassembled later.
-    size_t name_len = strlen(name);
-    ScopedVector<char> code_name(
-        static_cast<int>(name_len + sizeof(kCodeLogExt)));
-    memcpy(code_name.start(), name, name_len);
-    memcpy(code_name.start() + name_len, kCodeLogExt, sizeof(kCodeLogExt));
-    output_code_handle_ = OS::FOpen(code_name.start(), OS::LogFileOpenMode);
+    // Open the low-level log file.
+    size_t len = strlen(name);
+    ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLowLevelLogExt)));
+    memcpy(ll_name.start(), name, len);
+    memcpy(ll_name.start() + len, kLowLevelLogExt, sizeof(kLowLevelLogExt));
+    ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
+    setvbuf(ll_output_handle_, NULL, _IOFBF, kLowLevelLogBufferSize);
   }
 }
 
@@ -266,8 +270,8 @@
   if (write_to_file_) {
     if (output_handle_ != NULL) fclose(output_handle_);
     output_handle_ = NULL;
-    if (output_code_handle_ != NULL) fclose(output_code_handle_);
-    output_code_handle_ = NULL;
+    if (ll_output_handle_ != NULL) fclose(ll_output_handle_);
+    ll_output_handle_ = NULL;
   } else {
     delete output_buffer_;
     output_buffer_ = NULL;
@@ -361,6 +365,7 @@
 
 
 void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
+  if (str == NULL) return;
   AssertNoAllocation no_heap_allocation;  // Ensure string stay valid.
   int len = str->length();
   if (len > 0x1000)
diff --git a/src/log-utils.h b/src/log-utils.h
index 255c73c..81bbf77 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -28,6 +28,8 @@
 #ifndef V8_LOG_UTILS_H_
 #define V8_LOG_UTILS_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -154,8 +156,8 @@
   // mutex_ should be acquired before using output_handle_ or output_buffer_.
   FILE* output_handle_;
 
-  // Used when low-level profiling is active to save code object contents.
-  FILE* output_code_handle_;
+  // Used when low-level profiling is active.
+  FILE* ll_output_handle_;
 
   LogDynamicBuffer* output_buffer_;
 
diff --git a/src/log.cc b/src/log.cc
index 3ce2072..0474ce0 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -334,10 +334,187 @@
 }
 
 
+// Low-level profiling event structures.
+
+struct LowLevelCodeCreateStruct {
+  static const char kTag = 'C';
+
+  int32_t name_size;
+  Address code_address;
+  int32_t code_size;
+};
+
+
+struct LowLevelCodeMoveStruct {
+  static const char kTag = 'M';
+
+  Address from_address;
+  Address to_address;
+};
+
+
+struct LowLevelCodeDeleteStruct {
+  static const char kTag = 'D';
+
+  Address address;
+};
+
+
+struct LowLevelSnapshotPositionStruct {
+  static const char kTag = 'P';
+
+  Address address;
+  int32_t position;
+};
+
+
+static const char kCodeMovingGCTag = 'G';
+
+
 //
 // Logger class implementation.
 //
 
+class Logger::NameMap {
+ public:
+  NameMap() : impl_(&PointerEquals) {}
+
+  ~NameMap() {
+    for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
+      DeleteArray(static_cast<const char*>(p->value));
+    }
+  }
+
+  void Insert(Address code_address, const char* name, int name_size) {
+    HashMap::Entry* entry = FindOrCreateEntry(code_address);
+    if (entry->value == NULL) {
+      entry->value = CopyName(name, name_size);
+    }
+  }
+
+  const char* Lookup(Address code_address) {
+    HashMap::Entry* entry = FindEntry(code_address);
+    return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
+  }
+
+  void Remove(Address code_address) {
+    HashMap::Entry* entry = FindEntry(code_address);
+    if (entry != NULL) DeleteArray(static_cast<const char*>(entry->value));
+    RemoveEntry(entry);
+  }
+
+  void Move(Address from, Address to) {
+    if (from == to) return;
+    HashMap::Entry* from_entry = FindEntry(from);
+    ASSERT(from_entry != NULL);
+    void* value = from_entry->value;
+    RemoveEntry(from_entry);
+    HashMap::Entry* to_entry = FindOrCreateEntry(to);
+    ASSERT(to_entry->value == NULL);
+    to_entry->value = value;
+  }
+
+ private:
+  static bool PointerEquals(void* lhs, void* rhs) {
+    return lhs == rhs;
+  }
+
+  static char* CopyName(const char* name, int name_size) {
+    char* result = NewArray<char>(name_size + 1);
+    for (int i = 0; i < name_size; ++i) {
+      char c = name[i];
+      if (c == '\0') c = ' ';
+      result[i] = c;
+    }
+    result[name_size] = '\0';
+    return result;
+  }
+
+  HashMap::Entry* FindOrCreateEntry(Address code_address) {
+    return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
+  }
+
+  HashMap::Entry* FindEntry(Address code_address) {
+    return impl_.Lookup(code_address, ComputePointerHash(code_address), false);
+  }
+
+  void RemoveEntry(HashMap::Entry* entry) {
+    impl_.Remove(entry->key, entry->hash);
+  }
+
+  HashMap impl_;
+
+  DISALLOW_COPY_AND_ASSIGN(NameMap);
+};
+
+
+class Logger::NameBuffer {
+ public:
+  NameBuffer() { Reset(); }
+
+  void Reset() {
+    utf8_pos_ = 0;
+  }
+
+  void AppendString(String* str) {
+    if (str == NULL) return;
+    if (str->HasOnlyAsciiChars()) {
+      int utf8_length = Min(str->length(), kUtf8BufferSize - utf8_pos_);
+      String::WriteToFlat(str, utf8_buffer_ + utf8_pos_, 0, utf8_length);
+      utf8_pos_ += utf8_length;
+      return;
+    }
+    int uc16_length = Min(str->length(), kUc16BufferSize);
+    String::WriteToFlat(str, uc16_buffer_, 0, uc16_length);
+    for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
+      uc16 c = uc16_buffer_[i];
+      if (c <= String::kMaxAsciiCharCodeU) {
+        utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
+      } else {
+        int char_length = unibrow::Utf8::Length(c);
+        if (utf8_pos_ + char_length > kUtf8BufferSize) break;
+        unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c);
+        utf8_pos_ += char_length;
+      }
+    }
+  }
+
+  void AppendBytes(const char* bytes, int size) {
+    size = Min(size, kUtf8BufferSize - utf8_pos_);
+    memcpy(utf8_buffer_ + utf8_pos_, bytes, size);
+    utf8_pos_ += size;
+  }
+
+  void AppendBytes(const char* bytes) {
+    AppendBytes(bytes, StrLength(bytes));
+  }
+
+  void AppendByte(char c) {
+    if (utf8_pos_ >= kUtf8BufferSize) return;
+    utf8_buffer_[utf8_pos_++] = c;
+  }
+
+  void AppendInt(int n) {
+    Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_);
+    int size = OS::SNPrintF(buffer, "%d", n);
+    if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) {
+      utf8_pos_ += size;
+    }
+  }
+
+  const char* get() { return utf8_buffer_; }
+  int size() const { return utf8_pos_; }
+
+ private:
+  static const int kUtf8BufferSize = 512;
+  static const int kUc16BufferSize = 128;
+
+  int utf8_pos_;
+  char utf8_buffer_[kUtf8BufferSize];
+  uc16 uc16_buffer_[kUc16BufferSize];
+};
+
+
 Logger::Logger()
   : ticker_(NULL),
     profiler_(NULL),
@@ -347,6 +524,8 @@
     cpu_profiler_nesting_(0),
     heap_profiler_nesting_(0),
     log_(new Log(this)),
+    name_buffer_(new NameBuffer),
+    address_to_name_map_(NULL),
     is_initialized_(false),
     last_address_(NULL),
     prev_sp_(NULL),
@@ -355,10 +534,14 @@
     prev_code_(NULL) {
 }
 
+
 Logger::~Logger() {
+  delete address_to_name_map_;
+  delete name_buffer_;
   delete log_;
 }
 
+
 #define DECLARE_EVENT(ignore1, name) name,
 static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
   LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
@@ -735,7 +918,20 @@
                              Code* code,
                              const char* comment) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[tag]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendBytes(comment);
+  }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,%s,",
              kLogEventsNames[CODE_CREATION_EVENT],
@@ -749,7 +945,6 @@
     msg.Append(*p);
   }
   msg.Append('"');
-  LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -760,13 +955,30 @@
                              Code* code,
                              String* name) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (name != NULL) {
-    SmartPointer<char> str =
-        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    CodeCreateEvent(tag, code, *str);
-  } else {
-    CodeCreateEvent(tag, code, "");
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[tag]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendString(name);
   }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
+  LogMessageBuilder msg(this);
+  msg.Append("%s,%s,",
+             kLogEventsNames[CODE_CREATION_EVENT],
+             kLogEventsNames[tag]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"", code->ExecutableSize());
+  msg.AppendDetailed(name, false);
+  msg.Append('"');
+  msg.Append('\n');
+  msg.WriteToLogFile();
 #endif
 }
 
@@ -788,7 +1000,21 @@
                              SharedFunctionInfo* shared,
                              String* name) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[tag]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendBytes(ComputeMarker(code));
+    name_buffer_->AppendString(name);
+  }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
   if (code == Isolate::Current()->builtins()->builtin(
       Builtins::kLazyCompile))
     return;
@@ -803,7 +1029,6 @@
   msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
   msg.AppendAddress(shared->address());
   msg.Append(",%s", ComputeMarker(code));
-  LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -818,7 +1043,25 @@
                              SharedFunctionInfo* shared,
                              String* source, int line) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[tag]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendBytes(ComputeMarker(code));
+    name_buffer_->AppendString(shared->DebugName());
+    name_buffer_->AppendByte(' ');
+    name_buffer_->AppendString(source);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendInt(line);
+  }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
   LogMessageBuilder msg(this);
   SmartPointer<char> name =
       shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -835,7 +1078,6 @@
              line);
   msg.AppendAddress(shared->address());
   msg.Append(",%s", ComputeMarker(code));
-  LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -844,14 +1086,26 @@
 
 void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[tag]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendInt(args_count);
+  }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,%s,",
              kLogEventsNames[CODE_CREATION_EVENT],
              kLogEventsNames[tag]);
   msg.AppendAddress(code->address());
   msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
-  LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -860,10 +1114,8 @@
 
 void Logger::CodeMovingGCEvent() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
-  LogMessageBuilder msg(this);
-  msg.Append("%s\n", kLogEventsNames[CODE_MOVING_GC]);
-  msg.WriteToLogFile();
+  if (!log_->IsEnabled() || !FLAG_ll_prof) return;
+  LowLevelLogWriteBytes(&kCodeMovingGCTag, sizeof(kCodeMovingGCTag));
   OS::SignalCodeMovingGC();
 #endif
 }
@@ -871,7 +1123,20 @@
 
 void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof || Serializer::enabled()) {
+    name_buffer_->Reset();
+    name_buffer_->AppendBytes(kLogEventsNames[REG_EXP_TAG]);
+    name_buffer_->AppendByte(':');
+    name_buffer_->AppendString(source);
+  }
+  if (FLAG_ll_prof) {
+    LowLevelCodeCreateEvent(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (Serializer::enabled()) {
+    RegisterSnapshotCodeName(code, name_buffer_->get(), name_buffer_->size());
+  }
+  if (!FLAG_log_code) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,%s,",
              kLogEventsNames[CODE_CREATION_EVENT],
@@ -880,7 +1145,6 @@
   msg.Append(",%d,\"", code->ExecutableSize());
   msg.AppendDetailed(source, false);
   msg.Append('\"');
-  LowLevelCodeCreateEvent(code, &msg);
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -889,6 +1153,11 @@
 
 void Logger::CodeMoveEvent(Address from, Address to) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
+  if (Serializer::enabled() && address_to_name_map_ != NULL) {
+    address_to_name_map_->Move(from, to);
+  }
   MoveEventInternal(CODE_MOVE_EVENT, from, to);
 #endif
 }
@@ -896,6 +1165,11 @@
 
 void Logger::CodeDeleteEvent(Address from) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
+  if (Serializer::enabled() && address_to_name_map_ != NULL) {
+    address_to_name_map_->Remove(from);
+  }
   DeleteEventInternal(CODE_DELETE_EVENT, from);
 #endif
 }
@@ -903,7 +1177,21 @@
 
 void Logger::SnapshotPositionEvent(Address addr, int pos) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_snapshot_positions) return;
+  if (!log_->IsEnabled()) return;
+  if (FLAG_ll_prof) LowLevelSnapshotPositionEvent(addr, pos);
+  if (Serializer::enabled() && address_to_name_map_ != NULL) {
+    const char* code_name = address_to_name_map_->Lookup(addr);
+    if (code_name == NULL) return;  // Not a code object.
+    LogMessageBuilder msg(this);
+    msg.Append("%s,%d,\"", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
+    for (const char* p = code_name; *p != '\0'; ++p) {
+      if (*p == '"') msg.Append('\\');
+      msg.Append(*p);
+    }
+    msg.Append("\"\n");
+    msg.WriteToLogFile();
+  }
+  if (!FLAG_log_snapshot_positions) return;
   LogMessageBuilder msg(this);
   msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
   msg.AppendAddress(addr);
@@ -1308,7 +1596,7 @@
 
 
 void Logger::LogCodeObject(Object* object) {
-  if (FLAG_log_code) {
+  if (FLAG_log_code || FLAG_ll_prof) {
     Code* code_object = Code::cast(object);
     LogEventsAndTags tag = Logger::STUB_TAG;
     const char* description = "Unknown code from the snapshot";
@@ -1316,7 +1604,8 @@
       case Code::FUNCTION:
       case Code::OPTIMIZED_FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
-      case Code::TYPE_RECORDING_BINARY_OP_IC:   // fall through
+      case Code::UNARY_OP_IC:   // fall through
+      case Code::BINARY_OP_IC:   // fall through
       case Code::COMPARE_IC:  // fall through
       case Code::STUB:
         description =
@@ -1333,10 +1622,6 @@
         description = "A keyed load IC from the snapshot";
         tag = Logger::KEYED_LOAD_IC_TAG;
         break;
-      case Code::KEYED_EXTERNAL_ARRAY_LOAD_IC:
-        description = "A keyed external array load IC from the snapshot";
-        tag = Logger::KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG;
-        break;
       case Code::LOAD_IC:
         description = "A load IC from the snapshot";
         tag = Logger::LOAD_IC_TAG;
@@ -1349,10 +1634,6 @@
         description = "A keyed store IC from the snapshot";
         tag = Logger::KEYED_STORE_IC_TAG;
         break;
-      case Code::KEYED_EXTERNAL_ARRAY_STORE_IC:
-        description = "A keyed external array store IC from the snapshot";
-        tag = Logger::KEYED_EXTERNAL_ARRAY_STORE_IC_TAG;
-        break;
       case Code::CALL_IC:
         description = "A call IC from the snapshot";
         tag = Logger::CALL_IC_TAG;
@@ -1369,7 +1650,7 @@
 
 void Logger::LogCodeInfo() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!log_->IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+  if (!log_->IsEnabled() || !FLAG_ll_prof) return;
 #if V8_TARGET_ARCH_IA32
   const char arch[] = "ia32";
 #elif V8_TARGET_ARCH_X64
@@ -1379,21 +1660,69 @@
 #else
   const char arch[] = "unknown";
 #endif
-  LogMessageBuilder msg(this);
-  msg.Append("code-info,%s,%d\n", arch, Code::kHeaderSize);
-  msg.WriteToLogFile();
+  LowLevelLogWriteBytes(arch, sizeof(arch));
 #endif  // ENABLE_LOGGING_AND_PROFILING
 }
 
 
-void Logger::LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg) {
-  if (!FLAG_ll_prof || log_->output_code_handle_ == NULL) return;
-  int pos = static_cast<int>(ftell(log_->output_code_handle_));
-  size_t rv = fwrite(code->instruction_start(), 1, code->instruction_size(),
-                     log_->output_code_handle_);
-  ASSERT(static_cast<size_t>(code->instruction_size()) == rv);
+void Logger::RegisterSnapshotCodeName(Code* code,
+                                      const char* name,
+                                      int name_size) {
+  ASSERT(Serializer::enabled());
+  if (address_to_name_map_ == NULL) {
+    address_to_name_map_ = new NameMap;
+  }
+  address_to_name_map_->Insert(code->address(), name, name_size);
+}
+
+
+void Logger::LowLevelCodeCreateEvent(Code* code,
+                                     const char* name,
+                                     int name_size) {
+  if (log_->ll_output_handle_ == NULL) return;
+  LowLevelCodeCreateStruct event;
+  event.name_size = name_size;
+  event.code_address = code->instruction_start();
+  ASSERT(event.code_address == code->address() + Code::kHeaderSize);
+  event.code_size = code->instruction_size();
+  LowLevelLogWriteStruct(event);
+  LowLevelLogWriteBytes(name, name_size);
+  LowLevelLogWriteBytes(
+      reinterpret_cast<const char*>(code->instruction_start()),
+      code->instruction_size());
+}
+
+
+void Logger::LowLevelCodeMoveEvent(Address from, Address to) {
+  if (log_->ll_output_handle_ == NULL) return;
+  LowLevelCodeMoveStruct event;
+  event.from_address = from + Code::kHeaderSize;
+  event.to_address = to + Code::kHeaderSize;
+  LowLevelLogWriteStruct(event);
+}
+
+
+void Logger::LowLevelCodeDeleteEvent(Address from) {
+  if (log_->ll_output_handle_ == NULL) return;
+  LowLevelCodeDeleteStruct event;
+  event.address = from + Code::kHeaderSize;
+  LowLevelLogWriteStruct(event);
+}
+
+
+void Logger::LowLevelSnapshotPositionEvent(Address addr, int pos) {
+  if (log_->ll_output_handle_ == NULL) return;
+  LowLevelSnapshotPositionStruct event;
+  event.address = addr + Code::kHeaderSize;
+  event.position = pos;
+  LowLevelLogWriteStruct(event);
+}
+
+
+void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
+  size_t rv = fwrite(bytes, 1, size, log_->ll_output_handle_);
+  ASSERT(static_cast<size_t>(size) == rv);
   USE(rv);
-  msg->Append(",%d", pos);
 }
 
 
@@ -1496,7 +1825,6 @@
 
   // --ll-prof implies --log-code and --log-snapshot-positions.
   if (FLAG_ll_prof) {
-    FLAG_log_code = true;
     FLAG_log_snapshot_positions = true;
   }
 
@@ -1523,7 +1851,7 @@
 
   bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
     || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
-    || FLAG_log_regexp || FLAG_log_state_changes;
+    || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
 
   if (start_logging) {
     logging_nesting_ = 1;
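Editorial aside, not part of the patch: the low-level (.ll) log written above is a binary stream of records, presumably each a one-byte tag (the kTag constants defined earlier) followed by the packed event struct; a code-creation record is additionally followed by name_size name bytes and code_size bytes of copied machine code (see LowLevelCodeCreateEvent() above). A rough reader fragment under those assumptions, where `ll_file` is a hypothetical already-opened FILE*:

  char tag;
  if (fread(&tag, 1, 1, ll_file) == 1 &&
      tag == LowLevelCodeCreateStruct::kTag) {
    LowLevelCodeCreateStruct event;
    fread(&event, sizeof(event), 1, ll_file);    // fixed-size header
    fseek(ll_file, event.name_size, SEEK_CUR);   // skip the name bytes
    fseek(ll_file, event.code_size, SEEK_CUR);   // skip the copied code
  }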
diff --git a/src/log.h b/src/log.h
index 1fa86d2..93f3fe7 100644
--- a/src/log.h
+++ b/src/log.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_LOG_H_
 #define V8_LOG_H_
 
+#include "allocation.h"
 #include "platform.h"
 #include "log-utils.h"
 
@@ -69,11 +70,12 @@
 // tick profiler requires code events, so --prof implies --log-code.
 
 // Forward declarations.
-class Ticker;
+class HashMap;
+class LogMessageBuilder;
 class Profiler;
 class Semaphore;
 class SlidingStateWindow;
-class LogMessageBuilder;
+class Ticker;
 
 #undef LOG
 #ifdef ENABLE_LOGGING_AND_PROFILING
@@ -88,48 +90,51 @@
 #define LOG(isolate, Call) ((void) 0)
 #endif
 
-#define LOG_EVENTS_AND_TAGS_LIST(V) \
-  V(CODE_CREATION_EVENT,            "code-creation")            \
-  V(CODE_MOVE_EVENT,                "code-move")                \
-  V(CODE_DELETE_EVENT,              "code-delete")              \
-  V(CODE_MOVING_GC,                 "code-moving-gc")           \
-  V(SHARED_FUNC_MOVE_EVENT,         "sfi-move")                 \
-  V(SNAPSHOT_POSITION_EVENT,        "snapshot-pos")             \
-  V(TICK_EVENT,                     "tick")                     \
-  V(REPEAT_META_EVENT,              "repeat")                   \
-  V(BUILTIN_TAG,                    "Builtin")                  \
-  V(CALL_DEBUG_BREAK_TAG,           "CallDebugBreak")           \
-  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn")   \
-  V(CALL_IC_TAG,                    "CallIC")                   \
-  V(CALL_INITIALIZE_TAG,            "CallInitialize")           \
-  V(CALL_MEGAMORPHIC_TAG,           "CallMegamorphic")          \
-  V(CALL_MISS_TAG,                  "CallMiss")                 \
-  V(CALL_NORMAL_TAG,                "CallNormal")               \
-  V(CALL_PRE_MONOMORPHIC_TAG,       "CallPreMonomorphic")       \
-  V(KEYED_CALL_DEBUG_BREAK_TAG,     "KeyedCallDebugBreak")      \
-  V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG,                       \
-    "KeyedCallDebugPrepareStepIn")                              \
-  V(KEYED_CALL_IC_TAG,              "KeyedCallIC")              \
-  V(KEYED_CALL_INITIALIZE_TAG,      "KeyedCallInitialize")      \
-  V(KEYED_CALL_MEGAMORPHIC_TAG,     "KeyedCallMegamorphic")     \
-  V(KEYED_CALL_MISS_TAG,            "KeyedCallMiss")            \
-  V(KEYED_CALL_NORMAL_TAG,          "KeyedCallNormal")          \
-  V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic")  \
-  V(CALLBACK_TAG,                   "Callback")                 \
-  V(EVAL_TAG,                       "Eval")                     \
-  V(FUNCTION_TAG,                   "Function")                 \
-  V(KEYED_LOAD_IC_TAG,              "KeyedLoadIC")              \
-  V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
-  V(KEYED_STORE_IC_TAG,             "KeyedStoreIC")             \
-  V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC")\
-  V(LAZY_COMPILE_TAG,               "LazyCompile")              \
-  V(LOAD_IC_TAG,                    "LoadIC")                   \
-  V(REG_EXP_TAG,                    "RegExp")                   \
-  V(SCRIPT_TAG,                     "Script")                   \
-  V(STORE_IC_TAG,                   "StoreIC")                  \
-  V(STUB_TAG,                       "Stub")                     \
-  V(NATIVE_FUNCTION_TAG,            "Function")                 \
-  V(NATIVE_LAZY_COMPILE_TAG,        "LazyCompile")              \
+#define LOG_EVENTS_AND_TAGS_LIST(V)                                     \
+  V(CODE_CREATION_EVENT,            "code-creation")                    \
+  V(CODE_MOVE_EVENT,                "code-move")                        \
+  V(CODE_DELETE_EVENT,              "code-delete")                      \
+  V(CODE_MOVING_GC,                 "code-moving-gc")                   \
+  V(SHARED_FUNC_MOVE_EVENT,         "sfi-move")                         \
+  V(SNAPSHOT_POSITION_EVENT,        "snapshot-pos")                     \
+  V(SNAPSHOT_CODE_NAME_EVENT,       "snapshot-code-name")               \
+  V(TICK_EVENT,                     "tick")                             \
+  V(REPEAT_META_EVENT,              "repeat")                           \
+  V(BUILTIN_TAG,                    "Builtin")                          \
+  V(CALL_DEBUG_BREAK_TAG,           "CallDebugBreak")                   \
+  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn")           \
+  V(CALL_IC_TAG,                    "CallIC")                           \
+  V(CALL_INITIALIZE_TAG,            "CallInitialize")                   \
+  V(CALL_MEGAMORPHIC_TAG,           "CallMegamorphic")                  \
+  V(CALL_MISS_TAG,                  "CallMiss")                         \
+  V(CALL_NORMAL_TAG,                "CallNormal")                       \
+  V(CALL_PRE_MONOMORPHIC_TAG,       "CallPreMonomorphic")               \
+  V(KEYED_CALL_DEBUG_BREAK_TAG,     "KeyedCallDebugBreak")              \
+  V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG,                               \
+    "KeyedCallDebugPrepareStepIn")                                      \
+  V(KEYED_CALL_IC_TAG,              "KeyedCallIC")                      \
+  V(KEYED_CALL_INITIALIZE_TAG,      "KeyedCallInitialize")              \
+  V(KEYED_CALL_MEGAMORPHIC_TAG,     "KeyedCallMegamorphic")             \
+  V(KEYED_CALL_MISS_TAG,            "KeyedCallMiss")                    \
+  V(KEYED_CALL_NORMAL_TAG,          "KeyedCallNormal")                  \
+  V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic")          \
+  V(CALLBACK_TAG,                   "Callback")                         \
+  V(EVAL_TAG,                       "Eval")                             \
+  V(FUNCTION_TAG,                   "Function")                         \
+  V(KEYED_LOAD_IC_TAG,              "KeyedLoadIC")                      \
+  V(KEYED_LOAD_MEGAMORPHIC_IC_TAG,  "KeyedLoadMegamorphicIC")           \
+  V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC")       \
+  V(KEYED_STORE_IC_TAG,             "KeyedStoreIC")                     \
+  V(KEYED_STORE_MEGAMORPHIC_IC_TAG, "KeyedStoreMegamorphicIC")          \
+  V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC")     \
+  V(LAZY_COMPILE_TAG,               "LazyCompile")                      \
+  V(LOAD_IC_TAG,                    "LoadIC")                           \
+  V(REG_EXP_TAG,                    "RegExp")                           \
+  V(SCRIPT_TAG,                     "Script")                           \
+  V(STORE_IC_TAG,                   "StoreIC")                          \
+  V(STUB_TAG,                       "Stub")                             \
+  V(NATIVE_FUNCTION_TAG,            "Function")                         \
+  V(NATIVE_LAZY_COMPILE_TAG,        "LazyCompile")                      \
   V(NATIVE_SCRIPT_TAG,              "Script")
 // Note that 'NATIVE_' cases for functions and scripts are mapped onto
 // original tags when writing to the log.
@@ -306,6 +311,9 @@
   void LogFailure();
 
  private:
+  class NameBuffer;
+  class NameMap;
+
   Logger();
   ~Logger();
 
@@ -332,8 +340,26 @@
   // Emits general information about generated code.
   void LogCodeInfo();
 
-  // Handles code creation when low-level profiling is active.
-  void LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg);
+  void RegisterSnapshotCodeName(Code* code, const char* name, int name_size);
+
+  // Low-level logging support.
+
+  void LowLevelCodeCreateEvent(Code* code, const char* name, int name_size);
+
+  void LowLevelCodeMoveEvent(Address from, Address to);
+
+  void LowLevelCodeDeleteEvent(Address from);
+
+  void LowLevelSnapshotPositionEvent(Address addr, int pos);
+
+  void LowLevelLogWriteBytes(const char* bytes, int size);
+
+  template <typename T>
+  void LowLevelLogWriteStruct(const T& s) {
+    char tag = T::kTag;
+    LowLevelLogWriteBytes(reinterpret_cast<const char*>(&tag), sizeof(tag));
+    LowLevelLogWriteBytes(reinterpret_cast<const char*>(&s), sizeof(s));
+  }
 
   // Emits a profiler tick event. Used by the profiler thread.
   void TickEvent(TickSample* sample, bool overflow);
@@ -385,6 +411,10 @@
 
   Log* log_;
 
+  NameBuffer* name_buffer_;
+
+  NameMap* address_to_name_map_;
+
   // Guards against multiple calls to TearDown() that can happen in some tests.
   // 'true' between Setup() and TearDown().
   bool is_initialized_;
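
The NameBuffer and NameMap nested classes declared above back the snapshot path in log.cc: while the serializer is running, RegisterSnapshotCodeName remembers each code object's name keyed by its address, so later snapshot-position events can be attributed to named code. A rough stand-in using standard containers is shown below; V8's real NameMap is built on its own HashMap and owns raw character buffers, so treat this purely as a sketch of the lookup shape.

#include <cstddef>
#include <cstdint>
#include <map>
#include <string>

// Hypothetical stand-in for Logger::NameMap: remembers the most recent
// name registered for a code object's address while building a snapshot.
class SnapshotNameMap {
 public:
  void Insert(uintptr_t code_address, const char* name, int name_size) {
    names_[code_address] = std::string(name, name_size);
  }
  // Returns NULL when no name was registered for this address.
  const std::string* Lookup(uintptr_t code_address) const {
    std::map<uintptr_t, std::string>::const_iterator it =
        names_.find(code_address);
    return it == names_.end() ? NULL : &it->second;
  }
 private:
  std::map<uintptr_t, std::string> names_;
};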
diff --git a/src/macros.py b/src/macros.py
index 69f36c0..28d501f 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -127,7 +127,7 @@
 macro TO_UINT32(arg) = (arg >>> 0);
 macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
 macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
-
+macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
 
 # Macros implemented in Python.
 python macro CHAR_CODE(str) = ord(str[1]);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 68a5062..9439905 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -305,13 +305,11 @@
     *GetNextCandidateField(candidate) = next_candidate;
   }
 
-  STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
-
   static SharedFunctionInfo** GetNextCandidateField(
       SharedFunctionInfo* candidate) {
     Code* code = candidate->unchecked_code();
     return reinterpret_cast<SharedFunctionInfo**>(
-        code->address() + Code::kHeaderPaddingStart);
+        code->address() + Code::kNextCodeFlushingCandidateOffset);
   }
 
   static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
@@ -424,6 +422,9 @@
     table_.Register(kVisitJSFunction,
                     &VisitJSFunctionAndFlushCode);
 
+    table_.Register(kVisitJSRegExp,
+                    &VisitRegExpAndFlushCode);
+
     table_.Register(kVisitPropertyCell,
                     &FixedBodyVisitor<StaticMarkingVisitor,
                                       JSGlobalPropertyCell::BodyDescriptor,
@@ -459,7 +460,7 @@
   static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
     Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
+    if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
       IC::Clear(rinfo->pc());
       // Please note targets for cleared inline cached do not have to be
       // marked since they are contained in HEAP->non_monomorphic_cache().
@@ -564,6 +565,8 @@
   // flushed.
   static const int kCodeAgeThreshold = 5;
 
+  static const int kRegExpCodeThreshold = 5;
+
   inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
     Object* undefined = heap->raw_unchecked_undefined_value();
     return (info->script() != undefined) &&
@@ -699,6 +702,68 @@
   }
 
 
+  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
+                                          JSRegExp* re,
+                                          bool is_ascii) {
+    // Make sure that the fixed array is in fact initialized on the RegExp.
+    // We could potentially trigger a GC when initializing the RegExp.
+    if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;
+
+    // Make sure this is a RegExp that actually contains code.
+    if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
+
+    Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
+    if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
+      // Save a copy that can be reinstated if we need the code again.
+      re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
+                             code,
+                             heap);
+      // Set a number in the 0-255 range to guarantee no smi overflow.
+      re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
+                             Smi::FromInt(heap->sweep_generation() & 0xff),
+                             heap);
+    } else if (code->IsSmi()) {
+      int value = Smi::cast(code)->value();
+      // The regexp has not been compiled yet or there was a compilation error.
+      if (value == JSRegExp::kUninitializedValue ||
+          value == JSRegExp::kCompilationErrorValue) {
+        return;
+      }
+
+      // Check if we should flush now.
+      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
+        re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
+                               Smi::FromInt(JSRegExp::kUninitializedValue),
+                               heap);
+        re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
+                               Smi::FromInt(JSRegExp::kUninitializedValue),
+                               heap);
+      }
+    }
+  }
+
+
+  // Works by setting the current sweep_generation (as a smi) in the
+  // code object slot of the RegExp's data array, keeping a copy around
+  // that can be reinstated if the RegExp is used again before flushing.
+  // If the code has not been used for kRegExpCodeThreshold mark-sweep
+  // GCs, it is flushed.
+  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
+    Heap* heap = map->heap();
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    if (!collector->is_code_flushing_enabled()) {
+      VisitJSRegExpFields(map, object);
+      return;
+    }
+    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
+    // Flush code or set age on both the ASCII and the two-byte code.
+    UpdateRegExpCodeAgeAndFlush(heap, re, true);
+    UpdateRegExpCodeAgeAndFlush(heap, re, false);
+    // Visit the fields of the RegExp, including the updated FixedArray.
+    VisitJSRegExpFields(map, object);
+  }
+
+
   static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                   HeapObject* object) {
     MarkCompactCollector* collector = map->heap()->mark_compact_collector();
@@ -829,6 +894,15 @@
     // Don't visit the next function list field as it is a weak reference.
   }
 
+  static inline void VisitJSRegExpFields(Map* map,
+                                         HeapObject* object) {
+    int last_property_offset =
+        JSRegExp::kSize + kPointerSize * map->inobject_properties();
+    VisitPointers(map->heap(),
+                  SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
+                  SLOT_ADDR(object, last_property_offset));
+  }
+
 
   static void VisitSharedFunctionInfoFields(Heap* heap,
                                             HeapObject* object,
@@ -1058,7 +1132,7 @@
   ASSERT(HEAP->Contains(object));
   if (object->IsMap()) {
     Map* map = Map::cast(object);
-    if (FLAG_cleanup_caches_in_maps_at_gc) {
+    if (FLAG_cleanup_code_caches_at_gc) {
       map->ClearCodeCache(heap());
     }
     SetMark(map);
@@ -1083,8 +1157,13 @@
   FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
   if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
 
-  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
-      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
+  Object* raw_descriptor_array =
+      *HeapObject::RawField(map,
+                            Map::kInstanceDescriptorsOrBitField3Offset);
+  if (!raw_descriptor_array->IsSmi()) {
+    MarkDescriptorArray(
+        reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
+  }
 
   // Mark the Object* fields of the Map.
   // Since the descriptor array has been marked already, it is fine
@@ -2055,7 +2134,7 @@
   }
 
   // Update roots.
-  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
+  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
   LiveObjectList::IterateElements(&updating_visitor);
 
   // Update pointers in old spaces.
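
The RegExp code-flushing scheme added above can be hard to follow from the diff alone, so here is a toy model of the aging logic. The representation is invented for illustration (plain ints instead of Smis and heap objects, 256 as an arbitrary cutoff between "generation stamp" and "compiled code"), but the control flow mirrors UpdateRegExpCodeAgeAndFlush.

#include <cstdio>

// Toy model of the RegExp code-flushing scheme. The "code slot" either
// holds compiled code (modeled as an id >= 256) or a small integer: the
// GC generation in which the code was parked, or -1 for "uninitialized".
const int kUninitialized = -1;
const int kRegExpCodeThreshold = 5;

struct RegExpSlots {
  int code;        // compiled-code id, or a parked generation, or -1
  int saved_code;  // copy kept so the code can be reinstated on reuse
};

void AgeOrFlush(RegExpSlots* re, int sweep_generation) {
  if (re->code >= 256) {
    // Still holds real code: park it and stamp the current generation.
    re->saved_code = re->code;
    re->code = sweep_generation & 0xff;
  } else if (re->code != kUninitialized) {
    // Already parked: flush once it has sat unused for the threshold.
    if (re->code == ((sweep_generation - kRegExpCodeThreshold) & 0xff)) {
      re->code = kUninitialized;
      re->saved_code = kUninitialized;
    }
  }
}

int main() {
  RegExpSlots re = { 1000, kUninitialized };  // 1000 == some compiled code
  for (int gc = 1; gc <= 7; gc++) {
    AgeOrFlush(&re, gc);
    printf("gc %d: code=%d saved=%d\n", gc, re.code, re.saved_code);
  }
  return 0;
}

Running it, the code slot is parked at GC 1 and flushed at GC 6, that is, after sitting unused for kRegExpCodeThreshold collections.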
diff --git a/src/messages.cc b/src/messages.cc
index abc2537..4cbf0af 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -125,13 +125,13 @@
       HandleScope scope;
       if (global_listeners.get(i)->IsUndefined()) continue;
       v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
-      Handle<Proxy> callback_obj(Proxy::cast(listener.get(0)));
+      Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
       v8::MessageCallback callback =
-          FUNCTION_CAST<v8::MessageCallback>(callback_obj->proxy());
+          FUNCTION_CAST<v8::MessageCallback>(callback_obj->address());
       Handle<Object> callback_data(listener.get(1));
       {
         // Do not allow exceptions to propagate.
-        v8::TryCatch tryCatch;
+        v8::TryCatch try_catch;
         callback(api_message_obj, v8::Utils::ToLocal(callback_data));
       }
       if (isolate->has_scheduled_exception()) {
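
In the messages.cc hunk above, the listener callback is stored as a raw address inside a Foreign object (previously named Proxy) and cast back to a function pointer before being invoked under a v8::TryCatch. The standalone snippet below only illustrates the store-address-then-cast-back pattern; the Foreign struct and names here are made up, and V8 performs the cast with its FUNCTION_CAST macro on a real heap object.

#include <cstdint>
#include <cstdio>

typedef void (*MessageCallback)(const char* message);

// Stand-in for a Foreign object: an opaque address stored on the heap.
struct Foreign {
  intptr_t address;
};

static void PrintListener(const char* message) {
  printf("listener got: %s\n", message);
}

int main() {
  // Register: stash the callback's address in the Foreign wrapper.
  Foreign wrapper;
  wrapper.address = reinterpret_cast<intptr_t>(&PrintListener);

  // Dispatch: cast the stored address back to the callback type and call it.
  MessageCallback callback =
      reinterpret_cast<MessageCallback>(wrapper.address);
  callback("Uncaught ReferenceError: foo is not defined");
  return 0;
}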
diff --git a/src/messages.js b/src/messages.js
index d8810dc..75ea99d 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -57,11 +57,13 @@
   for (var i = 0; i < format.length; i++) {
     var str = format[i];
     for (arg_num = 0; arg_num < kReplacementMarkers.length; arg_num++) {
-      if (format[i] !== kReplacementMarkers[arg_num]) continue;
-      try {
-        str = ToDetailString(args[arg_num]);
-      } catch (e) {
-        str = "#<error>";
+      if (str == kReplacementMarkers[arg_num]) {
+        try {
+          str = ToDetailString(args[arg_num]);
+        } catch (e) {
+          str = "#<error>";
+        }
+        break;
       }
     }
     result += str;
@@ -100,7 +102,8 @@
 
 
 function ToDetailString(obj) {
-  if (obj != null && IS_OBJECT(obj) && obj.toString === $Object.prototype.toString) {
+  if (obj != null && IS_OBJECT(obj) &&
+      obj.toString === $Object.prototype.toString) {
     var constructor = obj.constructor;
     if (!constructor) return ToStringCheckErrorObject(obj);
     var constructorName = constructor.name;
@@ -142,6 +145,7 @@
     kMessages = {
       // Error
       cyclic_proto:                 ["Cyclic __proto__ value"],
+      code_gen_from_strings:        ["Code generation from strings disallowed for this context"],
       // TypeError
       unexpected_token:             ["Unexpected token ", "%0"],
       unexpected_token_number:      ["Unexpected number"],
@@ -191,6 +195,7 @@
       redefine_disallowed:          ["Cannot redefine property: ", "%0"],
       define_disallowed:            ["Cannot define property, object is not extensible: ", "%0"],
       non_extensible_proto:         ["%0", " is not extensible"],
+      handler_trap_missing:         ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
       // RangeError
       invalid_array_length:         ["Invalid array length"],
       stack_overflow:               ["Maximum call stack size exceeded"],
@@ -206,6 +211,7 @@
       invalid_json:                 ["String '", "%0", "' is not valid JSON"],
       circular_structure:           ["Converting circular structure to JSON"],
       obj_ctor_property_non_object: ["Object.", "%0", " called on non-object"],
+      called_on_null_or_undefined:  ["%0", " called on null or undefined"],
       array_indexof_not_defined:    ["Array.getIndexOf: Argument undefined"],
       object_not_extensible:        ["Can't add property ", "%0", ", object is not extensible"],
       illegal_access:               ["Illegal access"],
@@ -233,11 +239,10 @@
       strict_function:              ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
       strict_read_only_property:    ["Cannot assign to read only property '", "%0", "' of ", "%1"],
       strict_cannot_assign:         ["Cannot assign to read only '", "%0", "' in strict mode"],
-      strict_arguments_callee:      ["Cannot access property 'callee' of strict mode arguments"],
-      strict_arguments_caller:      ["Cannot access property 'caller' of strict mode arguments"],
-      strict_function_caller:       ["Cannot access property 'caller' of a strict mode function"],
-      strict_function_arguments:    ["Cannot access property 'arguments' of a strict mode function"],
+      strict_poison_pill:           ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
       strict_caller:                ["Illegal access to a strict mode caller function."],
+      cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
+      redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
     };
   }
   var message_type = %MessageGetType(message);
@@ -550,6 +555,7 @@
   this.end = end;
 }
 
+SourceLocation.prototype.__proto__ = null;
 
 const kLineLengthLimit = 78;
 
@@ -639,6 +645,7 @@
   this.to_position = to_position;
 }
 
+SourceSlice.prototype.__proto__ = null;
 
 /**
  * Get the source text for a SourceSlice
@@ -700,23 +707,28 @@
   this.pos = pos;
 }
 
+CallSite.prototype.__proto__ = null;
+
 CallSite.prototype.getThis = function () {
   return this.receiver;
 };
 
 CallSite.prototype.getTypeName = function () {
   var constructor = this.receiver.constructor;
-  if (!constructor)
+  if (!constructor) {
     return %_CallFunction(this.receiver, ObjectToString);
+  }
   var constructorName = constructor.name;
-  if (!constructorName)
+  if (!constructorName) {
     return %_CallFunction(this.receiver, ObjectToString);
+  }
   return constructorName;
 };
 
 CallSite.prototype.isToplevel = function () {
-  if (this.receiver == null)
+  if (this.receiver == null) {
     return true;
+  }
   return IS_GLOBAL(this.receiver);
 };
 
@@ -749,8 +761,9 @@
   }
   // Maybe this is an evaluation?
   var script = %FunctionGetScript(this.fun);
-  if (script && script.compilation_type == COMPILATION_TYPE_EVAL)
+  if (script && script.compilation_type == COMPILATION_TYPE_EVAL) {
     return "eval";
+  }
   return null;
 };
 
@@ -772,13 +785,15 @@
         this.receiver.__lookupSetter__(prop) === this.fun ||
         (!this.receiver.__lookupGetter__(prop) && this.receiver[prop] === this.fun)) {
       // If we find more than one match bail out to avoid confusion.
-      if (name)
+      if (name) {
         return null;
+      }
       name = prop;
     }
   }
-  if (name)
+  if (name) {
     return name;
+  }
   return null;
 };
 
@@ -788,8 +803,9 @@
 };
 
 CallSite.prototype.getLineNumber = function () {
-  if (this.pos == -1)
+  if (this.pos == -1) {
     return null;
+  }
   var script = %FunctionGetScript(this.fun);
   var location = null;
   if (script) {
@@ -799,8 +815,9 @@
 };
 
 CallSite.prototype.getColumnNumber = function () {
-  if (this.pos == -1)
+  if (this.pos == -1) {
     return null;
+  }
   var script = %FunctionGetScript(this.fun);
   var location = null;
   if (script) {
@@ -820,15 +837,17 @@
 
 CallSite.prototype.isConstructor = function () {
   var constructor = this.receiver ? this.receiver.constructor : null;
-  if (!constructor)
+  if (!constructor) {
     return false;
+  }
   return this.fun === constructor;
 };
 
 function FormatEvalOrigin(script) {
   var sourceURL = script.nameOrSourceURL();
-  if (sourceURL)
+  if (sourceURL) {
     return sourceURL;
+  }
 
   var eval_origin = "eval at ";
   if (script.eval_from_function_name) {
@@ -1023,8 +1042,9 @@
 function captureStackTrace(obj, cons_opt) {
   var stackTraceLimit = $Error.stackTraceLimit;
   if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return;
-  if (stackTraceLimit < 0 || stackTraceLimit > 10000)
+  if (stackTraceLimit < 0 || stackTraceLimit > 10000) {
     stackTraceLimit = 10000;
+  }
   var raw_stack = %CollectStackTrace(cons_opt
                                      ? cons_opt
                                      : captureStackTrace, stackTraceLimit);
@@ -1071,6 +1091,10 @@
 }
 
 function errorToString() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Error.prototype.toString"]);
+  }
   // This helper function is needed because access to properties on
   // the builtins object do not work inside of a catch clause.
   function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
@@ -1080,8 +1104,10 @@
   } catch(e) {
     // If this error message was encountered already return the empty
     // string for it instead of recursively formatting it.
-    if (isCyclicErrorMarker(e)) return '';
-    else throw e;
+    if (isCyclicErrorMarker(e)) {
+      return '';
+    }
+    throw e;
   }
 }
 
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index f7453d1..e787fed 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
@@ -45,7 +45,7 @@
 namespace internal {
 
 // -----------------------------------------------------------------------------
-// Operand and MemOperand
+// Operand and MemOperand.
 
 Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
   rm_ = no_reg;
@@ -80,7 +80,7 @@
 
 
 // -----------------------------------------------------------------------------
-// RelocInfo
+// RelocInfo.
 
 void RelocInfo::apply(intptr_t delta) {
   // On MIPS we do not use pc relative addressing, so we don't need to patch the
@@ -95,24 +95,8 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
-                              || rmode_ == EMBEDDED_OBJECT
-                              || rmode_ == EXTERNAL_REFERENCE);
-  // Read the address of the word containing the target_address in an
-  // instruction stream.
-  // The only architecture-independent user of this function is the serializer.
-  // The serializer uses it to find out how many raw bytes of instruction to
-  // output before the next target.
-  // For an instructions like LUI/ORI where the target bits are mixed into the
-  // instruction bits, the size of the target will be zero, indicating that the
-  // serializer should not step forward in memory after a target is resolved
-  // and written.  In this case the target_address_address function should
-  // return the end of the instructions to be patched, allowing the
-  // deserializer to deserialize the instructions as raw bytes and put them in
-  // place, ready to be patched with the target. In our case, that is the
-  // address of the instruction that follows LUI/ORI instruction pair.
-  return reinterpret_cast<Address>(
-    pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
 }
 
 
@@ -144,12 +128,9 @@
   // Provide a "natural pointer" to the embedded object,
   // which can be de-referenced during heap iteration.
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  // TODO(mips): Commenting out, to simplify arch-independent changes.
-  // GC won't work like this, but this commit is for asm/disasm/sim.
-  // reconstructed_obj_ptr_ =
-  //   reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
-  // return &reconstructed_obj_ptr_;
-  return NULL;
+  reconstructed_obj_ptr_ =
+      reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+  return &reconstructed_obj_ptr_;
 }
 
 
@@ -161,11 +142,8 @@
 
 Address* RelocInfo::target_reference_address() {
   ASSERT(rmode_ == EXTERNAL_REFERENCE);
-  // TODO(mips): Commenting out, to simplify arch-independent changes.
-  // GC won't work like this, but this commit is for asm/disasm/sim.
-  // reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
-  // return &reconstructed_adr_ptr_;
-  return NULL;
+  reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+  return &reconstructed_adr_ptr_;
 }
 
 
@@ -251,26 +229,23 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    // RelocInfo is needed when pointer must be updated/serialized, such as
-    // UpdatingVisitor in mark-compact.cc or Serializer in serialize.cc.
-    // It is ignored by visitors that do not need it.
-    // Commenting out, to simplify arch-independednt changes.
-    // GC won't work like this, but this commit is for asm/disasm/sim.
-    // visitor->VisitPointer(target_object_address(), this);
+    Object** p = target_object_address();
+    Object* orig = *p;
+    visitor->VisitPointer(p);
+    if (*p != orig) {
+      set_target_object(*p);
+    }
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    // RelocInfo is needed when external-references must be serialized by
-    // Serializer Visitor in serialize.cc. It is ignored by visitors that
-    // do not need it.
-    // Commenting out, to simplify arch-independednt changes.
-    // Serializer won't work like this, but this commit is for asm/disasm/sim.
-    // visitor->VisitExternalReference(target_reference_address(), this);
+    visitor->VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
   } else if (((RelocInfo::IsJSReturn(mode) &&
                IsPatchedReturnSequence()) ||
-             (RelocInfo::IsDebugBreakSlot(mode) &&
+              (RelocInfo::IsDebugBreakSlot(mode) &&
                IsPatchedDebugBreakSlotSequence())) &&
              Isolate::Current()->debug()->has_break_points()) {
     visitor->VisitDebugTarget(this);
@@ -287,7 +262,9 @@
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     StaticVisitor::VisitPointer(heap, target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(this);
+    StaticVisitor::VisitCodeTarget(heap, this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -296,7 +273,7 @@
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(this);
+    StaticVisitor::VisitDebugTarget(heap, this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);
@@ -305,7 +282,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Assembler
+// Assembler.
 
 
 void Assembler::CheckBuffer() {
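
Earlier in this file, RelocInfo::Visit on MIPS stops being a stub: the embedded object pointer is read out of the instruction stream, handed to the visitor, and re-encoded only if the visitor moved it, which is what lets a moving GC work through reloc info. Below is a compact model of that read, visit, write-back shape, with simplified types and no real instruction encoding.

#include <cstdio>

struct Object { int id; };

// A visitor that may forward a pointer to the object's new location.
struct ObjectVisitor {
  Object* forwarded_to;
  void VisitPointer(Object** slot) {
    if (forwarded_to != NULL) *slot = forwarded_to;
  }
};

// Stand-in for RelocInfo: re-encoding happens only when the pointer moved,
// since patching the instruction stream implies an icache flush.
struct RelocSlot {
  Object* target;
  void set_target_object(Object* obj) {
    target = obj;
    printf("re-encoded target (icache flush would go here)\n");
  }
  void Visit(ObjectVisitor* visitor) {
    Object* p = target;
    Object* orig = p;
    visitor->VisitPointer(&p);
    if (p != orig) set_target_object(p);
  }
};

int main() {
  Object old_obj = { 1 };
  Object new_obj = { 2 };
  RelocSlot slot = { &old_obj };
  ObjectVisitor moving = { &new_obj };
  slot.Visit(&moving);
  printf("target id now %d\n", slot.target->id);
  return 0;
}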
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 7d00da1..2e10904 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #include "v8.h"
@@ -43,13 +43,17 @@
 namespace v8 {
 namespace internal {
 
-CpuFeatures::CpuFeatures()
-    : supported_(0),
-      enabled_(0),
-      found_by_runtime_probing_(0) {
-}
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
 
-void CpuFeatures::Probe(bool portable) {
+void CpuFeatures::Probe() {
+  ASSERT(!initialized_);
+#ifdef DEBUG
+  initialized_ = true;
+#endif
   // If the compiler is allowed to use fpu then we can use fpu too in our
   // code generation.
 #if !defined(__mips__)
@@ -58,7 +62,7 @@
       supported_ |= 1u << FPU;
   }
 #else
-  if (portable && Serializer::enabled()) {
+  if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
@@ -69,8 +73,6 @@
     supported_ |= 1u << FPU;
     found_by_runtime_probing_ |= 1u << FPU;
   }
-
-  if (!portable) found_by_runtime_probing_ = 0;
 #endif
 }
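
The CpuFeatures rework above brings MIPS in line with the other ports: Probe() runs once per process and fills static bitmasks, IsSupported() checks the probed set, and a feature counts as enabled only inside a Scope (per isolate in V8, via enabled_cpu_features()). Here is a stripped-down model of that arrangement, with the per-isolate part collapsed into a plain static for brevity.

#include <cassert>
#include <cstdio>

enum CpuFeature { FPU = 0 };

class CpuFeatures {
 public:
  static void Probe() {
    assert(!initialized_);
    initialized_ = true;
    supported_ |= 1u << FPU;  // pretend runtime probing found an FPU
  }
  static bool IsSupported(CpuFeature f) {
    assert(initialized_);
    return (supported_ & (1u << f)) != 0;
  }
  static bool IsEnabled(CpuFeature f) {
    return (enabled_ & (1u << f)) != 0;
  }
  // Enable a feature only for the lifetime of a Scope.
  class Scope {
   public:
    explicit Scope(CpuFeature f) : old_enabled_(enabled_) {
      assert(IsSupported(f));
      enabled_ |= 1u << f;
    }
    ~Scope() { enabled_ = old_enabled_; }
   private:
    unsigned old_enabled_;
  };
 private:
  static bool initialized_;
  static unsigned supported_;
  static unsigned enabled_;  // per-isolate in V8; a plain static here
};

bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::enabled_ = 0;

int main() {
  CpuFeatures::Probe();
  printf("FPU enabled before scope: %d\n", CpuFeatures::IsEnabled(FPU));
  {
    CpuFeatures::Scope use_fpu(FPU);
    printf("FPU enabled inside scope: %d\n", CpuFeatures::IsEnabled(FPU));
  }
  printf("FPU enabled after scope: %d\n", CpuFeatures::IsEnabled(FPU));
  return 0;
}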
 
@@ -235,12 +237,10 @@
 static const int kMinimalBufferSize = 4 * KB;
 
 
-Assembler::Assembler(void* buffer, int buffer_size)
-    : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+    : AssemblerBase(arg_isolate),
       positions_recorder_(this),
-      allow_peephole_optimization_(false) {
-  // BUG(3245989): disable peephole optimization if crankshaft is enabled.
-  allow_peephole_optimization_ = FLAG_peephole_optimization;
+      emit_debug_code_(FLAG_debug_code) {
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
@@ -276,13 +276,17 @@
   no_trampoline_pool_before_ = 0;
   trampoline_pool_blocked_nesting_ = 0;
   next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
+  internal_trampoline_exception_ = false;
+  last_bound_pos_ = 0;
+
+  ast_id_for_reloc_info_ = kNoASTId;
 }
 
 
 Assembler::~Assembler() {
   if (own_buffer_) {
     if (isolate()->assembler_spare_buffer() == NULL &&
-      buffer_size_ == kMinimalBufferSize) {
+        buffer_size_ == kMinimalBufferSize) {
       isolate()->set_assembler_spare_buffer(buffer_);
     } else {
       DeleteArray(buffer_);
@@ -316,13 +320,82 @@
 }
 
 
-Register Assembler::GetRt(Instr instr) {
+Register Assembler::GetRtReg(Instr instr) {
   Register rt;
-  rt.code_ = (instr & kRtMask) >> kRtShift;
+  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
   return rt;
 }
 
 
+Register Assembler::GetRsReg(Instr instr) {
+  Register rs;
+  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+  return rs;
+}
+
+
+Register Assembler::GetRdReg(Instr instr) {
+  Register rd;
+  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+  return rd;
+}
+
+
+uint32_t Assembler::GetRt(Instr instr) {
+  return (instr & kRtFieldMask) >> kRtShift;
+}
+
+
+uint32_t Assembler::GetRtField(Instr instr) {
+  return instr & kRtFieldMask;
+}
+
+
+uint32_t Assembler::GetRs(Instr instr) {
+  return (instr & kRsFieldMask) >> kRsShift;
+}
+
+
+uint32_t Assembler::GetRsField(Instr instr) {
+  return instr & kRsFieldMask;
+}
+
+
+uint32_t Assembler::GetRd(Instr instr) {
+  return  (instr & kRdFieldMask) >> kRdShift;
+}
+
+
+uint32_t Assembler::GetRdField(Instr instr) {
+  return  instr & kRdFieldMask;
+}
+
+
+uint32_t Assembler::GetSa(Instr instr) {
+  return (instr & kSaFieldMask) >> kSaShift;
+}
+
+
+uint32_t Assembler::GetSaField(Instr instr) {
+  return instr & kSaFieldMask;
+}
+
+
+uint32_t Assembler::GetOpcodeField(Instr instr) {
+  return instr & kOpcodeMask;
+}
+
+
+uint32_t Assembler::GetImmediate16(Instr instr) {
+  return instr & kImm16Mask;
+}
+
+
+uint32_t Assembler::GetLabelConst(Instr instr) {
+  return instr & ~kImm16Mask;
+}
+
+
 bool Assembler::IsPop(Instr instr) {
   return (instr & ~kRtMask) == kPopRegPattern;
 }
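
The new GetRs/GetRt/GetRd/GetSa helpers above all follow one pattern: mask out the field, then shift it down to bit 0. The self-contained sketch below decodes a MIPS R-type word the same way; the mask and shift constants are the standard MIPS encodings written out here rather than taken from V8's headers.

#include <cstdint>
#include <cstdio>

typedef uint32_t Instr;

// Standard MIPS R-type field positions.
const int kRsShift = 21;
const int kRtShift = 16;
const int kRdShift = 11;
const int kSaShift = 6;
const Instr kRsFieldMask = 0x1fu << kRsShift;
const Instr kRtFieldMask = 0x1fu << kRtShift;
const Instr kRdFieldMask = 0x1fu << kRdShift;
const Instr kSaFieldMask = 0x1fu << kSaShift;

uint32_t GetRs(Instr instr) { return (instr & kRsFieldMask) >> kRsShift; }
uint32_t GetRt(Instr instr) { return (instr & kRtFieldMask) >> kRtShift; }
uint32_t GetRd(Instr instr) { return (instr & kRdFieldMask) >> kRdShift; }
uint32_t GetSa(Instr instr) { return (instr & kSaFieldMask) >> kSaShift; }

int main() {
  // addu $v0, $a0, $a1  ==  0x00851021  (rs=4, rt=5, rd=2, sa=0).
  Instr addu = 0x00851021;
  printf("rs=%u rt=%u rd=%u sa=%u\n",
         GetRs(addu), GetRt(addu), GetRd(addu), GetSa(addu));
  return 0;
}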
@@ -374,10 +447,10 @@
 
 
 bool Assembler::IsBranch(Instr instr) {
-  uint32_t opcode   = ((instr & kOpcodeMask));
-  uint32_t rt_field = ((instr & kRtFieldMask));
-  uint32_t rs_field = ((instr & kRsFieldMask));
-  uint32_t label_constant = (instr & ~kImm16Mask);
+  uint32_t opcode   = GetOpcodeField(instr);
+  uint32_t rt_field = GetRtField(instr);
+  uint32_t rs_field = GetRsField(instr);
+  uint32_t label_constant = GetLabelConst(instr);
   // Checks if the instruction is a branch.
   return opcode == BEQ ||
       opcode == BNE ||
@@ -386,7 +459,7 @@
       opcode == BEQL ||
       opcode == BNEL ||
       opcode == BLEZL ||
-      opcode == BGTZL||
+      opcode == BGTZL ||
       (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                             rt_field == BLTZAL || rt_field == BGEZAL)) ||
       (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
@@ -394,13 +467,23 @@
 }
 
 
+bool Assembler::IsBeq(Instr instr) {
+  return GetOpcodeField(instr) == BEQ;
+}
+
+
+bool Assembler::IsBne(Instr instr) {
+  return GetOpcodeField(instr) == BNE;
+}
+
+
 bool Assembler::IsNop(Instr instr, unsigned int type) {
   // See Assembler::nop(type).
   ASSERT(type < 32);
-  uint32_t opcode = ((instr & kOpcodeMask));
-  uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
-  uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
-  uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+  uint32_t opcode = GetOpcodeField(instr);
+  uint32_t rt = GetRt(instr);
+  uint32_t rs = GetRs(instr);
+  uint32_t sa = GetSa(instr);
 
   // nop(type) == sll(zero_reg, zero_reg, type);
   // Technically all these values will be 0 but
@@ -465,6 +548,11 @@
 }
 
 
+bool Assembler::IsAndImmediate(Instr instr) {
+  return GetOpcodeField(instr) == ANDI;
+}
+
+
 int Assembler::target_at(int32_t pos) {
   Instr instr = instr_at(pos);
   if ((instr & ~kImm16Mask) == 0) {
@@ -546,6 +634,10 @@
     if (dist > kMaxBranchOffset) {
       do {
         int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
+        if (kInvalidSlotPos == trampoline_pos) {
+          // Internal error.
+          return;
+        }
         ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
         target_at_put(fixup_pos, trampoline_pos);
         fixup_pos = trampoline_pos;
@@ -554,6 +646,10 @@
     } else if (dist < -kMaxBranchOffset) {
       do {
         int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
+        if (kInvalidSlotPos == trampoline_pos) {
+          // Internal error.
+          return;
+        }
         ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
         target_at_put(fixup_pos, trampoline_pos);
         fixup_pos = trampoline_pos;
@@ -652,7 +748,7 @@
                                  FPURegister fd,
                                  SecondaryField func) {
   ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
       | (fd.code() << kFdShift) | func;
   emit(instr);
@@ -666,7 +762,7 @@
                                  FPURegister fd,
                                  SecondaryField func) {
   ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr = opcode | fmt | (rt.code() << kRtShift)
       | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
   emit(instr);
@@ -679,7 +775,7 @@
                                  FPUControlRegister fs,
                                  SecondaryField func) {
   ASSERT(fs.is_valid() && rt.is_valid());
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr =
       opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
   emit(instr);
@@ -714,7 +810,7 @@
                                   FPURegister ft,
                                   int32_t j) {
   ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
       | (j & kImm16Mask);
   emit(instr);
@@ -760,22 +856,27 @@
 // Returns the next free trampoline entry from the next trampoline pool.
 int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
   int trampoline_count = trampolines_.length();
-  int32_t trampoline_entry = 0;
+  int32_t trampoline_entry = kInvalidSlotPos;
   ASSERT(trampoline_count > 0);
 
-  if (next_pool) {
-    for (int i = 0; i < trampoline_count; i++) {
-      if (trampolines_[i].start() > pos) {
-       trampoline_entry = trampolines_[i].take_slot();
-       break;
+  if (!internal_trampoline_exception_) {
+    if (next_pool) {
+      for (int i = 0; i < trampoline_count; i++) {
+        if (trampolines_[i].start() > pos) {
+         trampoline_entry = trampolines_[i].take_slot();
+         break;
+        }
+      }
+    } else {  //  Caller needs a trampoline entry from the previous pool.
+      for (int i = trampoline_count-1; i >= 0; i--) {
+        if (trampolines_[i].end() < pos) {
+         trampoline_entry = trampolines_[i].take_slot();
+         break;
+        }
       }
     }
-  } else {  // Caller needs a trampoline entry from the previous pool.
-    for (int i = trampoline_count-1; i >= 0; i--) {
-      if (trampolines_[i].end() < pos) {
-       trampoline_entry = trampolines_[i].take_slot();
-       break;
-      }
+    if (kInvalidSlotPos == trampoline_entry) {
+      internal_trampoline_exception_ = true;
     }
   }
   return trampoline_entry;
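
The change above, together with the matching checks in the two hunks that follow, makes an exhausted trampoline pool an explicit error: the slot lookup returns kInvalidSlotPos and latches internal_trampoline_exception_ instead of silently handing back position 0. A small sketch of that pattern follows, with a toy pool type standing in for the assembler's trampoline bookkeeping.

#include <cstdio>
#include <vector>

const int kInvalidSlotPos = -1;

// A trampoline pool: a block of branch slots emitted at some position.
// take_slot() hands out the pool's position while slots remain (real
// pools advance the slot position; this toy version does not).
struct Pool {
  int start;
  int slots_left;
  int take_slot() { return slots_left-- > 0 ? start : kInvalidSlotPos; }
};

// Find a slot in the first pool after `pos`; flag an error if none is left.
int GetTrampolineEntry(std::vector<Pool>* pools, int pos, bool* error) {
  int entry = kInvalidSlotPos;
  if (!*error) {
    for (int i = 0; i < static_cast<int>(pools->size()); i++) {
      if ((*pools)[i].start > pos) {
        entry = (*pools)[i].take_slot();
        break;
      }
    }
    if (entry == kInvalidSlotPos) *error = true;
  }
  return entry;
}

int main() {
  std::vector<Pool> pools;
  Pool p = { 4096, 1 };  // one pool at offset 4096 with a single free slot
  pools.push_back(p);
  bool error = false;
  printf("first:  %d (error=%d)\n",
         GetTrampolineEntry(&pools, 0, &error), error);
  printf("second: %d (error=%d)\n",
         GetTrampolineEntry(&pools, 0, &error), error);
  return 0;
}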
@@ -792,6 +893,10 @@
     if (dist > kMaxBranchOffset) {
       do {
         int32_t trampoline_pos = get_trampoline_entry(target_pos);
+        if (kInvalidSlotPos == trampoline_pos) {
+          // Internal error.
+          return 0;
+        }
         ASSERT((trampoline_pos - target_pos) > 0);
         ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
         target_at_put(trampoline_pos, target_pos);
@@ -801,6 +906,10 @@
     } else if (dist < -kMaxBranchOffset) {
       do {
         int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
+        if (kInvalidSlotPos == trampoline_pos) {
+          // Internal error.
+          return 0;
+        }
         ASSERT((target_pos - trampoline_pos) > 0);
         ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
         target_at_put(trampoline_pos, target_pos);
@@ -979,157 +1088,6 @@
 
 void Assembler::addiu(Register rd, Register rs, int32_t j) {
   GenInstrImmediate(ADDIU, rs, rd, j);
-
-  // Eliminate pattern: push(r), pop().
-  //   addiu(sp, sp, Operand(-kPointerSize));
-  //   sw(src, MemOperand(sp, 0);
-  //   addiu(sp, sp, Operand(kPointerSize));
-  // Both instructions can be eliminated.
-  if (can_peephole_optimize(3) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
-      (instr_at(pc_ - 2 * kInstrSize) & ~kRtMask) == kPushRegPattern &&
-      (instr_at(pc_ - 3 * kInstrSize)) == kPushInstruction) {
-    pc_ -= 3 * kInstrSize;
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
-    }
-  }
-
-  // Eliminate pattern: push(ry), pop(rx).
-  //   addiu(sp, sp, -kPointerSize)
-  //   sw(ry, MemOperand(sp, 0)
-  //   lw(rx, MemOperand(sp, 0)
-  //   addiu(sp, sp, kPointerSize);
-  // Both instructions can be eliminated if ry = rx.
-  // If ry != rx, a register copy from ry to rx is inserted
-  // after eliminating the push and the pop instructions.
-  if (can_peephole_optimize(4)) {
-    Instr pre_push_sp_set = instr_at(pc_ - 4 * kInstrSize);
-    Instr push_instr = instr_at(pc_ - 3 * kInstrSize);
-    Instr pop_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
-
-    if (IsPush(push_instr) &&
-        IsPop(pop_instr) && pre_push_sp_set == kPushInstruction &&
-        post_pop_sp_set == kPopInstruction) {
-      if ((pop_instr & kRtMask) != (push_instr & kRtMask)) {
-        // For consecutive push and pop on different registers,
-        // we delete both the push & pop and insert a register move.
-        // push ry, pop rx --> mov rx, ry.
-        Register reg_pushed, reg_popped;
-        reg_pushed = GetRt(push_instr);
-        reg_popped = GetRt(pop_instr);
-        pc_ -= 4 * kInstrSize;
-        // Insert a mov instruction, which is better than a pair of push & pop.
-        or_(reg_popped, reg_pushed, zero_reg);
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
-                 pc_offset());
-        }
-      } else {
-        // For consecutive push and pop on the same register,
-        // both the push and the pop can be deleted.
-        pc_ -= 4 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
-        }
-      }
-    }
-  }
-
-  if (can_peephole_optimize(5)) {
-    Instr pre_push_sp_set = instr_at(pc_ - 5 * kInstrSize);
-    Instr mem_write_instr = instr_at(pc_ - 4 * kInstrSize);
-    Instr lw_instr = instr_at(pc_ - 3 * kInstrSize);
-    Instr mem_read_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
-
-    if (IsPush(mem_write_instr) &&
-        pre_push_sp_set == kPushInstruction &&
-        IsPop(mem_read_instr) &&
-        post_pop_sp_set == kPopInstruction) {
-      if ((IsLwRegFpOffset(lw_instr) ||
-        IsLwRegFpNegOffset(lw_instr))) {
-        if ((mem_write_instr & kRtMask) ==
-              (mem_read_instr & kRtMask)) {
-          // Pattern: push & pop from/to same register,
-          // with a fp+offset lw in between.
-          //
-          // The following:
-          // addiu sp, sp, -4
-          // sw rx, [sp, #0]!
-          // lw rz, [fp, #-24]
-          // lw rx, [sp, 0],
-          // addiu sp, sp, 4
-          //
-          // Becomes:
-          // if(rx == rz)
-          //   delete all
-          // else
-          //   lw rz, [fp, #-24]
-
-          if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) {
-            pc_ -= 5 * kInstrSize;
-          } else {
-            pc_ -= 5 * kInstrSize;
-            // Reinsert back the lw rz.
-            emit(lw_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
-          }
-        } else {
-          // Pattern: push & pop from/to different registers
-          // with a fp + offset lw in between.
-          //
-          // The following:
-          // addiu sp, sp ,-4
-          // sw rx, [sp, 0]
-          // lw rz, [fp, #-24]
-          // lw ry, [sp, 0]
-          // addiu sp, sp, 4
-          //
-          // Becomes:
-          // if(ry == rz)
-          //   mov ry, rx;
-          // else if(rx != rz)
-          //   lw rz, [fp, #-24]
-          //   mov ry, rx
-          // else if((ry != rz) || (rx == rz)) becomes:
-          //   mov ry, rx
-          //   lw rz, [fp, #-24]
-
-          Register reg_pushed, reg_popped;
-          if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) {
-            reg_pushed = GetRt(mem_write_instr);
-            reg_popped = GetRt(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-          } else if ((mem_write_instr & kRtMask)
-                                != (lw_instr & kRtMask)) {
-            reg_pushed = GetRt(mem_write_instr);
-            reg_popped = GetRt(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            emit(lw_instr);
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-          } else if (((mem_read_instr & kRtMask)
-                                     != (lw_instr & kRtMask)) ||
-                    ((mem_write_instr & kRtMask)
-                                     == (lw_instr & kRtMask)) ) {
-            reg_pushed = GetRt(mem_write_instr);
-            reg_popped = GetRt(mem_read_instr);
-            pc_ -= 5 * kInstrSize;
-            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
-            emit(lw_instr);
-          }
-          if (FLAG_print_peephole_optimization) {
-            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
-          }
-        }
-      }
-    }
-  }
 }
 
 
@@ -1317,54 +1275,6 @@
     LoadRegPlusOffsetToAt(rs);
     GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
   }
-
-  if (can_peephole_optimize(2)) {
-    Instr sw_instr = instr_at(pc_ - 2 * kInstrSize);
-    Instr lw_instr = instr_at(pc_ - 1 * kInstrSize);
-
-    if ((IsSwRegFpOffset(sw_instr) &&
-         IsLwRegFpOffset(lw_instr)) ||
-       (IsSwRegFpNegOffset(sw_instr) &&
-         IsLwRegFpNegOffset(lw_instr))) {
-      if ((lw_instr & kLwSwInstrArgumentMask) ==
-            (sw_instr & kLwSwInstrArgumentMask)) {
-        // Pattern: Lw/sw same fp+offset, same register.
-        //
-        // The following:
-        // sw rx, [fp, #-12]
-        // lw rx, [fp, #-12]
-        //
-        // Becomes:
-        // sw rx, [fp, #-12]
-
-        pc_ -= 1 * kInstrSize;
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x sw/lw (fp + same offset), same reg\n", pc_offset());
-        }
-      } else if ((lw_instr & kLwSwOffsetMask) ==
-                 (sw_instr & kLwSwOffsetMask)) {
-        // Pattern: Lw/sw same fp+offset, different register.
-        //
-        // The following:
-        // sw rx, [fp, #-12]
-        // lw ry, [fp, #-12]
-        //
-        // Becomes:
-        // sw rx, [fp, #-12]
-        // mov ry, rx
-
-        Register reg_stored, reg_loaded;
-        reg_stored = GetRt(sw_instr);
-        reg_loaded = GetRt(lw_instr);
-        pc_ -= 1 * kInstrSize;
-        // Insert a mov instruction, which is better than lw.
-        or_(reg_loaded, reg_stored, zero_reg);  // Move instruction.
-        if (FLAG_print_peephole_optimization) {
-          PrintF("%x sw/lw (fp + same offset), diff reg \n", pc_offset());
-        }
-      }
-    }
-  }
 }
 
 
@@ -1405,23 +1315,6 @@
     LoadRegPlusOffsetToAt(rs);
     GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
   }
-
-  // Eliminate pattern: pop(), push(r).
-  //     addiu sp, sp, Operand(kPointerSize);
-  //     addiu sp, sp, Operand(-kPointerSize);
-  // ->  sw r, MemOpernad(sp, 0);
-  if (can_peephole_optimize(3) &&
-     // Pattern.
-     instr_at(pc_ - 1 * kInstrSize) ==
-       (kPushRegPattern | (rd.code() << kRtShift)) &&
-     instr_at(pc_ - 2 * kInstrSize) == kPushInstruction &&
-     instr_at(pc_ - 3 * kInstrSize) == kPopInstruction) {
-    pc_ -= 3 * kInstrSize;
-    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
-    if (FLAG_print_peephole_optimization) {
-      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
-    }
-  }
 }
 
 
@@ -1545,14 +1438,14 @@
 
 void Assembler::movt(Register rd, Register rs, uint16_t cc) {
   Register rt;
-  rt.code_ = (cc & 0x0003) << 2 | 1;
+  rt.code_ = (cc & 0x0007) << 2 | 1;
   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
 }
 
 
 void Assembler::movf(Register rd, Register rs, uint16_t cc) {
   Register rt;
-  rt.code_ = (cc & 0x0003) << 2 | 0;
+  rt.code_ = (cc & 0x0007) << 2 | 0;
   GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
 }
 
@@ -1816,7 +1709,7 @@
 // Conditions.
 void Assembler::c(FPUCondition cond, SecondaryField fmt,
     FPURegister fs, FPURegister ft, uint16_t cc) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   ASSERT((fmt & ~(31 << kRsShift)) == 0);
   Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1827,7 +1720,7 @@
 
 void Assembler::fcmp(FPURegister src1, const double src2,
       FPUCondition cond) {
-  ASSERT(isolate()->cpu_features()->IsSupported(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(src2 == 0.0);
   mtc1(zero_reg, f14);
   cvt_d_w(f14, f14);
@@ -1836,7 +1729,7 @@
 
 
 void Assembler::bc1f(int16_t offset, uint16_t cc) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
   emit(instr);
@@ -1844,7 +1737,7 @@
 
 
 void Assembler::bc1t(int16_t offset, uint16_t cc) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+  ASSERT(CpuFeatures::IsEnabled(FPU));
   ASSERT(is_uint3(cc));
   Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
   emit(instr);
@@ -1949,7 +1842,14 @@
       return;
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
-    reloc_info_writer.Write(&rinfo);
+    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+      ASSERT(ast_id_for_reloc_info_ != kNoASTId);
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
+      ast_id_for_reloc_info_ = kNoASTId;
+      reloc_info_writer.Write(&reloc_info_with_ast_id);
+    } else {
+      reloc_info_writer.Write(&rinfo);
+    }
   }
 }
 
@@ -2017,72 +1917,39 @@
 Address Assembler::target_address_at(Address pc) {
   Instr instr1 = instr_at(pc);
   Instr instr2 = instr_at(pc + kInstrSize);
-  // Check we have 2 instructions generated by li.
-  ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
-         ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI ||
-                            (instr2 & kOpcodeMask) == ORI ||
-                            (instr2 & kOpcodeMask) == LUI)));
-  // Interpret these 2 instructions.
-  if (instr1 == nopInstr) {
-    if ((instr2 & kOpcodeMask) == ADDI) {
-      return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
-    } else if ((instr2 & kOpcodeMask) == ORI) {
-      return reinterpret_cast<Address>(instr2 & kImm16Mask);
-    } else if ((instr2 & kOpcodeMask) == LUI) {
-      return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
-    }
-  } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
-    // 32 bit value.
+  // Interpret 2 instructions generated by li: lui/ori
+  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
+    // Assemble the 32 bit value.
     return reinterpret_cast<Address>(
-        (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
+        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
   }
 
-  // We should never get here.
+  // We should never get here; force a bad address if we do.
   UNREACHABLE();
   return (Address)0x0;
 }
 
 
 void Assembler::set_target_address_at(Address pc, Address target) {
-  // On MIPS we need to patch the code to generate.
+  // On MIPS we patch the address into lui/ori instruction pair.
 
-  // First check we have a li.
+  // First check we have an li (lui/ori pair).
   Instr instr2 = instr_at(pc + kInstrSize);
 #ifdef DEBUG
   Instr instr1 = instr_at(pc);
 
   // Check we have indeed the result from a li with MustUseReg true.
-  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
-        ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
-                           (instr2 & kOpcodeMask)== ORI ||
-                           (instr2 & kOpcodeMask)== LUI)));
+  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
 #endif
 
-  uint32_t rt_code = (instr2 & kRtFieldMask);
+  uint32_t rt_code = GetRtField(instr2);
   uint32_t* p = reinterpret_cast<uint32_t*>(pc);
   uint32_t itarget = reinterpret_cast<uint32_t>(target);
 
-  if (is_int16(itarget)) {
-    // nop.
-    // addiu rt zero_reg j.
-    *p = nopInstr;
-    *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask);
-  } else if (!(itarget & kHiMask)) {
-    // nop.
-    // ori rt zero_reg j.
-    *p = nopInstr;
-    *(p+1) = ORI | rt_code | (itarget & kImm16Mask);
-  } else if (!(itarget & kImm16Mask)) {
-    // nop.
-    // lui rt (kHiMask & itarget) >> kLuiShift.
-    *p = nopInstr;
-    *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
-  } else {
-    // lui rt (kHiMask & itarget) >> kLuiShift.
-    // ori rt rt, (kImm16Mask & itarget).
-    *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
-    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
-  }
+  // lui rt, high-16.
+  // ori rt rt, low-16.
+  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
 
   CPU::FlushICache(pc, 2 * sizeof(int32_t));
 }
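
target_address_at and set_target_address_at above now assume the two-instruction lui/ori form unconditionally: the upper 16 bits of the target live in the lui immediate and the lower 16 bits in the ori immediate. The self-contained model below shows the same read and patch arithmetic on plain words; it skips the register fields and the CPU::FlushICache call that real patching needs.

#include <cstdint>
#include <cstdio>

typedef uint32_t Instr;

const Instr kLui = 0x0fu << 26;   // MIPS LUI opcode
const Instr kOri = 0x0du << 26;   // MIPS ORI opcode
const Instr kImm16Mask = 0xffff;

// Reassemble the 32-bit target from the lui/ori immediates.
uint32_t TargetAddressAt(const Instr* pc) {
  return ((pc[0] & kImm16Mask) << 16) | (pc[1] & kImm16Mask);
}

// Patch a new 32-bit target into the pair, keeping everything but the
// immediate fields untouched.
void SetTargetAddressAt(Instr* pc, uint32_t target) {
  pc[0] = (pc[0] & ~kImm16Mask) | (target >> 16);
  pc[1] = (pc[1] & ~kImm16Mask) | (target & kImm16Mask);
}

int main() {
  // lui 0x1234 ; ori 0x5678  (register fields omitted in this toy encoding).
  Instr code[2] = { kLui | 0x1234, kOri | 0x5678 };
  printf("old target: 0x%08x\n", TargetAddressAt(code));
  SetTargetAddressAt(code, 0xdeadbeef);
  printf("new target: 0x%08x\n", TargetAddressAt(code));
  return 0;
}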
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 5a6e271..a167393 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_
@@ -67,12 +67,13 @@
 
 
 // -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister
+// Implementation of Register and FPURegister.
 
 // Core register.
 struct Register {
   static const int kNumRegisters = v8::internal::kNumRegisters;
-  static const int kNumAllocatableRegisters = 14;  // v0 through t7
+  static const int kNumAllocatableRegisters = 14;  // v0 through t7.
+  static const int kSizeInBytes = 4;
 
   static int ToAllocationIndex(Register reg) {
     return reg.code() - 2;  // zero_reg and 'at' are skipped.
@@ -267,9 +268,6 @@
 // FPU (coprocessor 1) control registers.
 // Currently only FCSR (#31) is implemented.
 struct FPUControlRegister {
-  static const int kFCSRRegister = 31;
-  static const int kInvalidFPUControlRegister = -1;
-
   bool is_valid() const { return code_ == kFCSRRegister; }
   bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
   int code() const {
@@ -288,7 +286,7 @@
   int code_;
 };
 
-const FPUControlRegister no_fpucreg = { -1 };
+const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
 const FPUControlRegister FCSR = { kFCSRRegister };
 
 
@@ -318,7 +316,7 @@
 
  private:
   Register rm_;
-  int32_t imm32_;  // Valid if rm_ == no_reg
+  int32_t imm32_;  // Valid if rm_ == no_reg.
   RelocInfo::Mode rmode_;
 
   friend class Assembler;
@@ -342,58 +340,98 @@
 
 // CpuFeatures keeps track of which features are supported by the target CPU.
 // Supported features must be enabled by a Scope before use.
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  void Probe(bool portable);
+  static void Probe();
 
   // Check whether a feature is supported by the target CPU.
-  bool IsSupported(CpuFeature f) const {
+  static bool IsSupported(CpuFeature f) {
+    ASSERT(initialized_);
     if (f == FPU && !FLAG_enable_fpu) return false;
     return (supported_ & (1u << f)) != 0;
   }
 
+
+#ifdef DEBUG
   // Check whether a feature is currently enabled.
-  bool IsEnabled(CpuFeature f) const {
-    return (enabled_ & (1u << f)) != 0;
+  static bool IsEnabled(CpuFeature f) {
+    ASSERT(initialized_);
+    Isolate* isolate = Isolate::UncheckedCurrent();
+    if (isolate == NULL) {
+      // When no isolate is available, work as if we're running in
+      // release mode.
+      return IsSupported(f);
+    }
+    unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+    return (enabled & (1u << f)) != 0;
   }
+#endif
 
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
-    explicit Scope(CpuFeature f)
-        : cpu_features_(Isolate::Current()->cpu_features()),
-          isolate_(Isolate::Current()) {
-      ASSERT(cpu_features_->IsSupported(f));
+    explicit Scope(CpuFeature f) {
+      unsigned mask = 1u << f;
+      ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-             (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
-      old_enabled_ = cpu_features_->enabled_;
-      cpu_features_->enabled_ |= 1u << f;
+             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+      isolate_ = Isolate::UncheckedCurrent();
+      old_enabled_ = 0;
+      if (isolate_ != NULL) {
+        old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+      }
     }
     ~Scope() {
-      ASSERT_EQ(Isolate::Current(), isolate_);
-      cpu_features_->enabled_ = old_enabled_;
-     }
+      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+      if (isolate_ != NULL) {
+        isolate_->set_enabled_cpu_features(old_enabled_);
+      }
+    }
    private:
-    unsigned old_enabled_;
-    CpuFeatures* cpu_features_;
     Isolate* isolate_;
+    unsigned old_enabled_;
 #else
    public:
     explicit Scope(CpuFeature f) {}
 #endif
   };
 
+  class TryForceFeatureScope BASE_EMBEDDED {
+   public:
+    explicit TryForceFeatureScope(CpuFeature f)
+        : old_supported_(CpuFeatures::supported_) {
+      if (CanForce()) {
+        CpuFeatures::supported_ |= (1u << f);
+      }
+    }
+
+    ~TryForceFeatureScope() {
+      if (CanForce()) {
+        CpuFeatures::supported_ = old_supported_;
+      }
+    }
+
+   private:
+    static bool CanForce() {
+      // It's only safe to temporarily force support of CPU features
+      // when there's only a single isolate, which is guaranteed when
+      // the serializer is enabled.
+      return Serializer::enabled();
+    }
+
+    const unsigned old_supported_;
+  };
+
  private:
-  CpuFeatures();
-
-  unsigned supported_;
-  unsigned enabled_;
-  unsigned found_by_runtime_probing_;
-
-  friend class Isolate;
+#ifdef DEBUG
+  static bool initialized_;
+#endif
+  static unsigned supported_;
+  static unsigned found_by_runtime_probing_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
@@ -414,7 +452,7 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
   ~Assembler();
 
   // Overrides the default provided by FLAG_debug_code.
@@ -439,10 +477,10 @@
   //
   // Note: The same Label can be used for forward and backward branches
   // but it may be bound only once.
-  void bind(Label* L);  // binds an unbound label L to the current code position
+  void bind(Label* L);  // Binds an unbound label L to current code position.
 
-  // Returns the branch offset to the given label from the current code position
-  // Links the label to the current position if it is still unbound
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still unbound.
   // Manages the jump elimination optimization if the second parameter is true.
   int32_t branch_offset(Label* L, bool jump_elimination_allowed);
   int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
@@ -541,14 +579,14 @@
     FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
   };
 
-  // type == 0 is the default non-marking type.
+  // Type == 0 is the default non-marking type.
   void nop(unsigned int type = 0) {
     ASSERT(type < 32);
     sll(zero_reg, zero_reg, type, true);
   }
 
 
-  //------- Branch and jump  instructions --------
+  // --------Branch-and-jump-instructions----------
   // We don't use likely variant of instructions.
   void b(int16_t offset);
   void b(Label* L) { b(branch_offset(L, false)>>2); }
@@ -571,7 +609,7 @@
   }
 
   // Never use the int16_t b(l)cond version with a branch offset
-  // instead of using the Label* version. See Twiki for infos.
+  // instead of using the Label* version.
 
   // Jump targets must be in the current 256 MB-aligned region. ie 28 bits.
   void j(int32_t target);
@@ -761,6 +799,10 @@
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot();
 
+  // Record the AST id of the CallIC being compiled, so that it can be placed
+  // in the relocation information.
+  void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
+
   // Record a comment relocation entry that can be used by a disassembler.
   // Use --code-comments to enable.
   void RecordComment(const char* msg);
@@ -774,12 +816,6 @@
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
-  bool can_peephole_optimize(int instructions) {
-    if (!allow_peephole_optimization_) return false;
-    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
-    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
-  }
-
   // Postpone the generation of the trampoline pool for the specified number of
   // instructions.
   void BlockTrampolinePoolFor(int instructions);
@@ -804,6 +840,8 @@
 
   // Check if an instruction is a branch of some kind.
   static bool IsBranch(Instr instr);
+  static bool IsBeq(Instr instr);
+  static bool IsBne(Instr instr);
 
   static bool IsNop(Instr instr, unsigned int type);
   static bool IsPop(Instr instr);
@@ -813,7 +851,21 @@
   static bool IsLwRegFpNegOffset(Instr instr);
   static bool IsSwRegFpNegOffset(Instr instr);
 
-  static Register GetRt(Instr instr);
+  static Register GetRtReg(Instr instr);
+  static Register GetRsReg(Instr instr);
+  static Register GetRdReg(Instr instr);
+
+  static uint32_t GetRt(Instr instr);
+  static uint32_t GetRtField(Instr instr);
+  static uint32_t GetRs(Instr instr);
+  static uint32_t GetRsField(Instr instr);
+  static uint32_t GetRd(Instr instr);
+  static uint32_t GetRdField(Instr instr);
+  static uint32_t GetSa(Instr instr);
+  static uint32_t GetSaField(Instr instr);
+  static uint32_t GetOpcodeField(Instr instr);
+  static uint32_t GetImmediate16(Instr instr);
+  static uint32_t GetLabelConst(Instr instr);
 
   static int32_t GetBranchOffset(Instr instr);
   static bool IsLw(Instr instr);
@@ -825,9 +877,16 @@
   static bool IsAddImmediate(Instr instr);
   static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
 
+  static bool IsAndImmediate(Instr instr);
+
   void CheckTrampolinePool(bool force_emit = false);
 
  protected:
+  // Relocation for a type-recording IC has the AST id added to it.  This
+  // member variable is a way to pass the information from the call site to
+  // the relocation info.
+  unsigned ast_id_for_reloc_info_;
+
   bool emit_debug_code() const { return emit_debug_code_; }
 
   int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -861,6 +920,10 @@
     return trampoline_pool_blocked_nesting_ > 0;
   }
 
+  bool has_exception() const {
+    return internal_trampoline_exception_;
+  }
+
  private:
   // Code buffer:
   // The buffer into which code and relocation info are generated.
@@ -1005,10 +1068,18 @@
       return end_;
     }
     int take_slot() {
-      int trampoline_slot = next_slot_;
-      ASSERT(free_slot_count_ > 0);
-      free_slot_count_--;
-      next_slot_ += 2 * kInstrSize;
+      int trampoline_slot = kInvalidSlotPos;
+      if (free_slot_count_ <= 0) {
+        // We have run out of space on trampolines.
+        // Make sure we fail in debug mode, so we become aware of each case
+        // when this happens.
+        ASSERT(0);
+        // Internal exception will be caught.
+      } else {
+        trampoline_slot = next_slot_;
+        free_slot_count_--;
+        next_slot_ += 2*kInstrSize;
+      }
       return trampoline_slot;
     }
     int take_label() {
@@ -1038,8 +1109,10 @@
   static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
   static const int kMaxDistBetweenPools =
       kMaxBranchOffset - 2 * kTrampolineSize;
+  static const int kInvalidSlotPos = -1;
 
   List<Trampoline> trampolines_;
+  bool internal_trampoline_exception_;
 
   friend class RegExpMacroAssemblerMIPS;
   friend class RelocInfo;
@@ -1047,7 +1120,6 @@
   friend class BlockTrampolinePoolScope;
 
   PositionsRecorder positions_recorder_;
-  bool allow_peephole_optimization_;
   bool emit_debug_code_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
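A minimal sketch (not part of the patch) of the bitmask scheme the reworked static CpuFeatures above relies on: each CpuFeature value owns one bit of an unsigned word and is tested with (1u << f). The names below are illustrative only.

  unsigned supported = 0;                           // bit f set <=> feature f supported
  const unsigned kFeatureBit = 1u << 1;             // illustrative bit for one feature
  supported |= kFeatureBit;                         // what Probe() would do on detection
  bool available = (supported & kFeatureBit) != 0;  // IsSupported()-style check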
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index b4bab8e..e22259d 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
@@ -47,97 +47,1577 @@
 void Builtins::Generate_Adaptor(MacroAssembler* masm,
                                 CFunctionId id,
                                 BuiltinExtraArguments extra_args) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments excluding receiver
+  //  -- a1                 : called function (only guaranteed when
+  //  --                      extra_args requires it)
+  //  -- cp                 : context
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+
+  // Insert extra arguments.
+  int num_extra_args = 0;
+  if (extra_args == NEEDS_CALLED_FUNCTION) {
+    num_extra_args = 1;
+    __ push(a1);
+  } else {
+    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+  }
+
+  // JumpToExternalReference expects a0 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  __ Addu(a0, a0, Operand(num_extra_args + 1));
+  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the global context.
+
+  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(result,
+         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+  // Load the Array function from the global context.
+  __ lw(result,
+         MemOperand(result,
+                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// This constant has the same value as JSArray::kPreallocatedArrayElements; if
+// JSArray::kPreallocatedArrayElements is changed, the loop unfolding below
+// should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. An elements backing store is allocated with size initial_capacity
+// and filled with the hole values.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity > 0);
+  // Load the initial map from the array function.
+  __ lw(scratch1, FieldMemOperand(array_function,
+                                  JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
+  __ AllocateInNewSpace(size,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ mov(scratch3,  zero_reg);
+  __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ Addu(scratch1, result, Operand(JSArray::kSize));
+  __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array (untagged)
+  // scratch2: start of next object
+  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ sw(scratch3, MemOperand(scratch1));
+  __ Addu(scratch1, scratch1, kPointerSize);
+  __ li(scratch3,  Operand(Smi::FromInt(initial_capacity)));
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ sw(scratch3, MemOperand(scratch1));
+  __ Addu(scratch1, scratch1, kPointerSize);
+
+  // Fill the FixedArray with the hole value.
+  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+  ASSERT(initial_capacity <= kLoopUnfoldLimit);
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  for (int i = 0; i < initial_capacity; i++) {
+    __ sw(scratch3, MemOperand(scratch1));
+    __ Addu(scratch1, scratch1, kPointerSize);
+  }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register, and the beginning and end of the FixedArray
+// elements storage are put into registers elements_array_storage and
+// elements_array_end (see below for when that is not the case). If the
+// parameter fill_with_hole is true, the allocated elements backing store is
+// filled with the hole values; otherwise it is left uninitialized. When the
+// backing store is filled, the register elements_array_storage is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array_storage,
+                            Register elements_array_end,
+                            Register scratch1,
+                            Register scratch2,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ lw(elements_array_storage,
+         FieldMemOperand(array_function,
+                         JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ Branch(&not_empty, ne, array_size, Operand(zero_reg));
+
+  // If an empty array is requested, allocate a small elements array anyway.
+  // This keeps the code below free of special-casing for the empty array.
+  int size = JSArray::kSize +
+             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch1,
+                        gc_required,
+                        TAG_OBJECT);
+  __ Branch(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested number of elements.
+  __ bind(&not_empty);
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ li(elements_array_end,
+        (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
+  __ sra(scratch1, array_size, kSmiTagSize);
+  __ Addu(elements_array_end, elements_array_end, scratch1);
+  __ AllocateInNewSpace(
+      elements_array_end,
+      result,
+      scratch1,
+      scratch2,
+      gc_required,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array_storage: initial map
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
+  __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(elements_array_storage,
+         FieldMemOperand(result, JSArray::kPropertiesOffset));
+  // Field JSArray::kElementsOffset is initialized later.
+  __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // array_size: size of array (smi)
+  __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
+  __ sw(elements_array_storage,
+         FieldMemOperand(result, JSArray::kElementsOffset));
+
+  // Clear the heap tag on the elements array.
+  __ And(elements_array_storage,
+          elements_array_storage,
+          Operand(~kHeapObjectTagMask));
+  // Initialize the fixed array and fill it with holes. FixedArray length is
+  // stored as a smi.
+  // result: JSObject
+  // elements_array_storage: elements array (untagged)
+  // array_size: size of array (smi)
+  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
+  __ sw(scratch1, MemOperand(elements_array_storage));
+  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+  // Length of the FixedArray is the number of pre-allocated elements if
+  // the actual JSArray has length 0 and the size of the JSArray for non-empty
+  // JSArrays. The length of a FixedArray is stored as a smi.
+  ASSERT(kSmiTag == 0);
+  __ li(at, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+  __ movz(array_size, at, array_size);
+
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+  __ sw(array_size, MemOperand(elements_array_storage));
+  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+  // Calculate elements array and elements array end.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // array_size: smi-tagged size of elements array
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(elements_array_end, elements_array_storage, elements_array_end);
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array_storage: elements array element storage
+  // elements_array_end: start of next object
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
+    __ Branch(&entry);
+    __ bind(&loop);
+    __ sw(scratch1, MemOperand(elements_array_storage));
+    __ Addu(elements_array_storage, elements_array_storage, kPointerSize);
+
+    __ bind(&entry);
+    __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
+  }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+//   a0: argc
+//   a1: constructor (built-in Array function)
+//   ra: return address
+//   sp[0]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in a1 needs to be preserved for
+// entering the generic code. In both cases argc in a0 needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// construct call and normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            Label* call_generic_code) {
+  Counters* counters = masm->isolate()->counters();
+  Label argc_one_or_more, argc_two_or_more;
+
+  // Check for array construction with zero or one arguments.
+  __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       a1,
+                       a2,
+                       a3,
+                       t0,
+                       t1,
+                       JSArray::kPreallocatedArrayElements,
+                       call_generic_code);
+  __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
+  // Set up the return value, remove the receiver from the stack and return.
+  __ mov(v0, a2);
+  __ Addu(sp, sp, Operand(kPointerSize));
+  __ Ret();
+
+  // Check for one argument. Bail out if the argument is not a smi or if it
+  // is negative.
+  __ bind(&argc_one_or_more);
+  __ Branch(&argc_two_or_more, ne, a0, Operand(1));
+
+  ASSERT(kSmiTag == 0);
+  __ lw(a2, MemOperand(sp));  // Get the argument from the stack.
+  __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
+  __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
+
+  // Handle construction of an empty array of a certain size. Bail out if size
+  // is too large to actually allocate an elements array.
+  ASSERT(kSmiTag == 0);
+  __ Branch(call_generic_code, ge, a2,
+            Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
+
+  // a0: argc
+  // a1: constructor
+  // a2: array_size (smi)
+  // sp[0]: argument
+  AllocateJSArray(masm,
+                  a1,
+                  a2,
+                  a3,
+                  t0,
+                  t1,
+                  t2,
+                  t3,
+                  true,
+                  call_generic_code);
+  __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
+
+  // Set up return value, remove receiver and argument from stack and return.
+  __ mov(v0, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  __ sll(a2, a0, kSmiTagSize);  // Convert argc to a smi.
+
+  // a0: argc
+  // a1: constructor
+  // a2: array_size (smi)
+  // sp[0]: last argument
+  AllocateJSArray(masm,
+                  a1,
+                  a2,
+                  a3,
+                  t0,
+                  t1,
+                  t2,
+                  t3,
+                  false,
+                  call_generic_code);
+  __ IncrementCounter(counters->array_function_native(), 1, a2, t2);
+
+  // Fill arguments as array elements. Copy from the top of the stack (last
+  // element) to the array backing store filling it backwards. Note:
+  // elements_array_end points after the backing store.
+  // a0: argc
+  // a3: JSArray
+  // t0: elements_array storage start (untagged)
+  // t1: elements_array_end (untagged)
+  // sp[0]: last argument
+
+  Label loop, entry;
+  __ Branch(&entry);
+  __ bind(&loop);
+  __ pop(a2);
+  __ Addu(t1, t1, -kPointerSize);
+  __ sw(a2, MemOperand(t1));
+  __ bind(&entry);
+  __ Branch(&loop, lt, t0, Operand(t1));
+
+  // Remove caller arguments and receiver from the stack, set up the return
+  // value and return.
+  // a0: argc
+  // a3: JSArray
+  // sp[0]: receiver
+  __ Addu(sp, sp, Operand(kPointerSize));
+  __ mov(v0, a3);
+  __ Ret();
 }
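A minimal sketch (not part of the patch) of the smi tagging that ArrayNativeCode above depends on when it converts argc to a smi (the ASSERTs require kSmiTag == 0 and kSmiTagSize == 1). The values below are illustrative only.

  int32_t value = 42;
  int32_t smi = value << 1;          // tagging: smis keep a 0 in the low bit
  bool is_smi = (smi & 1) == 0;      // kSmiTagMask-style check
  int32_t untagged = smi >> 1;       // arithmetic shift restores the value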
 
 
 void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, a1);
+
+  if (FLAG_debug_code) {
+    // The initial map for the builtin Array function should be a map.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function (1)",
+              t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t0);
+    __ Assert(eq, "Unexpected initial map for Array function (2)",
+              t0, Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code if the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
+
+  Handle<Code> array_code =
+      masm->isolate()->builtins()->ArrayCodeGeneric();
+  __ Jump(array_code, RelocInfo::CODE_TARGET);
 }
 
 
 void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin and internal
+    // Array functions which always have a map.
+    // Initial map for the builtin Array function should be a map.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Assert(ne, "Unexpected initial map for Array function (3)",
+              t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t0);
+    __ Assert(eq, "Unexpected initial map for Array function (4)",
+              t0, Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the Array function called as a constructor.
+  ArrayNativeCode(masm, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
+
+  Handle<Code> generic_construct_stub =
+      masm->isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
 }
 
 
 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
+
+  Register function = a1;
+  if (FLAG_debug_code) {
+    __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
+    __ Assert(eq, "Unexpected String function", function, Operand(a2));
+  }
+
+  // Load the first argument into a0 and get rid of the rest.
+  Label no_arguments;
+  __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
+  // First arg = sp[(argc - 1) * 4].
+  __ Subu(a0, a0, Operand(1));
+  __ sll(a0, a0, kPointerSizeLog2);
+  __ Addu(sp, a0, sp);
+  __ lw(a0, MemOperand(sp));
+  // sp now points to args[0]; drop args[0] and the receiver.
+  __ Drop(2);
+
+  Register argument = a2;
+  Label not_cached, argument_is_string;
+  NumberToStringStub::GenerateLookupNumberStringCache(
+      masm,
+      a0,        // Input.
+      argument,  // Result.
+      a3,        // Scratch.
+      t0,        // Scratch.
+      t1,        // Scratch.
+      false,     // Is it a Smi?
+      &not_cached);
+  __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
+  __ bind(&argument_is_string);
+
+  // ----------- S t a t e -------------
+  //  -- a2     : argument converted to string
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Label gc_required;
+  __ AllocateInNewSpace(JSValue::kSize,
+                        v0,  // Result.
+                        a3,  // Scratch.
+                        t0,  // Scratch.
+                        &gc_required,
+                        TAG_OBJECT);
+
+  // Initialize the String object.
+  Register map = a3;
+  __ LoadGlobalFunctionInitialMap(function, map, t0);
+  if (FLAG_debug_code) {
+    __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
+    __ Assert(eq, "Unexpected string wrapper instance size",
+        t0, Operand(JSValue::kSize >> kPointerSizeLog2));
+    __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+    __ Assert(eq, "Unexpected unused properties of string wrapper",
+        t0, Operand(zero_reg));
+  }
+  __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  // Ensure the object is fully initialized.
+  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+  __ Ret();
+
+  // The argument was not found in the number to string cache. Check
+  // if it's a string already before calling the conversion builtin.
+  Label convert_argument;
+  __ bind(&not_cached);
+  __ JumpIfSmi(a0, &convert_argument);
+
+  // Is it a String?
+  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  ASSERT(kNotStringTag != 0);
+  __ And(t0, a3, Operand(kIsNotStringMask));
+  __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
+  __ mov(argument, a0);
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+  __ Branch(&argument_is_string);
+
+  // Invoke the conversion builtin and put the result into a2.
+  __ bind(&convert_argument);
+  __ push(function);  // Preserve the function.
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+  __ EnterInternalFrame();
+  __ push(v0);
+  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  __ LeaveInternalFrame();
+  __ pop(function);
+  __ mov(argument, v0);
+  __ Branch(&argument_is_string);
+
+  // Load the empty string into a2, remove the receiver from the
+  // stack, and jump back to the case where the argument is a string.
+  __ bind(&no_arguments);
+  __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+  __ Drop(1);
+  __ Branch(&argument_is_string);
+
+  // At this point the argument is already a string. Call runtime to
+  // create a string wrapper.
+  __ bind(&gc_required);
+  __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
+  __ EnterInternalFrame();
+  __ push(argument);
+  __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  __ LeaveInternalFrame();
+  __ Ret();
 }
 
 
 void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  Label non_function_call;
+  // Check that the function is not a smi.
+  __ And(t0, a1, Operand(kSmiTagMask));
+  __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
+  // Check that the function is a JSFunction.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // Jump to the function-specific construct stub.
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
+  __ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(Operand(t9));
+
+  // a0: number of arguments
+  // a1: called object
+  __ bind(&non_function_call);
+  // CALL_NON_FUNCTION expects the non-function constructor as receiver
+  // (instead of the original receiver from the call site). The receiver is
+  // stack element argc.
+  // Set expected number of arguments to zero (not changing a0).
+  __ mov(a2, zero_reg);
+  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool count_constructions) {
+  // Should never count constructions for api objects.
+  ASSERT(!is_api_function || !count_constructions);
+
+  Isolate* isolate = masm->isolate();
+
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  // Enter a construct frame.
+  __ EnterConstructFrame();
+
+  // Preserve the two incoming parameters on the stack.
+  __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
+  __ MultiPushReversed(a0.bit() | a1.bit());
+
+  // Use t7 to hold undefined, which is used in several places below.
+  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+  Label rt_call, allocated;
+  // Try to allocate the object without transitioning into C code. If any of the
+  // preconditions is not met, the code bails out to the runtime call.
+  if (FLAG_inline_new) {
+    Label undo_allocation;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address(isolate);
+    __ li(a2, Operand(debug_step_in_fp));
+    __ lw(a2, MemOperand(a2));
+    __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+#endif
+
+    // Load the initial map and verify that it is in fact a map.
+    // a1: constructor function
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t4);
+    __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+    // Check that the constructor is not constructing a JSFunction (see comments
+    // in Runtime_NewObject in runtime.cc); in that case the initial map's
+    // instance type would be JS_FUNCTION_TYPE.
+    // a1: constructor function
+    // a2: initial map
+    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+    __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+      MemOperand constructor_count =
+         FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+      __ lbu(t0, constructor_count);
+      __ Subu(t0, t0, Operand(1));
+      __ sb(t0, constructor_count);
+      __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+      __ Push(a1, a2);
+
+      __ push(a1);  // Constructor.
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(a2);
+      __ pop(a1);
+
+      __ bind(&allocate);
+    }
+
+    // Now allocate the JSObject on the heap.
+    // a1: constructor function
+    // a2: initial map
+    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+    __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+    // Allocated the JSObject; now initialize the fields. Map is set to the
+    // initial map and properties and elements to the empty fixed array.
+    // a1: constructor function
+    // a2: initial map
+    // a3: object size
+    // t4: JSObject (not tagged)
+    __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+    __ mov(t5, t4);
+    __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+    __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+    __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+    __ Addu(t5, t5, Operand(3 * kPointerSize));
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+    // Fill all the in-object properties with appropriate filler.
+    // a1: constructor function
+    // a2: initial map
+    // a3: object size (in words)
+    // t4: JSObject (not tagged)
+    // t5: First in-object property of JSObject (not tagged)
+    __ sll(t0, a3, kPointerSizeLog2);
+    __ addu(t6, t4, t0);   // End of object.
+    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+    { Label loop, entry;
+      if (count_constructions) {
+        // To allow for truncation.
+        __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+      } else {
+        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+      }
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ sw(t7, MemOperand(t5, 0));
+      __ addiu(t5, t5, kPointerSize);
+      __ bind(&entry);
+      __ Branch(&loop, Uless, t5, Operand(t6));
+    }
+
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+    // Check if a non-empty properties array is needed. Continue with the
+    // allocated object if not; fall through to the runtime call if it is.
+    // a1: constructor function
+    // t4: JSObject
+    // t5: start of next object (not tagged)
+    __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+    // The instance sizes field contains both pre-allocated property fields and
+    // in-object properties.
+    __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+    __ And(t6,
+           a0,
+           Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
+    __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
+    __ Addu(a3, a3, Operand(t0));
+    __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
+    __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
+    __ subu(a3, a3, t0);
+
+    // Done if no extra properties are to be allocated.
+    __ Branch(&allocated, eq, a3, Operand(zero_reg));
+    __ Assert(greater_equal, "Property allocation count failed.",
+        a3, Operand(zero_reg));
+
+    // Scale the number of elements by pointer size and add the header for
+    // FixedArrays to the start of the next object calculation from above.
+    // a1: constructor
+    // a3: number of elements in properties array
+    // t4: JSObject
+    // t5: start of next object
+    __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+    __ AllocateInNewSpace(
+        a0,
+        t5,
+        t6,
+        a2,
+        &undo_allocation,
+        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+    // Initialize the FixedArray.
+    // a1: constructor
+    // a3: number of elements in properties array (un-tagged)
+    // t4: JSObject
+    // t5: start of next object
+    __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+    __ mov(a2, t5);
+    __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+    __ sll(a0, a3, kSmiTagSize);
+    __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+    __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+    // Initialize the fields to undefined.
+    // a1: constructor
+    // a2: First element of FixedArray (not tagged)
+    // a3: number of elements in properties array
+    // t4: JSObject
+    // t5: FixedArray (not tagged)
+    __ sll(t3, a3, kPointerSizeLog2);
+    __ addu(t6, a2, t3);  // End of object.
+    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+    { Label loop, entry;
+      if (count_constructions) {
+        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+      } else if (FLAG_debug_code) {
+        __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+        __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+      }
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ sw(t7, MemOperand(a2));
+      __ addiu(a2, a2, kPointerSize);
+      __ bind(&entry);
+      __ Branch(&loop, less, a2, Operand(t6));
+    }
+
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject.
+    // a1: constructor function
+    // t4: JSObject
+    // t5: FixedArray (not tagged)
+    __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
+    __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+    // Continue with JSObject being successfully allocated.
+    // a1: constructor function
+    // t4: JSObject
+    __ jmp(&allocated);
+
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated object's unused properties.
+    // t4: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(t4, t5);
+  }
+
+  __ bind(&rt_call);
+  // Allocate the new receiver object using the runtime call.
+  // a1: constructor function
+  __ push(a1);  // Argument for Runtime_NewObject.
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ mov(t4, v0);
+
+  // Receiver for constructor call allocated.
+  // t4: JSObject
+  __ bind(&allocated);
+  __ push(t4);
+
+  // Push the function and the allocated receiver from the stack.
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ lw(a1, MemOperand(sp, kPointerSize));
+  __ MultiPushReversed(a1.bit() | t4.bit());
+
+  // Reload the number of arguments from the stack.
+  // a1: constructor function
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  __ lw(a3, MemOperand(sp, 4 * kPointerSize));
+
+  // Set up the pointer to the last argument.
+  __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // Set up the number of arguments for the function call below.
+  __ srl(a0, a3, kSmiTagSize);
+
+  // Copy arguments and receiver to the expression stack.
+  // a0: number of arguments
+  // a1: constructor function
+  // a2: address of last argument (caller sp)
+  // a3: number of arguments (smi-tagged)
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  Label loop, entry;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a2, Operand(t0));
+  __ lw(t1, MemOperand(t0));
+  __ push(t1);
+  __ bind(&entry);
+  __ Addu(a3, a3, Operand(-2));
+  __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+
+  // Call the function.
+  // a0: number of arguments
+  // a1: constructor function
+  if (is_api_function) {
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    Handle<Code> code =
+        masm->isolate()->builtins()->HandleApiCallConstruct();
+    ParameterCount expected(0);
+    __ InvokeCode(code, expected, expected,
+                  RelocInfo::CODE_TARGET, CALL_FUNCTION);
+  } else {
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, actual, CALL_FUNCTION);
+  }
+
+  // Pop the function from the stack.
+  // v0: result
+  // sp[0]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  __ Pop();
+
+  // Restore context from the frame.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  // v0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ And(t0, v0, Operand(kSmiTagMask));
+  __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ GetObjectType(v0, a3, a3);
+  __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ lw(v0, MemOperand(sp));
+
+  // Remove receiver from the stack, remove caller arguments, and
+  // return.
+  __ bind(&exit);
+  // v0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+  __ LeaveConstructFrame();
+  __ sll(t0, a1, kPointerSizeLog2 - 1);
+  __ Addu(sp, sp, t0);
+  __ Addu(sp, sp, kPointerSize);
+  __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+  __ Ret();
 }
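A minimal sketch (not part of the patch) of the heap-object tagging used throughout the construct stub above: an allocated pointer has kHeapObjectTag (1) added to it, setting its low bit, so field accesses (FieldMemOperand) subtract the tag from the field offset again. The addresses below are illustrative only.

  uintptr_t raw = 0x20001000;                    // untagged allocation address
  uintptr_t tagged = raw + 1;                    // add kHeapObjectTag, as done for t4 above
  uintptr_t map_slot = tagged + 0 /* kMapOffset */ - 1;  // back to the raw address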
 
 
 void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSConstructStubHelper(masm, false, true);
 }
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSConstructStubHelper(masm, false, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from JSEntryStub::GenerateBody
+
+  // ----------- S t a t e -------------
+  //  -- a0: code entry
+  //  -- a1: function
+  //  -- a2: receiver_pointer
+  //  -- a3: argc
+  //  -- s0: argv
+  // -----------------------------------
+
+  // Clear the context before we push it when entering the JS frame.
+  __ mov(cp, zero_reg);
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Set up the context from the function argument.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Set up the roots register.
+  ExternalReference roots_address =
+      ExternalReference::roots_address(masm->isolate());
+  __ li(s6, Operand(roots_address));
+
+  // Push the function and the receiver onto the stack.
+  __ Push(a1, a2);
+
+  // Copy arguments to the stack in a loop.
+  // a3: argc
+  // s0: argv, i.e. points to the first arg
+  Label loop, entry;
+  __ sll(t0, a3, kPointerSizeLog2);
+  __ addu(t2, s0, t0);
+  __ b(&entry);
+  __ nop();   // Branch delay slot nop.
+  // t2 points past last arg.
+  __ bind(&loop);
+  __ lw(t0, MemOperand(s0));  // Read next parameter.
+  __ addiu(s0, s0, kPointerSize);
+  __ lw(t0, MemOperand(t0));  // Dereference handle.
+  __ push(t0);  // Push parameter.
+  __ bind(&entry);
+  __ Branch(&loop, ne, s0, Operand(t2));
+
+  // Initialize all JavaScript callee-saved registers, since they will be seen
+  // by the garbage collector as part of handlers.
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ mov(s1, t0);
+  __ mov(s2, t0);
+  __ mov(s3, t0);
+  __ mov(s4, t0);
+  __ mov(s5, t0);
+  // s6 holds the root address. Do not clobber.
+  // s7 is cp. Do not init.
+
+  // Invoke the code and pass argc as a0.
+  __ mov(a0, a3);
+  if (is_construct) {
+    __ Call(masm->isolate()->builtins()->JSConstructCall(),
+            RelocInfo::CODE_TARGET);
+  } else {
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, actual, CALL_FUNCTION);
+  }
+
+  __ LeaveInternalFrame();
+
+  __ Jump(ra);
 }
 
 
 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSEntryTrampolineHelper(masm, false);
 }
 
 
 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Generate_JSEntryTrampolineHelper(masm, true);
 }
 
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Preserve the function.
+  __ push(a1);
+  // Push call kind information.
+  __ push(t1);
+
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(a1);
+  // Call the runtime function.
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  // Calculate the entry point.
+  __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+
+  // Restore call kind information.
+  __ pop(t1);
+  // Restore saved function.
+  __ pop(a1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ Jump(t9);
 }
 
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Preserve the function.
+  __ push(a1);
+  // Push call kind information.
+  __ push(t1);
+
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(a1);
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+  // Calculate the entry point.
+  __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Restore call kind information.
+  __ pop(t1);
+  // Restore saved function.
+  __ pop(a1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ Jump(t9);
 }
 
 
+// These functions are called from C++ but cannot be used in live code.
 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // 1. Make sure we have at least one argument.
+  // a0: actual number of arguments
+  { Label done;
+    __ Branch(&done, ne, a0, Operand(zero_reg));
+    __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+    __ push(t2);
+    __ Addu(a0, a0, Operand(1));
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call (passed as receiver) from the stack and
+  //    check that it is a function.
+  // a0: actual number of arguments
+  Label non_function;
+  __ sll(at, a0, kPointerSizeLog2);
+  __ addu(at, sp, at);
+  __ lw(a1, MemOperand(at));
+  __ And(at, a1, Operand(kSmiTagMask));
+  __ Branch(&non_function, eq, at, Operand(zero_reg));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // 3a. Patch the first argument if necessary when calling a function.
+  // a0: actual number of arguments
+  // a1: function
+  Label shift_arguments;
+  { Label convert_to_object, use_global_receiver, patch_receiver;
+    // Change context eagerly in case we need the global receiver.
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+    // Do not transform the receiver for strict mode functions.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                                 kSmiTagSize)));
+    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+    // Do not transform the receiver for natives (compiler hints already in a3).
+    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kES5Native +
+                                 kSmiTagSize)));
+    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+
+    // Compute the receiver in non-strict mode.
+    // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(a2, sp, at);
+    __ lw(a2, MemOperand(a2, -kPointerSize));
+    // a0: actual number of arguments
+    // a1: function
+    // a2: first argument
+    __ JumpIfSmi(a2, &convert_to_object, t2);
+
+    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+    __ LoadRoot(a3, Heap::kNullValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+
+    STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
+    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    __ GetObjectType(a2, a3, a3);
+    __ Branch(&shift_arguments, ge, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+    __ bind(&convert_to_object);
+    __ EnterInternalFrame();  // In order to preserve argument count.
+    __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
+    __ push(a0);
+
+    __ push(a2);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(a2, v0);
+
+    __ pop(a0);
+    __ sra(a0, a0, kSmiTagSize);  // Un-tag.
+    __ LeaveInternalFrame();
+    // Restore the function to a1.
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(at, sp, at);
+    __ lw(a1, MemOperand(at));
+    __ Branch(&patch_receiver);
+
+    // Use the global receiver object from the called function as the
+    // receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalIndex =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+    __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+
+    __ bind(&patch_receiver);
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(a3, sp, at);
+    __ sw(a2, MemOperand(a3, -kPointerSize));
+
+    __ Branch(&shift_arguments);
+  }
+
+  // 3b. Patch the first argument when calling a non-function.  The
+  //     CALL_NON_FUNCTION builtin expects the non-function callee as
+  //     receiver, so overwrite the first argument which will ultimately
+  //     become the receiver.
+  // a0: actual number of arguments
+  // a1: function
+  __ bind(&non_function);
+  // Restore the function in case it has been modified.
+  __ sll(at, a0, kPointerSizeLog2);
+  __ addu(a2, sp, at);
+  __ sw(a1, MemOperand(a2, -kPointerSize));
+  // Clear a1 to indicate a non-function being called.
+  __ mov(a1, zero_reg);
+
+  // 4. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // a0: actual number of arguments
+  // a1: function
+  __ bind(&shift_arguments);
+  { Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ sll(at, a0, kPointerSizeLog2);
+    __ addu(a2, sp, at);
+
+    __ bind(&loop);
+    __ lw(at, MemOperand(a2, -kPointerSize));
+    __ sw(at, MemOperand(a2));
+    __ Subu(a2, a2, Operand(kPointerSize));
+    __ Branch(&loop, ne, a2, Operand(sp));
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ Subu(a0, a0, Operand(1));
+    __ Pop();
+  }
+
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+  // a0: actual number of arguments
+  // a1: function
+  { Label function;
+    __ Branch(&function, ne, a1, Operand(zero_reg));
+    __ mov(a2, zero_reg);  // expected arguments is 0 for CALL_NON_FUNCTION
+    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+    __ SetCallKind(t1, CALL_AS_METHOD);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+    __ bind(&function);
+  }
+
+  // 5b. Get the code to call from the function and check that the number of
+  //     expected arguments matches what we're providing.  If so, jump
+  //     (tail-call) to the code in register a3 without checking arguments.
+  // a0: actual number of arguments
+  // a1: function
+  __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2,
+         FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ sra(a2, a2, kSmiTagSize);
+  __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  // Check formal and actual parameter counts.
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+
+  ParameterCount expected(0);
+  __ InvokeCode(a3, expected, expected, JUMP_FUNCTION);
 }
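
For readers following the numbered steps above, the receiver-patching rule of
steps 3a/3b can be restated in plain C++. This is only an illustrative sketch of
the decision logic; the enum and helper below are invented for the example and
are not V8 API.

// Sketch of steps 3a/3b: for a classic (non-strict, non-native) callee,
// undefined and null receivers become the global receiver object and other
// primitives are boxed via ToObject; JS objects pass through unchanged.
// Strict-mode and native callees keep the receiver exactly as passed.
enum class Receiver { kUndefined, kNull, kPrimitive, kJSObject };

Receiver PatchReceiver(Receiver receiver, bool strict_or_native) {
  if (strict_or_native) return receiver;
  if (receiver == Receiver::kUndefined || receiver == Receiver::kNull) {
    return Receiver::kJSObject;  // Stand-in for the global receiver.
  }
  if (receiver == Receiver::kPrimitive) {
    return Receiver::kJSObject;  // Stand-in for the ToObject() wrapper.
  }
  return receiver;
}
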
 
 
 void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  const int kIndexOffset    = -5 * kPointerSize;
+  const int kLimitOffset    = -4 * kPointerSize;
+  const int kArgsOffset     =  2 * kPointerSize;
+  const int kRecvOffset     =  3 * kPointerSize;
+  const int kFunctionOffset =  4 * kPointerSize;
+
+  __ EnterInternalFrame();
+
+  __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
+  __ push(a0);
+  __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
+  __ push(a0);
+  // Returns (in v0) number of arguments to copy to stack as Smi.
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+  // Make a2 the space we have left. The stack might already be overflowed
+  // here which will cause a2 to become negative.
+  __ subu(a2, sp, a2);
+  // Check if the arguments will overflow the stack.
+  __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
+  __ Branch(&okay, gt, a2, Operand(t0));  // Signed comparison.
+
+  // Out of stack space.
+  __ lw(a1, MemOperand(fp, kFunctionOffset));
+  __ push(a1);
+  __ push(v0);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  // End of stack check.
+
+  // Push current limit and index.
+  __ bind(&okay);
+  __ push(v0);  // Limit.
+  __ mov(a1, zero_reg);  // Initial index.
+  __ push(a1);
+
+  // Change context eagerly to get the right global object if necessary.
+  __ lw(a0, MemOperand(fp, kFunctionOffset));
+  __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
+  // Load the shared function info while the function is still in a0.
+  __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+
+  // Compute the receiver.
+  Label call_to_object, use_global_receiver, push_receiver;
+  __ lw(a0, MemOperand(fp, kRecvOffset));
+
+  // Do not transform the receiver for strict mode functions.
+  __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
+  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                               kSmiTagSize)));
+  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+  // Do not transform the receiver for native (Compilerhints already in a2).
+  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kES5Native +
+                               kSmiTagSize)));
+  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+
+  // Compute the receiver in non-strict mode.
+  __ And(t0, a0, Operand(kSmiTagMask));
+  __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
+  __ LoadRoot(a1, Heap::kNullValueRootIndex);
+  __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+
+  // Check if the receiver is already a JavaScript object.
+  // a0: receiver
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&push_receiver, ge, a1, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Convert the receiver to a regular object.
+  // a0: receiver
+  __ bind(&call_to_object);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
+  __ Branch(&push_receiver);
+
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+  __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+
+  // Push the receiver.
+  // a0: receiver
+  __ bind(&push_receiver);
+  __ push(a0);
+
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ lw(a0, MemOperand(fp, kIndexOffset));
+  __ Branch(&entry);
+
+  // Load the current argument from the arguments array and push it to the
+  // stack.
+  // a0: current argument index
+  __ bind(&loop);
+  __ lw(a1, MemOperand(fp, kArgsOffset));
+  __ push(a1);
+  __ push(a0);
+
+  // Call the runtime to access the property in the arguments array.
+  __ CallRuntime(Runtime::kGetProperty, 2);
+  __ push(v0);
+
+  // Advance the (smi) index in the frame slot.
+  __ lw(a0, MemOperand(fp, kIndexOffset));
+  __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+  __ sw(a0, MemOperand(fp, kIndexOffset));
+
+  // Test if the copy loop has finished copying all the elements from the
+  // arguments object.
+  __ bind(&entry);
+  __ lw(a1, MemOperand(fp, kLimitOffset));
+  __ Branch(&loop, ne, a0, Operand(a1));
+  // Invoke the function.
+  ParameterCount actual(a0);
+  __ sra(a0, a0, kSmiTagSize);
+  __ lw(a1, MemOperand(fp, kFunctionOffset));
+  __ InvokeFunction(a1, actual, CALL_FUNCTION);
+
+  // Tear down the internal frame and remove function, receiver and args.
+  __ LeaveInternalFrame();
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+}
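
The stack check near the top of Generate_FunctionApply is easier to see in
scalar form. A minimal sketch, assuming argc pointer-sized slots are needed and
that the runtime supplies the real stack limit; the helper below is
illustrative, not V8 code.

#include <cstdint>

// The applied arguments must fit between sp and the "real" stack limit. The
// subtraction may wrap if the stack is already overflowed, which is why the
// comparison is signed, exactly as in the generated branch above.
bool ApplyArgumentsFit(uintptr_t sp, uintptr_t real_stack_limit, uint32_t argc) {
  intptr_t space_left = static_cast<intptr_t>(sp - real_stack_limit);
  intptr_t bytes_needed = static_cast<intptr_t>(argc * sizeof(void*));
  return space_left > bytes_needed;
}
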
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ sll(a0, a0, kSmiTagSize);
+  __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
+  __ Addu(fp, sp, Operand(3 * kPointerSize));
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- v0 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then tear down the parameters.
+  __ lw(a1, MemOperand(fp, -3 * kPointerSize));
+  __ mov(sp, fp);
+  __ MultiPop(fp.bit() | ra.bit());
+  __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(sp, sp, t0);
+  // Adjust for the receiver.
+  __ Addu(sp, sp, Operand(kPointerSize));
 }
 
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // State setup as expected by MacroAssembler::InvokePrologue.
+  // ----------- S t a t e -------------
+  //  -- a0: actual arguments count
+  //  -- a1: function (passed through to callee)
+  //  -- a2: expected arguments count
+  //  -- a3: callee code entry
+  //  -- t1: call kind information
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments;
+
+  Label enough, too_few;
+  __ Branch(&dont_adapt_arguments, eq,
+      a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  // We use Uless as the number of arguments should always be greater than 0.
+  __ Branch(&too_few, Uless, a0, Operand(a2));
+
+  {  // Enough parameters: actual >= expected.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Calculate copy start address into a0 and copy end address into a2.
+    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(a0, fp, a0);
+    // Adjust for return address and receiver.
+    __ Addu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address.
+    __ sll(a2, a2, kPointerSizeLog2);
+    __ subu(a2, a0, a2);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: copy end address
+    // a3: code entry to call
+
+    Label copy;
+    __ bind(&copy);
+    __ lw(t0, MemOperand(a0));
+    __ push(t0);
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
+    __ addiu(a0, a0, -kPointerSize);  // In delay slot.
+
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // TODO(MIPS): Optimize these loops.
+
+    // Calculate copy start address into a0 and copy end address is fp.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(a0, fp, a0);
+    // Adjust for return address and receiver.
+    __ Addu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address. Also adjust for return address.
+    __ Addu(t1, fp, kPointerSize);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    // t1: copy end address
+    Label copy;
+    __ bind(&copy);
+    __ lw(t0, MemOperand(a0));  // Adjusted above for return addr and receiver.
+    __ push(t0);
+    __ Subu(a0, a0, kPointerSize);
+    __ Branch(&copy, ne, a0, Operand(t1));
+
+    // Fill the remaining expected arguments with undefined.
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+    __ sll(t2, a2, kPointerSizeLog2);
+    __ Subu(a2, fp, Operand(t2));
+    __ Addu(a2, a2, Operand(-4 * kPointerSize));  // Adjust for frame.
+
+    Label fill;
+    __ bind(&fill);
+    __ push(t0);
+    __ Branch(&fill, ne, sp, Operand(a2));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+
+  __ Call(a3);
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Ret();
+
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ Jump(a3);
 }
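
Behaviorally, the adaptor above copies the actual arguments into a fresh frame
sized for the callee's formal parameter count, filling missing slots with
undefined. A hedged high-level sketch (plain C++ strings stand in for tagged
values; this is not the real frame layout):

#include <string>
#include <vector>

std::vector<std::string> AdaptArguments(const std::vector<std::string>& actual,
                                        size_t expected) {
  std::vector<std::string> adapted;
  adapted.reserve(expected);
  for (size_t i = 0; i < expected; ++i) {
    // "Too few" path: missing arguments become undefined. "Enough" path:
    // surplus actual arguments are simply not copied into the adapted frame.
    adapted.push_back(i < actual.size() ? actual[i] : "undefined");
  }
  return adapted;
}
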
 
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 6cc272c..c999994 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -31,7 +31,7 @@
 
 #include "bootstrapper.h"
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "regexp-macro-assembler.h"
 
 namespace v8 {
@@ -40,24 +40,233 @@
 
 #define __ ACCESS_MASM(masm)
 
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+                                          Label* slow,
+                                          Condition cc,
+                                          bool never_nan_nan);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Register lhs,
+                                    Register rhs,
+                                    Label* rhs_not_nan,
+                                    Label* slow,
+                                    bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+                                           Register lhs,
+                                           Register rhs);
+
+
+// Check if the operand is a heap number.
+static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
+                                   Register scratch1, Register scratch2,
+                                   Label* not_a_heap_number) {
+  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
+  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
+}
+
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // The ToNumber stub takes one argument in a0.
+  Label check_heap_number, call_builtin;
+  __ JumpIfNotSmi(a0, &check_heap_number);
+  __ mov(v0, a0);
+  __ Ret();
+
+  __ bind(&check_heap_number);
+  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
+  __ mov(v0, a0);
+  __ Ret();
+
+  __ bind(&call_builtin);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Create a new closure from the given function info in new
+  // space. Set the context to the current context in cp.
+  Label gc;
+
+  // Pop the function info from the stack.
+  __ pop(a3);
+
+  // Attempt to allocate new JSFunction in new space.
+  __ AllocateInNewSpace(JSFunction::kSize,
+                        v0,
+                        a1,
+                        a2,
+                        &gc,
+                        TAG_OBJECT);
+
+  int map_index = strict_mode_ == kStrictMode
+      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+      : Context::FUNCTION_MAP_INDEX;
+
+  // Compute the function map in the current global context and set that
+  // as the map of the allocated object.
+  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+  // Initialize the rest of the function. We don't have to update the
+  // write barrier because the allocated object is in new space.
+  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
+  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+
+  // Initialize the code pointer in the function to be the one
+  // found in the shared function info object.
+  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+  // Return result. The argument function info has been popped already.
+  __ Ret();
+
+  // Create a new closure through the slower runtime call.
+  __ bind(&gc);
+  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+  __ Push(cp, a3, t0);
+  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
 }
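
Like the other fast-path stubs in this file, FastNewClosureStub bump-allocates
in new space and falls back to a runtime call when allocation fails. A minimal
sketch of that pattern, with invented names (this is not V8's allocator API):

#include <cstddef>
#include <cstdint>

struct NewSpace {
  uintptr_t top;    // Next free address.
  uintptr_t limit;  // End of the currently usable new-space region.
};

// Moral equivalent of AllocateInNewSpace: bump the top pointer, or report
// failure so the caller can take the slow path (a TailCallRuntime above).
void* TryAllocate(NewSpace* space, size_t size_in_bytes) {
  if (space->limit - space->top < size_in_bytes) return nullptr;
  void* result = reinterpret_cast<void*>(space->top);
  space->top += size_in_bytes;
  return result;
}
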
 
 
 void FastNewContextStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Try to allocate the context in new space.
+  Label gc;
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+
+  // Attempt to allocate the context in new space.
+  __ AllocateInNewSpace(FixedArray::SizeFor(length),
+                        v0,
+                        a1,
+                        a2,
+                        &gc,
+                        TAG_OBJECT);
+
+  // Load the function from the stack.
+  __ lw(a3, MemOperand(sp, 0));
+
+  // Setup the object header.
+  __ LoadRoot(a2, Heap::kContextMapRootIndex);
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ li(a2, Operand(Smi::FromInt(length)));
+  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+
+  // Setup the fixed slots.
+  __ li(a1, Operand(Smi::FromInt(0)));
+  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
+  __ sw(v0, MemOperand(v0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
+
+  // Copy the global object from the surrounding context.
+  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+  // Initialize the rest of the slots to undefined.
+  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
+  }
+
+  // Remove the on-stack argument and return.
+  __ mov(cp, v0);
+  __ Pop();
+  __ Ret();
+
+  // Need to collect. Call into runtime system.
+  __ bind(&gc);
+  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
 }
 
 
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Stack layout on entry:
+  // [sp]: constant elements.
+  // [sp + kPointerSize]: literal index.
+  // [sp + (2 * kPointerSize)]: literals array.
+
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+  int size = JSArray::kSize + elements_size;
+
+  // Load boilerplate object into a3 and check if we need to create a
+  // boilerplate.
+  Label slow_case;
+  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
+  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a3, t0);
+  __ lw(a3, MemOperand(t0));
+  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+  __ Branch(&slow_case, eq, a3, Operand(t1));
+
+  if (FLAG_debug_code) {
+    const char* message;
+    Heap::RootListIndex expected_map_index;
+    if (mode_ == CLONE_ELEMENTS) {
+      message = "Expected (writable) fixed array";
+      expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else {
+      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+      message = "Expected copy-on-write fixed array";
+      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+    }
+    __ push(a3);
+    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
+    __ LoadRoot(at, expected_map_index);
+    __ Assert(eq, message, a3, Operand(at));
+    __ pop(a3);
+  }
+
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  // Return new object in v0.
+  __ AllocateInNewSpace(size,
+                        v0,
+                        a1,
+                        a2,
+                        &slow_case,
+                        TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+      __ lw(a1, FieldMemOperand(a3, i));
+      __ sw(a1, FieldMemOperand(v0, i));
+    }
+  }
+
+  if (length_ > 0) {
+    // Get hold of the elements array of the boilerplate and setup the
+    // elements pointer in the resulting object.
+    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ Addu(a2, v0, Operand(JSArray::kSize));
+    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+    // Copy the elements array.
+    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+  }
+
+  // Return and remove the on-stack parameters.
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slow_case);
+  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
 }
 
 
@@ -107,72 +316,94 @@
 
 
 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+#ifndef BIG_ENDIAN_FLOATING_POINT
+  Register exponent = result1_;
+  Register mantissa = result2_;
+#else
+  Register exponent = result2_;
+  Register mantissa = result1_;
+#endif
+  Label not_special;
+  // Convert from Smi to integer.
+  __ sra(source_, source_, kSmiTagSize);
+  // Move sign bit from source to destination.  This works because the sign bit
+  // in the exponent word of the double has the same position and polarity as
+  // the 2's complement sign bit in a Smi.
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
+  // Subtract from 0 if source was negative.
+  __ subu(at, zero_reg, source_);
+  __ movn(source_, at, exponent);
+
+  // We have -1, 0 or 1, which we treat specially. Register source_ contains
+  // absolute value: it is either equal to 1 (special case of -1 and 1),
+  // greater than 1 (not a special case) or less than 1 (special case of 0).
+  __ Branch(&not_special, gt, source_, Operand(1));
+
+  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+  static const uint32_t exponent_word_for_1 =
+      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+  // Safe to use 'at' as dest reg here.
+  __ Or(at, exponent, Operand(exponent_word_for_1));
+  __ movn(exponent, at, source_);  // Write exp when source not 0.
+  // 1, 0 and -1 all have 0 for the second word.
+  __ mov(mantissa, zero_reg);
+  __ Ret();
+
+  __ bind(&not_special);
+  // Count leading zeros.
+  // Gets the wrong answer for 0, but we already checked for that case above.
+  __ clz(zeros_, source_);
+  // Compute exponent and or it into the exponent register.
+  // We use mantissa as a scratch register here.
+  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
+  __ subu(mantissa, mantissa, zeros_);
+  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
+  __ Or(exponent, exponent, mantissa);
+
+  // Shift up the source chopping the top bit off.
+  __ Addu(zeros_, zeros_, Operand(1));
+  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
+  __ sllv(source_, source_, zeros_);
+  // Compute lower part of fraction (last 12 bits).
+  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
+  // And the top (top 20 bits).
+  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
+  __ or_(exponent, exponent, source_);
+
+  __ Ret();
 }
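
The stub assembles the IEEE-754 bit pattern by hand: sign bit, exponent derived
from the leading-zero count plus the bias, and the mantissa split across two
32-bit words. A C++ sketch of the same construction, with 0 and +/-1
special-cased just as above (the general shift would be 32 for them):

#include <cstdint>

void Int32ToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
  const uint32_t kSignMask = 0x80000000u;
  const int kExponentBias = 1023;
  const int kMantissaBitsInTopWord = 20;

  uint32_t sign = (value < 0) ? kSignMask : 0u;
  uint32_t abs_value = (value < 0) ? 0u - static_cast<uint32_t>(value)
                                   : static_cast<uint32_t>(value);
  *lo = 0;
  if (abs_value == 0) {  // 0 keeps an all-zero payload.
    *hi = sign;
    return;
  }
  if (abs_value == 1) {  // 1 and -1: biased exponent only, zero mantissa.
    *hi = sign | (static_cast<uint32_t>(kExponentBias) << kMantissaBitsInTopWord);
    return;
  }
  int zeros = 0;  // Count leading zeros, like the clz instruction above.
  while ((abs_value & (0x80000000u >> zeros)) == 0) zeros++;
  uint32_t exponent = static_cast<uint32_t>(31 - zeros + kExponentBias);
  uint32_t fraction = abs_value << (zeros + 1);  // Drop the implicit leading 1.
  *hi = sign | (exponent << kMantissaBitsInTopWord) |
        (fraction >> (32 - kMantissaBitsInTopWord));
  *lo = fraction << kMantissaBitsInTopWord;
}
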
 
 
-class FloatingPointHelper : public AllStatic {
- public:
-
-  enum Destination {
-    kFPURegisters,
-    kCoreRegisters
-  };
-
-
-  // Loads smis from a0 and a1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
-  // is floating point registers FPU must be supported. If core registers are
-  // requested when FPU is supported f12 and f14 will be scratched.
-  static void LoadSmis(MacroAssembler* masm,
-                       Destination destination,
-                       Register scratch1,
-                       Register scratch2);
-
-  // Loads objects from a0 and a1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
-  // is floating point registers FPU must be supported. If core registers are
-  // requested when FPU is supported f12 and f14 will still be scratched. If
-  // either a0 or a1 is not a number (not smi and not heap number object) the
-  // not_number label is jumped to with a0 and a1 intact.
-  static void LoadOperands(MacroAssembler* masm,
-                           FloatingPointHelper::Destination destination,
-                           Register heap_number_map,
-                           Register scratch1,
-                           Register scratch2,
-                           Label* not_number);
-  // Loads the number from object into dst as a 32-bit integer if possible. If
-  // the object is not a 32-bit integer control continues at the label
-  // not_int32. If FPU is supported double_scratch is used but not scratch2.
-  static void LoadNumberAsInteger(MacroAssembler* masm,
-                                  Register object,
-                                  Register dst,
-                                  Register heap_number_map,
-                                  Register scratch1,
-                                  Register scratch2,
-                                  FPURegister double_scratch,
-                                  Label* not_int32);
- private:
-  static void LoadNumber(MacroAssembler* masm,
-                         FloatingPointHelper::Destination destination,
-                         Register object,
-                         FPURegister dst,
-                         Register dst1,
-                         Register dst2,
-                         Register heap_number_map,
-                         Register scratch1,
-                         Register scratch2,
-                         Label* not_number);
-};
-
-
 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                    FloatingPointHelper::Destination destination,
                                    Register scratch1,
                                    Register scratch2) {
-  UNIMPLEMENTED_MIPS();
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ sra(scratch1, a0, kSmiTagSize);
+    __ mtc1(scratch1, f14);
+    __ cvt_d_w(f14, f14);
+    __ sra(scratch1, a1, kSmiTagSize);
+    __ mtc1(scratch1, f12);
+    __ cvt_d_w(f12, f12);
+    if (destination == kCoreRegisters) {
+      __ Move(a2, a3, f14);
+      __ Move(a0, a1, f12);
+    }
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Write Smi from a0 to a3 and a2 in double format.
+    __ mov(scratch1, a0);
+    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
+    __ push(ra);
+    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+    // Write Smi from a1 to a1 and a0 in double format.
+    __ mov(scratch1, a1);
+    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
+    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(ra);
+  }
 }
 
 
@@ -183,7 +414,14 @@
     Register scratch1,
     Register scratch2,
     Label* slow) {
-  UNIMPLEMENTED_MIPS();
+
+  // Load right operand (a0) to f14 or a2/a3.
+  LoadNumber(masm, destination,
+             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
+
+  // Load left operand (a1) to f12 or a0/a1.
+  LoadNumber(masm, destination,
+             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
 }
 
 
@@ -197,30 +435,991 @@
                                      Register scratch1,
                                      Register scratch2,
                                      Label* not_number) {
-  UNIMPLEMENTED_MIPS();
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+
+  Label is_smi, done;
+
+  __ JumpIfSmi(object, &is_smi);
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+
+  // Handle loading a double from a heap number.
+  if (CpuFeatures::IsSupported(FPU) &&
+      destination == kFPURegisters) {
+    CpuFeatures::Scope scope(FPU);
+    // Load the double from tagged HeapNumber to double register.
+
+    // ARM uses a workaround here because of the unaligned HeapNumber
+    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
+    // point in generating even more instructions.
+    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Load the double from heap number to dst1 and dst2 in double format.
+    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
+    __ lw(dst2, FieldMemOperand(object,
+        HeapNumber::kValueOffset + kPointerSize));
+  }
+  __ Branch(&done);
+
+  // Handle loading a double from a smi.
+  __ bind(&is_smi);
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Convert smi to double using FPU instructions.
+    __ SmiUntag(scratch1, object);
+    __ mtc1(scratch1, dst);
+    __ cvt_d_w(dst, dst);
+    if (destination == kCoreRegisters) {
+      // Load the converted smi to dst1 and dst2 in double format.
+      __ Move(dst1, dst2, dst);
+    }
+  } else {
+    ASSERT(destination == kCoreRegisters);
+    // Write smi to dst1 and dst2 double format.
+    __ mov(scratch1, object);
+    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
+    __ push(ra);
+    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(ra);
+  }
+
+  __ bind(&done);
 }
 
 
-void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
-                                              Register object,
-                                              Register dst,
-                                              Register heap_number_map,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              FPURegister double_scratch,
-                                              Label* not_int32) {
-  UNIMPLEMENTED_MIPS();
+void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
+                                               Register object,
+                                               Register dst,
+                                               Register heap_number_map,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Register scratch3,
+                                               FPURegister double_scratch,
+                                               Label* not_number) {
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  Label is_smi;
+  Label done;
+  Label not_in_int32_range;
+
+  __ JumpIfSmi(object, &is_smi);
+  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
+  __ ConvertToInt32(object,
+                    dst,
+                    scratch1,
+                    scratch2,
+                    double_scratch,
+                    &not_in_int32_range);
+  __ jmp(&done);
+
+  __ bind(&not_in_int32_range);
+  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+  __ EmitOutOfInt32RangeTruncate(dst,
+                                 scratch1,
+                                 scratch2,
+                                 scratch3);
+
+  __ jmp(&done);
+
+  __ bind(&is_smi);
+  __ SmiUntag(dst, object);
+  __ bind(&done);
+}
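
For values outside int32 range, EmitOutOfInt32RangeTruncate applies ECMAScript
ToInt32 semantics: truncate toward zero, then wrap modulo 2^32. A behavioral
sketch in plain C++ (the macro assembler does this with exponent/mantissa
shifts rather than floating-point arithmetic):

#include <cmath>
#include <cstdint>

int32_t ToInt32(double value) {
  if (!std::isfinite(value)) return 0;  // NaN and +/-Infinity map to 0.
  double truncated = std::trunc(value);
  double modulo = std::fmod(truncated, 4294967296.0);  // Reduce modulo 2^32.
  if (modulo < 0) modulo += 4294967296.0;
  uint32_t bits = static_cast<uint32_t>(modulo);
  // Unsigned-to-signed conversion wraps on two's-complement targets (and is
  // guaranteed to since C++20), which is exactly what ToInt32 specifies.
  return static_cast<int32_t>(bits);
}
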
+
+
+void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
+                                             Register int_scratch,
+                                             Destination destination,
+                                             FPURegister double_dst,
+                                             Register dst1,
+                                             Register dst2,
+                                             Register scratch2,
+                                             FPURegister single_scratch) {
+  ASSERT(!int_scratch.is(scratch2));
+  ASSERT(!int_scratch.is(dst1));
+  ASSERT(!int_scratch.is(dst2));
+
+  Label done;
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(int_scratch, single_scratch);
+    __ cvt_d_w(double_dst, single_scratch);
+    if (destination == kCoreRegisters) {
+      __ Move(dst1, dst2, double_dst);
+    }
+  } else {
+    Label fewer_than_20_useful_bits;
+    // Expected output:
+    // |         dst2            |         dst1            |
+    // | s |   exp   |              mantissa               |
+
+    // Check for zero.
+    __ mov(dst2, int_scratch);
+    __ mov(dst1, int_scratch);
+    __ Branch(&done, eq, int_scratch, Operand(zero_reg));
+
+    // Preload the sign of the value.
+    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
+    // Get the absolute value of the object (as an unsigned integer).
+    Label skip_sub;
+    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
+    __ Subu(int_scratch, zero_reg, int_scratch);
+    __ bind(&skip_sub);
+
+    // Get mantissa[51:20].
+
+    // Get the position of the first set bit.
+    __ clz(dst1, int_scratch);
+    __ li(scratch2, 31);
+    __ Subu(dst1, scratch2, dst1);
+
+    // Set the exponent.
+    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+    __ Ins(dst2, scratch2,
+        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+    // Clear the first non null bit.
+    __ li(scratch2, Operand(1));
+    __ sllv(scratch2, scratch2, dst1);
+    __ li(at, -1);
+    __ Xor(scratch2, scratch2, at);
+    __ And(int_scratch, int_scratch, scratch2);
+
+    // Get the number of bits to set in the lower part of the mantissa.
+    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
+    // Set the higher 20 bits of the mantissa.
+    __ srlv(at, int_scratch, scratch2);
+    __ or_(dst2, dst2, at);
+    __ li(at, 32);
+    __ subu(scratch2, at, scratch2);
+    __ sllv(dst1, int_scratch, scratch2);
+    __ Branch(&done);
+
+    __ bind(&fewer_than_20_useful_bits);
+    __ li(at, HeapNumber::kMantissaBitsInTopWord);
+    __ subu(scratch2, at, dst1);
+    __ sllv(scratch2, int_scratch, scratch2);
+    __ Or(dst2, dst2, scratch2);
+    // Set dst1 to 0.
+    __ mov(dst1, zero_reg);
+  }
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
+                                                  Register object,
+                                                  Destination destination,
+                                                  FPURegister double_dst,
+                                                  Register dst1,
+                                                  Register dst2,
+                                                  Register heap_number_map,
+                                                  Register scratch1,
+                                                  Register scratch2,
+                                                  FPURegister single_scratch,
+                                                  Label* not_int32) {
+  ASSERT(!scratch1.is(object) && !scratch2.is(object));
+  ASSERT(!scratch1.is(scratch2));
+  ASSERT(!heap_number_map.is(object) &&
+         !heap_number_map.is(scratch1) &&
+         !heap_number_map.is(scratch2));
+
+  Label done, obj_is_not_smi;
+
+  __ JumpIfNotSmi(object, &obj_is_not_smi);
+  __ SmiUntag(scratch1, object);
+  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+                     scratch2, single_scratch);
+  __ Branch(&done);
+
+  __ bind(&obj_is_not_smi);
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+  // Load the number.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Load the double value.
+    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+    // On MIPS a lot of things cannot be implemented the same way so right
+    // now it makes a lot more sense to just do things manually.
+
+    // Save FCSR.
+    __ cfc1(scratch1, FCSR);
+    // Disable FPU exceptions.
+    __ ctc1(zero_reg, FCSR);
+    __ trunc_w_d(single_scratch, double_dst);
+    // Retrieve FCSR.
+    __ cfc1(scratch2, FCSR);
+    // Restore FCSR.
+    __ ctc1(scratch1, FCSR);
+
+    // Check for inexact conversion.
+    __ srl(scratch2, scratch2, kFCSRFlagShift);
+    __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
+
+    // Jump to not_int32 if the operation did not succeed.
+    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+
+    if (destination == kCoreRegisters) {
+      __ Move(dst1, dst2, double_dst);
+    }
+
+  } else {
+    ASSERT(!scratch1.is(object) && !scratch2.is(object));
+    // Load the double value in the destination registers.
+    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+    // Check for 0 and -0.
+    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
+    __ Or(scratch1, scratch1, Operand(dst2));
+    __ Branch(&done, eq, scratch1, Operand(zero_reg));
+
+    // Check that the value can be exactly represented by a 32-bit integer.
+    // Jump to not_int32 if that's not the case.
+    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+
+    // dst1 and dst2 were trashed. Reload the double value.
+    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+  }
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
+                                            Register object,
+                                            Register dst,
+                                            Register heap_number_map,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            FPURegister double_scratch,
+                                            Label* not_int32) {
+  ASSERT(!dst.is(object));
+  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
+  ASSERT(!scratch1.is(scratch2) &&
+         !scratch1.is(scratch3) &&
+         !scratch2.is(scratch3));
+
+  Label done;
+
+  // Untag the object into the destination register.
+  __ SmiUntag(dst, object);
+  // Just return if the object is a smi.
+  __ JumpIfSmi(object, &done);
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+  // Object is a heap number.
+  // Convert the floating point value to a 32-bit integer.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Load the double value.
+    __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+    // On MIPS a lot of things cannot be implemented the same way so right
+    // now it makes a lot more sense to just do things manually.
+
+    // Save FCSR.
+    __ cfc1(scratch1, FCSR);
+    // Disable FPU exceptions.
+    __ ctc1(zero_reg, FCSR);
+    __ trunc_w_d(double_scratch, double_scratch);
+    // Retrieve FCSR.
+    __ cfc1(scratch2, FCSR);
+    // Restore FCSR.
+    __ ctc1(scratch1, FCSR);
+
+    // Check for inexact conversion.
+    __ srl(scratch2, scratch2, kFCSRFlagShift);
+    __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
+
+    // Jump to not_int32 if the operation did not succeed.
+    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    // Get the result in the destination register.
+    __ mfc1(dst, double_scratch);
+
+  } else {
+    // Load the double value in the destination registers.
+    __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+    // Check for 0 and -0.
+    __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
+    __ Or(dst, scratch2, Operand(dst));
+    __ Branch(&done, eq, dst, Operand(zero_reg));
+
+    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
+
+    // Registers state after DoubleIs32BitInteger.
+    // dst: mantissa[51:20].
+    // scratch2: 1
+
+    // Shift back the higher bits of the mantissa.
+    __ srlv(dst, dst, scratch3);
+    // Set the implicit first bit.
+    __ li(at, 32);
+    __ subu(scratch3, at, scratch3);
+    __ sllv(scratch2, scratch2, scratch3);
+    __ Or(dst, dst, scratch2);
+    // Set the sign.
+    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+    Label skip_sub;
+    __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
+    __ Subu(dst, zero_reg, dst);
+    __ bind(&skip_sub);
+  }
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
+                                               Register src1,
+                                               Register src2,
+                                               Register dst,
+                                               Register scratch,
+                                               Label* not_int32) {
+  // Get exponent alone in scratch.
+  __ Ext(scratch,
+         src1,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+
+  // Subtract the bias from the exponent.
+  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
+
+  // src1: higher (exponent) part of the double value.
+  // src2: lower (mantissa) part of the double value.
+  // scratch: unbiased exponent.
+
+  // Fast cases. Check for obvious non 32-bit integer values.
+  // Negative exponent cannot yield 32-bit integers.
+  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
+  // Exponent greater than 31 cannot yield 32-bit integers.
+  // Also, a positive value with an exponent equal to 31 is outside of the
+  // signed 32-bit integer range.
+  // Another way to put it is that if (exponent - signbit) > 30 then the
+  // number cannot be represented as an int32.
+  Register tmp = dst;
+  __ srl(at, src1, 31);
+  __ subu(tmp, scratch, at);
+  __ Branch(not_int32, gt, tmp, Operand(30));
+  // If bits [21:0] in the mantissa are not null the value is not an int32.
+  __ And(tmp, src2, 0x3fffff);
+  __ Branch(not_int32, ne, tmp, Operand(zero_reg));
+
+  // Otherwise the exponent needs to be big enough to shift all the non-zero
+  // bits out to the left. So we need the (30 - exponent) last bits of the
+  // 31 higher bits of the mantissa to be null.
+  // Because bits [21:0] are null, we can check instead that the
+  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
+
+  // Get the 32 higher bits of the mantissa in dst.
+  __ Ext(dst,
+         src2,
+         HeapNumber::kMantissaBitsInTopWord,
+         32 - HeapNumber::kMantissaBitsInTopWord);
+  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
+  __ or_(dst, dst, at);
+
+  // Create the mask and test the lower bits (of the higher bits).
+  __ li(at, 32);
+  __ subu(scratch, at, scratch);
+  __ li(src2, 1);
+  __ sllv(src1, src2, scratch);
+  __ Subu(src1, src1, Operand(1));
+  __ And(src1, dst, src1);
+  __ Branch(not_int32, ne, src1, Operand(zero_reg));
+}
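
DoubleIs32BitInteger answers "is this double exactly an int32?" using only the
exponent and mantissa words, because no FPU may be available in this path.
Behaviorally it corresponds roughly to the check below (a hedged sketch; the
stub never materializes the value in a floating-point register, and its callers
handle 0 and -0 before calling it):

#include <cstdint>

bool DoubleIsInt32(double value) {
  // Rejects NaN and anything outside [-2^31, 2^31 - 1].
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
  int32_t as_int = static_cast<int32_t>(value);  // In range, so well defined.
  return static_cast<double>(as_int) == value;
}
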
+
+
+void FloatingPointHelper::CallCCodeForDoubleOperation(
+    MacroAssembler* masm,
+    Token::Value op,
+    Register heap_number_result,
+    Register scratch) {
+  // Using core registers:
+  // a0: Left value (least significant part of mantissa).
+  // a1: Left value (sign, exponent, top of mantissa).
+  // a2: Right value (least significant part of mantissa).
+  // a3: Right value (sign, exponent, top of mantissa).
+
+  // Assert that heap_number_result is saved.
+  // We currently always use s0 to pass it.
+  ASSERT(heap_number_result.is(s0));
+
+  // Push the current return address before the C call.
+  __ push(ra);
+  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
+  if (!IsMipsSoftFloatABI) {
+    CpuFeatures::Scope scope(FPU);
+    // We are not using MIPS FPU instructions, and parameters for the runtime
+    // function call are prepared in a0-a3 registers, but the function we are
+    // calling is compiled with the hard-float flag and expects the hard-float
+    // ABI (parameters in f12/f14 registers). We need to copy parameters from
+    // a0-a3 registers to f12/f14 register pairs.
+    __ Move(f12, a0, a1);
+    __ Move(f14, a2, a3);
+  }
+  // Call C routine that may not cause GC or other trouble.
+  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+                   4);
+  // Store answer in the overwritable heap number.
+  if (!IsMipsSoftFloatABI) {
+    CpuFeatures::Scope scope(FPU);
+    // Double returned in register f0.
+    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+  } else {
+    // Double returned in registers v0 and v1.
+    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
+    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
+  }
+  // Place heap_number_result in v0 and return to the pushed return address.
+  __ mov(v0, heap_number_result);
+  __ pop(ra);
+  __ Ret();
 }
 
 
 // See comment for class, this does NOT work for int32's that are in Smi range.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label max_negative_int;
+  // the_int_ has the answer which is a signed int32 but not a Smi.
+  // We test for the special value that has a different exponent.
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  // Test sign, and save for later conditionals.
+  __ And(sign_, the_int_, Operand(0x80000000u));
+  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+
+  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
+  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+  uint32_t non_smi_exponent =
+      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+  __ li(scratch_, Operand(non_smi_exponent));
+  // Set the sign bit in scratch_ if the value was negative.
+  __ or_(scratch_, scratch_, sign_);
+  // Subtract from 0 if the value was negative.
+  __ subu(at, zero_reg, the_int_);
+  __ movn(the_int_, at, sign_);
+  // We should be masking the implicit first digit of the mantissa away here,
+  // but it just ends up combining harmlessly with the last digit of the
+  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
+  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
+  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+  __ srl(at, the_int_, shift_distance);
+  __ or_(scratch_, scratch_, at);
+  __ sw(scratch_, FieldMemOperand(the_heap_number_,
+                                   HeapNumber::kExponentOffset));
+  __ sll(scratch_, the_int_, 32 - shift_distance);
+  __ sw(scratch_, FieldMemOperand(the_heap_number_,
+                                   HeapNumber::kMantissaOffset));
+  __ Ret();
+
+  __ bind(&max_negative_int);
+  // The max negative int32 is stored as a positive number in the mantissa of
+  // a double because it uses a sign bit instead of using two's complement.
+  // The actual mantissa bits stored are all 0 because the implicit most
+  // significant 1 bit is not stored.
+  non_smi_exponent += 1 << HeapNumber::kExponentShift;
+  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
+  __ sw(scratch_,
+        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+  __ mov(scratch_, zero_reg);
+  __ sw(scratch_,
+        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+  __ Ret();
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+                                          Label* slow,
+                                          Condition cc,
+                                          bool never_nan_nan) {
+  Label not_identical;
+  Label heap_number, return_equal;
+  Register exp_mask_reg = t5;
+
+  __ Branch(&not_identical, ne, a0, Operand(a1));
+
+  // The two objects are identical. If we know that one of them isn't NaN then
+  // we now know they test equal.
+  if (cc != eq || !never_nan_nan) {
+    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+    // so we do the second best thing - test it ourselves.
+    // They are equal and they are not both Smis, so neither of them is a Smi.
+    // If it's not a heap number, then return equal.
+    if (cc == less || cc == greater) {
+      __ GetObjectType(a0, t4, t4);
+      __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE));
+    } else {
+      __ GetObjectType(a0, t4, t4);
+      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+      // Comparing JS objects with <=, >= is complicated.
+      if (cc != eq) {
+        __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE));
+        // Normally here we fall through to return_equal, but undefined is
+        // special: (undefined == undefined) == true, but
+        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
+        if (cc == less_equal || cc == greater_equal) {
+          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+          __ Branch(&return_equal, ne, a0, Operand(t2));
+          if (cc == le) {
+            // undefined <= undefined should fail.
+            __ li(v0, Operand(GREATER));
+          } else {
+            // undefined >= undefined should fail.
+            __ li(v0, Operand(LESS));
+          }
+          __ Ret();
+        }
+      }
+    }
+  }
+
+  __ bind(&return_equal);
+  if (cc == less) {
+    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
+  } else if (cc == greater) {
+    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
+  } else {
+    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
+  }
+  __ Ret();
+
+  if (cc != eq || !never_nan_nan) {
+    // For less and greater we don't have to check for NaN since the result of
+    // x < x is false regardless.  For the others here is some code to check
+    // for NaN.
+    if (cc != lt && cc != gt) {
+      __ bind(&heap_number);
+      // It is a heap number, so return non-equal if it's NaN and equal if it's
+      // not NaN.
+
+      // The representation of NaN values has all exponent bits (52..62) set,
+      // and not all mantissa bits (0..51) clear.
+      // Read top bits of double representation (second word of value).
+      __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+      // Test that exponent bits are all set.
+      __ And(t3, t2, Operand(exp_mask_reg));
+      // If all bits not set (ne cond), then not a NaN, objects are equal.
+      __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+      // Shift out flag and all exponent bits, retaining only mantissa.
+      __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+      // Or with all low-bits of mantissa.
+      __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+      __ Or(v0, t3, Operand(t2));
+      // For equal we already have the right value in v0:  Return zero (equal)
+      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+      // not (it's a NaN).  For <= and >= we need to load v0 with the failing
+      // value if it's a NaN.
+      if (cc != eq) {
+        // All-zero means Infinity means equal.
+        __ Ret(eq, v0, Operand(zero_reg));
+        if (cc == le) {
+          __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
+        } else {
+          __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
+        }
+      }
+      __ Ret();
+    }
+    // No fall through here.
+  }
+
+  __ bind(&not_identical);
+}
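
The NaN test above works on the raw heap-number words: all eleven exponent bits
set together with a non-zero mantissa means NaN, while an all-zero mantissa
would be +/-Infinity, which does compare equal to itself. The same predicate on
the 64-bit representation:

#include <cstdint>

bool DoubleBitsAreNaN(uint64_t bits) {
  uint64_t exponent = (bits >> 52) & 0x7FF;              // Bits 52..62.
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);  // Bits 0..51.
  return exponent == 0x7FF && mantissa != 0;
}
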
+
+
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Register lhs,
+                                    Register rhs,
+                                    Label* both_loaded_as_doubles,
+                                    Label* slow,
+                                    bool strict) {
+  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+         (lhs.is(a1) && rhs.is(a0)));
+
+  Label lhs_is_smi;
+  __ And(t0, lhs, Operand(kSmiTagMask));
+  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
+  // Rhs is a Smi.
+  // Check whether the non-smi is a heap number.
+  __ GetObjectType(lhs, t4, t4);
+  if (strict) {
+    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // succeed. Return non-equal (lhs is already not zero).
+    __ mov(v0, lhs);
+    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number. Call
+    // the runtime.
+    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+  }
+
+  // Rhs is a smi, lhs is a number.
+  // Convert smi rhs to double.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ sra(at, rhs, kSmiTagSize);
+    __ mtc1(at, f14);
+    __ cvt_d_w(f14, f14);
+    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+  } else {
+    // Load lhs to a double in a2, a3.
+    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+    // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
+    __ mov(t6, rhs);
+    ConvertToDoubleStub stub1(a1, a0, t6, t5);
+    __ push(ra);
+    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+
+    __ pop(ra);
+  }
+
+  // We now have both loaded as doubles.
+  __ jmp(both_loaded_as_doubles);
+
+  __ bind(&lhs_is_smi);
+  // Lhs is a Smi.  Check whether the non-smi is a heap number.
+  __ GetObjectType(rhs, t4, t4);
+  if (strict) {
+    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // succeed. Return non-equal.
+    __ li(v0, Operand(1));
+    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number. Call
+    // the runtime.
+    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
+  }
+
+  // Lhs is a smi, rhs is a number.
+  // Convert smi lhs to double.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ sra(at, lhs, kSmiTagSize);
+    __ mtc1(at, f12);
+    __ cvt_d_w(f12, f12);
+    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  } else {
+    // Convert lhs to a double format. t5 is scratch.
+    __ mov(t6, lhs);
+    ConvertToDoubleStub stub2(a3, a2, t6, t5);
+    __ push(ra);
+    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(ra);
+    // Load rhs to a double in a1, a0.
+    if (rhs.is(a0)) {
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+    } else {
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+    }
+  }
+  // Fall through to both_loaded_as_doubles.
 }
 
 
 void EmitNanCheck(MacroAssembler* masm, Condition cc) {
-  UNIMPLEMENTED_MIPS();
+  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Lhs and rhs are already loaded to f12 and f14 register pairs.
+    __ Move(t0, t1, f14);
+    __ Move(t2, t3, f12);
+  } else {
+    // Lhs and rhs are already loaded to GP registers.
+    __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
+    __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
+    __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
+    __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
+  }
+  Register rhs_exponent = exp_first ? t0 : t1;
+  Register lhs_exponent = exp_first ? t2 : t3;
+  Register rhs_mantissa = exp_first ? t1 : t0;
+  Register lhs_mantissa = exp_first ? t3 : t2;
+  Label one_is_nan, neither_is_nan;
+  Label lhs_not_nan_exp_mask_is_loaded;
+
+  Register exp_mask_reg = t4;
+  __ li(exp_mask_reg, HeapNumber::kExponentMask);
+  __ and_(t5, lhs_exponent, exp_mask_reg);
+  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
+
+  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
+
+  __ li(exp_mask_reg, HeapNumber::kExponentMask);
+  __ bind(&lhs_not_nan_exp_mask_is_loaded);
+  __ and_(t5, rhs_exponent, exp_mask_reg);
+
+  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
+
+  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
+
+  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
+
+  __ bind(&one_is_nan);
+  // NaN comparisons always fail.
+  // Load whatever we need in v0 to make the comparison fail.
+  if (cc == lt || cc == le) {
+    __ li(v0, Operand(GREATER));
+  } else {
+    __ li(v0, Operand(LESS));
+  }
+  __ Ret();  // Return.
+
+  __ bind(&neither_is_nan);
+}
+
+
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+  // f12 and f14 have the two doubles.  Neither is a NaN.
+  // Call a native function to do a comparison between two non-NaNs.
+  // Call C routine that may not cause GC or other trouble.
+  // We call the C routine and return manually because we need the argument
+  // slots to be freed.
+
+  Label return_result_not_equal, return_result_equal;
+  if (cc == eq) {
+    // Doubles are not equal unless they have the same bit pattern.
+    // Exception: 0 and -0.
+    bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      // Lhs and rhs are already loaded to f12 and f14 register pairs.
+      __ Move(t0, t1, f14);
+      __ Move(t2, t3, f12);
+    } else {
+      // Lhs and rhs are already loaded to GP registers.
+      __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
+      __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
+      __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
+      __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
+    }
+    Register rhs_exponent = exp_first ? t0 : t1;
+    Register lhs_exponent = exp_first ? t2 : t3;
+    Register rhs_mantissa = exp_first ? t1 : t0;
+    Register lhs_mantissa = exp_first ? t3 : t2;
+
+    __ xor_(v0, rhs_mantissa, lhs_mantissa);
+    __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
+
+    __ subu(v0, rhs_exponent, lhs_exponent);
+    __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
+    // 0, -0 case.
+    __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
+    __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
+    __ or_(t4, rhs_exponent, lhs_exponent);
+    __ or_(t4, t4, rhs_mantissa);
+
+    __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
+
+    __ bind(&return_result_equal);
+    __ li(v0, Operand(EQUAL));
+    __ Ret();
+  }
+
+  __ bind(&return_result_not_equal);
+
+  if (!CpuFeatures::IsSupported(FPU)) {
+    __ push(ra);
+    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
+    if (!IsMipsSoftFloatABI) {
+      // We are not using MIPS FPU instructions, and the parameters for the
+      // runtime function call are prepared in the a0-a3 registers, but the
+      // function we are calling is compiled with the hard-float flag and
+      // expects the hard-float ABI (parameters in the f12/f14 registers).
+      // We need to copy the parameters from the a0-a3 registers to the
+      // f12/f14 register pairs.
+      __ Move(f12, a0, a1);
+      __ Move(f14, a2, a3);
+    }
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+    __ pop(ra);  // Because this function returns int, result is in v0.
+    __ Ret();
+  } else {
+    CpuFeatures::Scope scope(FPU);
+    Label equal, less_than;
+    __ c(EQ, D, f12, f14);
+    __ bc1t(&equal);
+    __ nop();
+
+    __ c(OLT, D, f12, f14);
+    __ bc1t(&less_than);
+    __ nop();
+
+    // Not equal, not less, not NaN, must be greater.
+    __ li(v0, Operand(GREATER));
+    __ Ret();
+
+    __ bind(&equal);
+    __ li(v0, Operand(EQUAL));
+    __ Ret();
+
+    __ bind(&less_than);
+    __ li(v0, Operand(LESS));
+    __ Ret();
+  }
+}
+
+
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+                                           Register lhs,
+                                           Register rhs) {
+  // If either operand is a JSObject or an oddball value, then they are
+  // not equal since their pointers are different.
+  // There is no test for undetectability in strict equality.
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  Label first_non_object;
+  // Get the type of the first operand into a2 and compare it with
+  // FIRST_JS_OBJECT_TYPE.
+  __ GetObjectType(lhs, a2, a2);
+  __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Return non-zero.
+  Label return_not_equal;
+  __ bind(&return_not_equal);
+  __ li(v0, Operand(1));
+  __ Ret();
+
+  __ bind(&first_non_object);
+  // Check for oddballs: true, false, null, undefined.
+  __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
+
+  __ GetObjectType(rhs, a3, a3);
+  __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Check for oddballs: true, false, null, undefined.
+  __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
+
+  // Now that we have the types we might as well check for symbol-symbol.
+  // Ensure that no non-strings have the symbol bit set.
+  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(t2, a2, Operand(a3));
+  __ And(t0, t2, Operand(kIsSymbolMask));
+  __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
+}
+
+
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+                                       Register lhs,
+                                       Register rhs,
+                                       Label* both_loaded_as_doubles,
+                                       Label* not_heap_numbers,
+                                       Label* slow) {
+  __ GetObjectType(lhs, a3, a2);
+  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  // If first was a heap number & second wasn't, go to slow case.
+  __ Branch(slow, ne, a3, Operand(a2));
+
+  // Both are heap numbers. Load them up then jump to the code we have
+  // for that.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  } else {
+    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
+    if (rhs.is(a0)) {
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+    } else {
+      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
+    }
+  }
+  __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+                                         Register lhs,
+                                         Register rhs,
+                                         Label* possible_strings,
+                                         Label* not_both_strings) {
+  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+         (lhs.is(a1) && rhs.is(a0)));
+
+  // a2 is object type of lhs.
+  // Ensure that no non-strings have the symbol bit set.
+  Label object_test;
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(at, a2, Operand(kIsNotStringMask));
+  __ Branch(&object_test, ne, at, Operand(zero_reg));
+  __ And(at, a2, Operand(kIsSymbolMask));
+  __ Branch(possible_strings, eq, at, Operand(zero_reg));
+  __ GetObjectType(rhs, a3, a3);
+  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+  __ And(at, a3, Operand(kIsSymbolMask));
+  __ Branch(possible_strings, eq, at, Operand(zero_reg));
+
+  // Both are symbols. We already checked they weren't the same pointer
+  // so they are not equal.
+  __ li(v0, Operand(1));   // Non-zero indicates not equal.
+  __ Ret();
+
+  __ bind(&object_test);
+  __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_OBJECT_TYPE));
+  __ GetObjectType(rhs, a2, a3);
+  __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // If both objects are undetectable, they are equal.  Otherwise, they
+  // are not equal, since they are different objects and an object is not
+  // equal to undefined.
+  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
+  __ and_(a0, a2, a3);
+  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+  __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
+  __ Ret();
 }
 
 
@@ -232,12 +1431,109 @@
                                                          Register scratch3,
                                                          bool object_is_smi,
                                                          Label* not_found) {
-  UNIMPLEMENTED_MIPS();
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch3;
+
+  // Load the number string cache.
+  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+  // Divide length by two (length is a smi).
+  __ sra(mask, mask, kSmiTagSize + 1);
+  __ Addu(mask, mask, -1);  // Make mask.
+
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Isolate* isolate = masm->isolate();
+  Label is_smi;
+  Label load_result_from_cache;
+  if (!object_is_smi) {
+    __ JumpIfSmi(object, &is_smi);
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      __ CheckMap(object,
+                  scratch1,
+                  Heap::kHeapNumberMapRootIndex,
+                  not_found,
+                  DONT_DO_SMI_CHECK);
+
+      STATIC_ASSERT(8 == kDoubleSize);
+      __ Addu(scratch1,
+              object,
+              Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+      __ lw(scratch2, MemOperand(scratch1, kPointerSize));
+      __ lw(scratch1, MemOperand(scratch1, 0));
+      __ Xor(scratch1, scratch1, Operand(scratch2));
+      __ And(scratch1, scratch1, Operand(mask));
+
+      // Calculate address of entry in string cache: each entry consists
+      // of two pointer sized fields.
+      __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+      __ Addu(scratch1, number_string_cache, scratch1);
+
+      Register probe = mask;
+      __ lw(probe,
+             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+      __ JumpIfSmi(probe, not_found);
+      __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+      __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+      __ c(EQ, D, f12, f14);
+      __ bc1t(&load_result_from_cache);
+      __ nop();   // bc1t() requires explicit fill of branch delay slot.
+      __ Branch(not_found);
+    } else {
+      // Note that there is no cache check for the non-FPU case, even though
+      // it seems there could be. Adding one might be a tiny optimization for
+      // non-FPU cores.
+      __ Branch(not_found);
+    }
+  }
+
+  __ bind(&is_smi);
+  Register scratch = scratch1;
+  __ sra(scratch, object, 1);   // Shift away the tag.
+  __ And(scratch, mask, Operand(scratch));
+
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  __ sll(scratch, scratch, kPointerSizeLog2 + 1);
+  __ Addu(scratch, number_string_cache, scratch);
+
+  // Check if the entry is the smi we are looking for.
+  Register probe = mask;
+  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  __ Branch(not_found, ne, object, Operand(probe));
+
+  // Get the result from the cache.
+  __ bind(&load_result_from_cache);
+  __ lw(result,
+         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+  __ IncrementCounter(isolate->counters()->number_to_string_native(),
+                      1,
+                      scratch1,
+                      scratch2);
 }
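
For reference, the hashing scheme described above (smis hash to their own value, doubles to the xor of their two 32-bit words, masked by half the cache length) can be sketched in plain C++ as follows. This is illustrative only; the helper name and parameters are hypothetical, not part of the patch:

  #include <cstdint>
  #include <cstring>

  static uint32_t NumberStringCacheKeySlot(double value, uint32_t cache_length) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    uint32_t hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
    uint32_t mask = (cache_length / 2) - 1;  // two fields (number, string) per entry
    return (hash & mask) * 2;                // index of the number slot in the cache
  }
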
 
 
 void NumberToStringStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label runtime;
+
+  __ lw(a1, MemOperand(sp, 0));
+
+  // Generate code to lookup number in the number string cache.
+  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
+  __ Addu(sp, sp, Operand(1 * kPointerSize));
+  __ Ret();
+
+  __ bind(&runtime);
+  // Handle number to string in the runtime system if not found in the cache.
+  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
 }
 
 
@@ -245,105 +1541,1018 @@
 // On exit, v0 is 0, positive, or negative (smi) to indicate the result
 // of the comparison.
 void CompareStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label slow;  // Call builtin.
+  Label not_smis, both_loaded_as_doubles;
+
+
+  if (include_smi_compare_) {
+    Label not_two_smis, smi_done;
+    __ Or(a2, a1, a0);
+    __ JumpIfNotSmi(a2, &not_two_smis);
+    __ sra(a1, a1, 1);
+    __ sra(a0, a0, 1);
+    __ Subu(v0, a1, a0);
+    __ Ret();
+    __ bind(&not_two_smis);
+  } else if (FLAG_debug_code) {
+    __ Or(a2, a1, a0);
+    __ And(a2, a2, kSmiTagMask);
+    __ Assert(ne, "CompareStub: unexpected smi operands.",
+        a2, Operand(zero_reg));
+  }
+
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  // Handle the case where the objects are identical.  Either returns the answer
+  // or goes to slow.  Only falls through if the objects were not identical.
+  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+
+  // If either is a Smi (we know that not both are), then they can only
+  // be strictly equal if the other is a HeapNumber.
+  STATIC_ASSERT(kSmiTag == 0);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  __ And(t2, lhs_, Operand(rhs_));
+  __ JumpIfNotSmi(t2, &not_smis, t0);
+  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+  // 1) Return the answer.
+  // 2) Go to slow.
+  // 3) Fall through to both_loaded_as_doubles.
+  // 4) Jump to both_loaded_as_doubles.
+  // In cases 3 and 4 we have found out we were dealing with a number-number
+  // comparison and the numbers have been loaded into f12 and f14 as doubles,
+  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
+  EmitSmiNonsmiComparison(masm, lhs_, rhs_,
+                          &both_loaded_as_doubles, &slow, strict_);
+
+  __ bind(&both_loaded_as_doubles);
+  // f12, f14 are the double representations of the left hand side
+  // and the right hand side if we have FPU. Otherwise a2, a3 represent
+  // left hand side and a0, a1 represent right hand side.
+
+  Isolate* isolate = masm->isolate();
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    Label nan;
+    __ li(t0, Operand(LESS));
+    __ li(t1, Operand(GREATER));
+    __ li(t2, Operand(EQUAL));
+
+    // Check if either rhs or lhs is NaN.
+    __ c(UN, D, f12, f14);
+    __ bc1t(&nan);
+    __ nop();
+
+    // Check if LESS condition is satisfied. If true, move conditionally
+    // result to v0.
+    __ c(OLT, D, f12, f14);
+    __ movt(v0, t0);
+    // Use the previous check to conditionally store the opposite condition
+    // (GREATER) in v0. If rhs is equal to lhs, this will be corrected by the
+    // next check.
+    __ movf(v0, t1);
+    // Check if EQUAL condition is satisfied. If true, move conditionally
+    // result to v0.
+    __ c(EQ, D, f12, f14);
+    __ movt(v0, t2);
+
+    __ Ret();
+
+    __ bind(&nan);
+    // NaN comparisons always fail.
+    // Load whatever we need in v0 to make the comparison fail.
+    if (cc_ == lt || cc_ == le) {
+      __ li(v0, Operand(GREATER));
+    } else {
+      __ li(v0, Operand(LESS));
+    }
+    __ Ret();
+  } else {
+    // Checks for NaN in the doubles we have loaded.  Can return the answer or
+    // fall through if neither is a NaN.  Also binds and falls through to
+    // neither_is_nan.
+    EmitNanCheck(masm, cc_);
+
+    // Compares two doubles that are not NaNs. Returns the answer.
+    // Never falls through.
+    EmitTwoNonNanDoubleComparison(masm, cc_);
+  }
+
+  __ bind(&not_smis);
+  // At this point we know we are dealing with two different objects,
+  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
+  if (strict_) {
+    // This returns non-equal for some object types, or falls through if it
+    // was not lucky.
+    EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+  }
+
+  Label check_for_symbols;
+  Label flat_string_check;
+  // Check for heap-number-heap-number comparison. Can jump to slow case,
+  // or load both doubles and jump to the code that handles
+  // that case. If the inputs are not doubles then jumps to check_for_symbols.
+  // In this case a2 will contain the type of lhs_.
+  EmitCheckForTwoHeapNumbers(masm,
+                             lhs_,
+                             rhs_,
+                             &both_loaded_as_doubles,
+                             &check_for_symbols,
+                             &flat_string_check);
+
+  __ bind(&check_for_symbols);
+  if (cc_ == eq && !strict_) {
+    // Returns an answer for two symbols or two detectable objects.
+    // Otherwise jumps to string case or not both strings case.
+    // Assumes that a2 is the type of lhs_ on entry.
+    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+  }
+
+  // Check for both being sequential ASCII strings, and inline if that is the
+  // case.
+  __ bind(&flat_string_check);
+
+  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
+
+  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
+  if (cc_ == eq) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
+                                                     lhs_,
+                                                     rhs_,
+                                                     a2,
+                                                     a3,
+                                                     t0);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                       lhs_,
+                                                       rhs_,
+                                                       a2,
+                                                       a3,
+                                                       t0,
+                                                       t1);
+  }
+  // Never falls through to here.
+
+  __ bind(&slow);
+  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
+  // a1 (rhs) second.
+  __ Push(lhs_, rhs_);
+  // Figure out which native to call and setup the arguments.
+  Builtins::JavaScript native;
+  if (cc_ == eq) {
+    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    native = Builtins::COMPARE;
+    int ncr;  // NaN compare result.
+    if (cc_ == lt || cc_ == le) {
+      ncr = GREATER;
+    } else {
+      ASSERT(cc_ == gt || cc_ == ge);  // Remaining cases.
+      ncr = LESS;
+    }
+    __ li(a0, Operand(Smi::FromInt(ncr)));
+    __ push(a0);
+  }
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ InvokeBuiltin(native, JUMP_FUNCTION);
 }
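
The smi fast path at the top of CompareStub::Generate is worth spelling out: when both operands are smis, shifting away the one-bit tag and subtracting yields a negative, zero, or positive word, which is exactly the value returned in v0. A hedged C++ equivalent (illustrative only, assuming kSmiTagSize == 1):

  #include <cstdint>

  static int32_t CompareTaggedSmis(int32_t tagged_a1, int32_t tagged_a0) {
    // Untag (arithmetic shift right by one) and subtract; the sign of the
    // difference encodes less/equal/greater. No overflow: untagged smis fit
    // in 31 bits, so their difference fits in 32 bits.
    return (tagged_a1 >> 1) - (tagged_a0 >> 1);
  }
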
 
 
 // This stub does not handle the inlined cases (Smis, Booleans, undefined).
 // The stub returns zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // This stub uses FPU instructions.
+  CpuFeatures::Scope scope(FPU);
+
+  Label false_result;
+  Label not_heap_number;
+  Register scratch0 = t5.is(tos_) ? t3 : t5;
+
+  // undefined -> false
+  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+  // Boolean -> its value
+  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
+  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
+  // return true if the equal condition is satisfied.
+  __ Ret(eq, tos_, Operand(scratch0));
+
+  // Smis: 0 -> false, all other -> true
+  __ And(scratch0, tos_, tos_);
+  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
+  __ And(scratch0, tos_, Operand(kSmiTagMask));
+  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
+  // return true if the not equal condition is satisfied.
+  __ Ret(eq, scratch0, Operand(zero_reg));
+
+  // 'null' -> false
+  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+
+  // HeapNumber => false if +0, -0, or NaN.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  __ Branch(&not_heap_number, ne, scratch0, Operand(at));
+
+  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+  __ fcmp(f12, 0.0, UEQ);
+
+  // "tos_" is a register, and contains a non zero value by default.
+  // Hence we only need to overwrite "tos_" with zero to return false for
+  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+  __ movt(tos_, zero_reg);
+  __ Ret();
+
+  __ bind(&not_heap_number);
+
+  // It can be an undetectable object.
+  // Undetectable => false.
+  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
+  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
+  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
+
+  // JavaScript object => true.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+
+  // "tos_" is a register and contains a non-zero value.
+  // Hence we implicitly return true if the greater than
+  // condition is satisfied.
+  __ Ret(gt, scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Check for string.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+  // "tos_" is a register and contains a non-zero value.
+  // Hence we implicitly return true if the greater than
+  // condition is satisfied.
+  __ Ret(gt, scratch0, Operand(FIRST_NONSTRING_TYPE));
+
+  // String value => false iff empty, i.e., length is zero.
+  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+  // If length is zero, "tos_" contains zero ==> false.
+  // If length is not zero, "tos_" contains a non-zero value ==> true.
+  __ Ret();
+
+  // Return 0 in "tos_" for false.
+  __ bind(&false_result);
+  __ mov(tos_, zero_reg);
+  __ Ret();
 }
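
The HeapNumber branch above encodes the JavaScript rule that +0, -0, and NaN are the only falsy numbers; the fcmp with UEQ (unordered or equal to zero) catches all three at once. A small C++ sketch of the same predicate (illustrative only):

  static bool HeapNumberToBoolean(double value) {
    // NaN compares unequal to everything, including itself, and both +0.0 and
    // -0.0 compare equal to 0.0, so this single expression covers all cases.
    return value == value && value != 0.0;
  }
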
 
 
-// We fall into this code if the operands were Smis, but the result was
-// not (eg. overflow).  We branch into this code (to the not_smi label) if
-// the operands were not both Smi.  The operands are in lhs and rhs.
-// To call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in a0 and a1 (for the
-// value in a1) and a2 and a3 (for the value in a0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
-                                    Label* not_smi,
-                                    Register lhs,
-                                    Register rhs,
-                                    const Builtins::JavaScript& builtin) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// For bitwise ops where the inputs are not both Smis we here try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value.  We truncate towards zero as required
-// by the ES spec.  If this is the case we do the bitwise op and see if the
-// result is a Smi.  If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs (x) and rhs (y). (Result = x op y).
-// On exit the result is in v0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                                                Register lhs,
-                                                Register rhs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
+Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
+  UnaryOpStub stub(key, type_info);
   return stub.GetCode();
 }
 
 
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-    TRBinaryOpIC::TypeInfo type_info,
-    TRBinaryOpIC::TypeInfo result_type_info) {
-  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+const char* UnaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+      kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name = NULL;  // Make g++ happy.
+  switch (mode_) {
+    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "UnaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               UnaryOpIC::GetName(operand_type_));
+  return name_;
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operand_type_) {
+    case UnaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case UnaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case UnaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case UnaryOpIC::GENERIC:
+      GenerateGenericStub(masm);
+      break;
+  }
+}
+
+
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  // Argument is in a0 and v0 at this point, so we can overwrite a0.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ li(a2, Operand(Smi::FromInt(MinorKey())));
+  __ li(a1, Operand(Smi::FromInt(op_)));
+  __ li(a0, Operand(Smi::FromInt(operand_type_)));
+
+  __ Push(v0, a2, a1, a0);
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
+                        masm->isolate()),
+      4,
+      1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateSmiStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateSmiStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+  Label non_smi;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+                                     Label* non_smi,
+                                     Label* slow) {
+  __ JumpIfNotSmi(a0, non_smi);
+
+  // The result of negating zero or the smallest negative smi is not a smi.
+  __ And(t0, a0, ~0x80000000);
+  __ Branch(slow, eq, t0, Operand(zero_reg));
+
+  // Return '0 - value'.
+  __ Subu(v0, zero_reg, a0);
+  __ Ret();
+}
+
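The guard in GenerateSmiCodeSub exists because two tagged values, 0 and 0x80000000 (the smi encoding of Smi::kMinValue), cannot be negated within the smi range: -0 is a heap number and -kMinValue overflows 31 bits. A hedged C++ rendering of the same test (illustrative only):

  #include <cstdint>

  static bool NegationStaysSmi(int32_t tagged_smi) {
    // Same mask the stub applies: if every bit other than the sign bit is
    // clear, the value is either 0 or the minimal smi and must go slow.
    return (tagged_smi & 0x7fffffff) != 0;
  }
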
+
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+                                        Label* non_smi) {
+  __ JumpIfNotSmi(a0, non_smi);
+
+  // Flip bits and revert inverted smi-tag.
+  __ Neg(v0, a0);
+  __ And(v0, v0, ~kSmiTagMask);
+  __ Ret();
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateHeapNumberStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateHeapNumberStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+  Label non_smi, slow, call_builtin;
+  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+  __ bind(&call_builtin);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+                                            Label* slow) {
+  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+  // a0 is a heap number.  Get a new heap number in a1.
+  if (mode_ == UNARY_OVERWRITE) {
+    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+  } else {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(a0);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(a1, v0);
+    __ pop(a0);
+    __ LeaveInternalFrame();
+
+    __ bind(&heapnumber_allocated);
+    __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+    __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
+    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
+    __ mov(v0, a1);
+  }
+  __ Ret();
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(
+    MacroAssembler* masm,
+    Label* slow) {
+  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
+  // Convert the heap number in a0 to an untagged integer in a1.
+  __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
+
+  // Do the bitwise operation and check if the result fits in a smi.
+  Label try_float;
+  __ Neg(a1, a1);
+  __ Addu(a2, a1, Operand(0x40000000));
+  __ Branch(&try_float, lt, a2, Operand(zero_reg));
+
+  // Tag the result as a smi and we're done.
+  __ SmiTag(v0, a1);
+  __ Ret();
+
+  // Try to store the result in a heap number.
+  __ bind(&try_float);
+  if (mode_ == UNARY_NO_OVERWRITE) {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(v0, a2, a3, t2, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(a1);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ pop(a1);
+    __ LeaveInternalFrame();
+
+    __ bind(&heapnumber_allocated);
+  }
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(a1, f0);
+    __ cvt_d_w(f0, f0);
+    __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+    __ Ret();
+  } else {
+    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+    // have to set up a frame.
+    WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
+    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+  }
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateGenericStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateGenericStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(
+    MacroAssembler* masm) {
+  // Handle the slow case by jumping to the JavaScript builtin.
+  __ push(a0);
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+Handle<Code> GetBinaryOpStub(int key,
+                             BinaryOpIC::TypeInfo type_info,
+                             BinaryOpIC::TypeInfo result_type_info) {
+  BinaryOpStub stub(key, type_info, result_type_info);
   return stub.GetCode();
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  __ Push(a1, a0);
+
+  __ li(a2, Operand(Smi::FromInt(MinorKey())));
+  __ li(a1, Operand(Smi::FromInt(op_)));
+  __ li(a0, Operand(Smi::FromInt(operands_type_)));
+  __ Push(a2, a1, a0);
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+                        masm->isolate()),
+      5,
+      1);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
     MacroAssembler* masm) {
   UNIMPLEMENTED();
 }
 
 
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operands_type_) {
+    case BinaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case BinaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case BinaryOpIC::INT32:
+      GenerateInt32Stub(masm);
+      break;
+    case BinaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case BinaryOpIC::ODDBALL:
+      GenerateOddballStub(masm);
+      break;
+    case BinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
+    case BinaryOpIC::STRING:
+      GenerateStringStub(masm);
+      break;
+    case BinaryOpIC::GENERIC:
+      GenerateGeneric(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
 }
 
 
-const char* TypeRecordingBinaryOpStub::GetName() {
-  UNIMPLEMENTED_MIPS();
+const char* BinaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+      kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "BinaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               BinaryOpIC::GetName(operands_type_));
   return name_;
 }
 
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
-    MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+  Register left = a1;
+  Register right = a0;
+
+  Register scratch1 = t0;
+  Register scratch2 = t1;
+
+  ASSERT(right.is(a0));
+  STATIC_ASSERT(kSmiTag == 0);
+
+  Label not_smi_result;
+  switch (op_) {
+    case Token::ADD:
+      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+      __ RetOnNoOverflow(scratch1);
+      // No need to revert anything - right and left are intact.
+      break;
+    case Token::SUB:
+      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+      __ RetOnNoOverflow(scratch1);
+      // No need to revert anything - right and left are intact.
+      break;
+    case Token::MUL: {
+      // Remove tag from one of the operands. This way the multiplication result
+      // will be a smi if it fits the smi range.
+      __ SmiUntag(scratch1, right);
+      // Do multiplication.
+      // lo = lower 32 bits of scratch1 * left.
+      // hi = higher 32 bits of scratch1 * left.
+      __ Mult(left, scratch1);
+      // Check for overflowing the smi range - no overflow if higher 33 bits of
+      // the result are identical.
+      __ mflo(scratch1);
+      __ mfhi(scratch2);
+      __ sra(scratch1, scratch1, 31);
+      __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
+      // Go slow on zero result to handle -0.
+      __ mflo(v0);
+      __ Ret(ne, v0, Operand(zero_reg));
+      // We need to return -0 if we multiplied a negative number by 0 to get 0.
+      // We know one of the operands was zero.
+      __ Addu(scratch2, right, left);
+      Label skip;
+      // ARM uses the 'pl' condition, which is 'ge'.
+      // Negating it results in 'lt'.
+      __ Branch(&skip, lt, scratch2, Operand(zero_reg));
+      ASSERT(Smi::FromInt(0) == 0);
+      __ mov(v0, zero_reg);
+      __ Ret();  // Return smi 0 if the non-zero one was positive.
+      __ bind(&skip);
+      // We fall through here if we multiplied a negative number by 0, because
+      // that means we should produce -0.
+      }
+      break;
+    case Token::DIV: {
+      Label done;
+      __ SmiUntag(scratch2, right);
+      __ SmiUntag(scratch1, left);
+      __ Div(scratch1, scratch2);
+      // A minor optimization: div may be calculated asynchronously, so we check
+      // for division by zero before getting the result.
+      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+      // If the result is 0, we need to make sure the divisor (right) is
+      // positive, otherwise it is a -0 case.
+      // Quotient is in 'lo', remainder is in 'hi'.
+      // Check for no remainder first.
+      __ mfhi(scratch1);
+      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+      __ mflo(scratch1);
+      __ Branch(&done, ne, scratch1, Operand(zero_reg));
+      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+      __ bind(&done);
+      // Check that the signed result fits in a Smi.
+      __ Addu(scratch2, scratch1, Operand(0x40000000));
+      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      __ Ret();
+      }
+      break;
+    case Token::MOD: {
+      Label done;
+      __ SmiUntag(scratch2, right);
+      __ SmiUntag(scratch1, left);
+      __ Div(scratch1, scratch2);
+      // A minor optimization: div may be calculated asynchronously, so we check
+      // for division by 0 before calling mfhi.
+      // Check for zero on the right hand side.
+      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
+      // If the result is 0, we need to make sure the dividend (left) is
+      // positive (or 0), otherwise it is a -0 case.
+      // Remainder is in 'hi'.
+      __ mfhi(scratch2);
+      __ Branch(&done, ne, scratch2, Operand(zero_reg));
+      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+      __ bind(&done);
+      // Check that the signed result fits in a Smi.
+      __ Addu(scratch1, scratch2, Operand(0x40000000));
+      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
+      __ SmiTag(v0, scratch2);
+      __ Ret();
+      }
+      break;
+    case Token::BIT_OR:
+      __ Or(v0, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_AND:
+      __ And(v0, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_XOR:
+      __ Xor(v0, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::SAR:
+      // Remove tags from right operand.
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ srav(scratch1, left, scratch1);
+      // Smi tag result.
+      __ And(v0, scratch1, Operand(~kSmiTagMask));
+      __ Ret();
+      break;
+    case Token::SHR:
+      // Remove tags from operands. We can't do this on a 31 bit number
+      // because then the 0s get shifted into bit 30 instead of bit 31.
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ srlv(v0, scratch1, scratch2);
+      // Unsigned shift is not allowed to produce a negative number, so
+      // check the sign bit and the sign bit after Smi tagging.
+      __ And(scratch1, v0, Operand(0xc0000000));
+      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
+      // Smi tag result.
+      __ SmiTag(v0);
+      __ Ret();
+      break;
+    case Token::SHL:
+      // Remove tags from operands.
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ sllv(scratch1, scratch1, scratch2);
+      // Check that the signed result fits in a Smi.
+      __ Addu(scratch2, scratch1, Operand(0x40000000));
+      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      __ Ret();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  __ bind(&not_smi_result);
 }
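
Two details of the MUL case above deserve a note: the product of a tagged left operand and an untagged right operand is the tagged product, and it fits in a smi exactly when the high word of the 64-bit result equals the sign extension of the low word (the "higher 33 bits identical" check). A hedged C++ sketch of that overflow test (illustrative only, assuming two's-complement arithmetic shifts):

  #include <cstdint>

  static bool SmiProductFits(int32_t tagged_left, int32_t untagged_right) {
    int64_t product = static_cast<int64_t>(tagged_left) * untagged_right;
    int32_t lo = static_cast<int32_t>(product);
    int32_t hi = static_cast<int32_t>(product >> 32);
    // No overflow iff hi is just the sign extension of lo, i.e. the upper
    // 33 bits of the 64-bit product are all identical.
    return hi == (lo >> 31);
  }

A zero product still needs the extra sign check in the stub because JavaScript distinguishes -0 (e.g. -3 * 0), which is not representable as a smi.
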
 
 
-void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
-                                                    bool smi_operands,
-                                                    Label* not_numbers,
-                                                    Label* gc_required) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+                                       bool smi_operands,
+                                       Label* not_numbers,
+                                       Label* gc_required) {
+  Register left = a1;
+  Register right = a0;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+  Register scratch3 = t0;
+
+  ASSERT(smi_operands || (not_numbers != NULL));
+  if (smi_operands && FLAG_debug_code) {
+    __ AbortIfNotSmi(left);
+    __ AbortIfNotSmi(right);
+  }
+
+  Register heap_number_map = t2;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
+      // depending on whether FPU is available or not.
+      FloatingPointHelper::Destination destination =
+          CpuFeatures::IsSupported(FPU) &&
+          op_ != Token::MOD ?
+              FloatingPointHelper::kFPURegisters :
+              FloatingPointHelper::kCoreRegisters;
+
+      // Allocate new heap number for result.
+      Register result = s0;
+      GenerateHeapResultAllocation(
+          masm, result, heap_number_map, scratch1, scratch2, gc_required);
+
+      // Load the operands.
+      if (smi_operands) {
+        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+      } else {
+        FloatingPointHelper::LoadOperands(masm,
+                                          destination,
+                                          heap_number_map,
+                                          scratch1,
+                                          scratch2,
+                                          not_numbers);
+      }
+
+      // Calculate the result.
+      if (destination == FloatingPointHelper::kFPURegisters) {
+        // Using FPU registers:
+        // f12: Left value.
+        // f14: Right value.
+        CpuFeatures::Scope scope(FPU);
+        switch (op_) {
+        case Token::ADD:
+          __ add_d(f10, f12, f14);
+          break;
+        case Token::SUB:
+          __ sub_d(f10, f12, f14);
+          break;
+        case Token::MUL:
+          __ mul_d(f10, f12, f14);
+          break;
+        case Token::DIV:
+          __ div_d(f10, f12, f14);
+          break;
+        default:
+          UNREACHABLE();
+        }
+
+        // ARM uses a workaround here because of the unaligned HeapNumber
+        // kValueOffset. On MIPS this workaround is built into sdc1 so
+        // there's no point in generating even more instructions.
+        __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
+        __ mov(v0, result);
+        __ Ret();
+      } else {
+        // Call the C function to handle the double operation.
+        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
+                                                         op_,
+                                                         result,
+                                                         scratch1);
+        if (FLAG_debug_code) {
+          __ stop("Unreachable code.");
+        }
+      }
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
+      if (smi_operands) {
+        __ SmiUntag(a3, left);
+        __ SmiUntag(a2, right);
+      } else {
+        // Convert operands to 32-bit integers. Right in a2 and left in a3.
+        FloatingPointHelper::ConvertNumberToInt32(masm,
+                                                  left,
+                                                  a3,
+                                                  heap_number_map,
+                                                  scratch1,
+                                                  scratch2,
+                                                  scratch3,
+                                                  f0,
+                                                  not_numbers);
+        FloatingPointHelper::ConvertNumberToInt32(masm,
+                                                  right,
+                                                  a2,
+                                                  heap_number_map,
+                                                  scratch1,
+                                                  scratch2,
+                                                  scratch3,
+                                                  f0,
+                                                  not_numbers);
+      }
+      Label result_not_a_smi;
+      switch (op_) {
+        case Token::BIT_OR:
+          __ Or(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_XOR:
+          __ Xor(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_AND:
+          __ And(a2, a3, Operand(a2));
+          break;
+        case Token::SAR:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(a2, a2, 5);
+          __ srav(a2, a3, a2);
+          break;
+        case Token::SHR:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(a2, a2, 5);
+          __ srlv(a2, a3, a2);
+          // SHR is special because it is required to produce a positive answer.
+          // The code below for writing into heap numbers isn't capable of
+          // writing the register as an unsigned int so we go to slow case if we
+          // hit this case.
+          if (CpuFeatures::IsSupported(FPU)) {
+            __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
+          } else {
+            __ Branch(not_numbers, lt, a2, Operand(zero_reg));
+          }
+          break;
+        case Token::SHL:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(a2, a2, 5);
+          __ sllv(a2, a3, a2);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      // Check that the *signed* result fits in a smi.
+      __ Addu(a3, a2, Operand(0x40000000));
+      __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
+      __ SmiTag(v0, a2);
+      __ Ret();
+
+      // Allocate new heap number for result.
+      __ bind(&result_not_a_smi);
+      Register result = t1;
+      if (smi_operands) {
+        __ AllocateHeapNumber(
+            result, scratch1, scratch2, heap_number_map, gc_required);
+      } else {
+        GenerateHeapResultAllocation(
+            masm, result, heap_number_map, scratch1, scratch2, gc_required);
+      }
+
+      // a2: Answer as signed int32.
+      // t1: Heap number to write answer into.
+
+      // Nothing can go wrong now, so move the heap number to v0, which is the
+      // result.
+      __ mov(v0, t1);
+
+      if (CpuFeatures::IsSupported(FPU)) {
+        // Convert the int32 in a2 to the heap number in v0. As
+        // mentioned above, SHR needs to always produce a positive result.
+        CpuFeatures::Scope scope(FPU);
+        __ mtc1(a2, f0);
+        if (op_ == Token::SHR) {
+          __ Cvt_d_uw(f0, f0);
+        } else {
+          __ cvt_d_w(f0, f0);
+        }
+        // ARM uses a workaround here because of the unaligned HeapNumber
+        // kValueOffset. On MIPS this workaround is built into sdc1 so
+        // there's no point in generating even more instructions.
+        __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in a2 to the heap number in v0, using
+        // a3 and a0 as scratch. v0 is preserved and returned.
+        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
+        __ TailCallStub(&stub);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
 }
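
The SHR special-casing above follows from JavaScript's unsigned shift: a 32-bit logical shift can produce a value with the top bit set (for example 0xFFFFFFFF >>> 0), which is not a valid smi and, on the non-FPU path, cannot be written into a heap number as an unsigned integer. A hedged C++ sketch of the value being produced (illustrative only):

  #include <cstdint>

  static double UnsignedShiftRight(int32_t left, int32_t shift_count) {
    uint32_t result = static_cast<uint32_t>(left) >> (shift_count & 31);
    // Always non-negative, but may exceed INT32_MAX and therefore the smi range.
    return static_cast<double>(result);
  }
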
 
 
@@ -351,83 +2560,929 @@
 // generated. If the result is not a smi and heap number allocation is not
 // requested the code falls through. If number allocation is requested but a
 // heap number cannot be allocated the code jumps to the label gc_required.
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+    MacroAssembler* masm,
+    Label* use_runtime,
     Label* gc_required,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
-  UNIMPLEMENTED_MIPS();
+  Label not_smis;
+
+  Register left = a1;
+  Register right = a0;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+
+  // Perform combined smi check on both operands.
+  __ Or(scratch1, left, Operand(right));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfNotSmi(scratch1, &not_smis);
+
+  // If the smi-smi operation results in a smi, a return is generated.
+  GenerateSmiSmiOperation(masm);
+
+  // If heap number results are possible generate the result in an allocated
+  // heap number.
+  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+    GenerateFPOperation(masm, true, use_runtime, gc_required);
+  }
+  __ bind(&not_smis);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  Label not_smis, call_runtime;
+
+  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+      result_type_ == BinaryOpIC::SMI) {
+    // Only allow smi results.
+    GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+  } else {
+    // Allow heap number result and don't make a transition if a heap number
+    // cannot be allocated.
+    GenerateSmiCode(masm,
+                    &call_runtime,
+                    &call_runtime,
+                    ALLOW_HEAPNUMBER_RESULTS);
+  }
+
+  // Code falls through if the result is not returned as either a smi or heap
+  // number.
+  GenerateTypeTransition(masm);
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::STRING);
+  // Try to add arguments as strings, otherwise, transition to the generic
+  // BinaryOpIC type.
+  GenerateAddStrings(masm);
+  GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ GetObjectType(left, a2, a2);
+  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ GetObjectType(right, a2, a2);
+  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::INT32);
+
+  Register left = a1;
+  Register right = a0;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+  FPURegister double_scratch = f0;
+  FPURegister single_scratch = f6;
+
+  Register heap_number_result = no_reg;
+  Register heap_number_map = t2;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  Label call_runtime;
+  // Label for the type transition, used when an input or the output has the
+  // wrong type.
+  Label transition;
+
+  // Smi-smi fast case.
+  Label skip;
+  __ Or(scratch1, left, right);
+  __ JumpIfNotSmi(scratch1, &skip);
+  GenerateSmiSmiOperation(masm);
+  // Fall through if the result is not a smi.
+  __ bind(&skip);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+    // Load both operands and check that they are 32-bit integers.
+    // Jump to type transition if they are not. The registers a0 and a1 (right
+    // and left) are preserved for the runtime call.
+    FloatingPointHelper::Destination destination =
+        CpuFeatures::IsSupported(FPU) &&
+        op_ != Token::MOD ?
+        FloatingPointHelper::kFPURegisters :
+        FloatingPointHelper::kCoreRegisters;
+
+    FloatingPointHelper::LoadNumberAsInt32Double(masm,
+                                                 right,
+                                                 destination,
+                                                 f14,
+                                                 a2,
+                                                 a3,
+                                                 heap_number_map,
+                                                 scratch1,
+                                                 scratch2,
+                                                 f2,
+                                                 &transition);
+    FloatingPointHelper::LoadNumberAsInt32Double(masm,
+                                                 left,
+                                                 destination,
+                                                 f12,
+                                                 t0,
+                                                 t1,
+                                                 heap_number_map,
+                                                 scratch1,
+                                                 scratch2,
+                                                 f2,
+                                                 &transition);
+
+      if (destination == FloatingPointHelper::kFPURegisters) {
+        CpuFeatures::Scope scope(FPU);
+        Label return_heap_number;
+        switch (op_) {
+          case Token::ADD:
+            __ add_d(f10, f12, f14);
+            break;
+          case Token::SUB:
+            __ sub_d(f10, f12, f14);
+            break;
+          case Token::MUL:
+            __ mul_d(f10, f12, f14);
+            break;
+          case Token::DIV:
+            __ div_d(f10, f12, f14);
+            break;
+          default:
+            UNREACHABLE();
+        }
+
+        if (op_ != Token::DIV) {
+          // These operations produce an integer result.
+          // Try to return a smi if we can.
+          // Otherwise return a heap number if allowed, or jump to type
+          // transition.
+
+          // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+          // On MIPS a lot of things cannot be implemented the same way so right
+          // now it makes a lot more sense to just do things manually.
+
+          // Save FCSR.
+          __ cfc1(scratch1, FCSR);
+          // Disable FPU exceptions.
+          __ ctc1(zero_reg, FCSR);
+          __ trunc_w_d(single_scratch, f10);
+          // Retrieve FCSR.
+          __ cfc1(scratch2, FCSR);
+          // Restore FCSR.
+          __ ctc1(scratch1, FCSR);
+
+          // Check for inexact conversion.
+          __ srl(scratch2, scratch2, kFCSRFlagShift);
+          __ And(scratch2, scratch2, kFCSRFlagMask);
+
+          if (result_type_ <= BinaryOpIC::INT32) {
+            // If scratch2 != 0, result does not fit in a 32-bit integer.
+            __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+          }
+
+          // Check if the result fits in a smi.
+          __ mfc1(scratch1, single_scratch);
+          __ Addu(scratch2, scratch1, Operand(0x40000000));
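+          // Adding 0x40000000 (2^30) sets the sign bit exactly when scratch1
+          // lies outside the 31-bit smi range [-2^30, 2^30 - 1], which the
+          // signed "lt zero_reg" branch below detects.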
+          // If not, try to return a heap number.
+          __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
+          // Check for minus zero. Return heap number for minus zero.
+          Label not_zero;
+          __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
+          __ mfc1(scratch2, f11);
+          __ And(scratch2, scratch2, HeapNumber::kSignMask);
+          __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
+          __ bind(&not_zero);
+
+          // Tag the result and return.
+          __ SmiTag(v0, scratch1);
+          __ Ret();
+        } else {
+          // DIV just falls through to allocating a heap number.
+        }
+
+        if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+                                                  : BinaryOpIC::INT32)) {
+          __ bind(&return_heap_number);
+          // We are using FPU registers so s0 is available.
+          heap_number_result = s0;
+          GenerateHeapResultAllocation(masm,
+                                       heap_number_result,
+                                       heap_number_map,
+                                       scratch1,
+                                       scratch2,
+                                       &call_runtime);
+          __ mov(v0, heap_number_result);
+          __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
+          __ Ret();
+        }
+
+        // A DIV operation expecting an integer result falls through
+        // to type transition.
+
+      } else {
+        // We preserved a0 and a1 to be able to call runtime.
+        // Save the left value on the stack.
+        __ Push(t1, t0);
+
+        Label pop_and_call_runtime;
+
+        // Allocate a heap number to store the result.
+        heap_number_result = s0;
+        GenerateHeapResultAllocation(masm,
+                                     heap_number_result,
+                                     heap_number_map,
+                                     scratch1,
+                                     scratch2,
+                                     &pop_and_call_runtime);
+
+        // Load the left value from the value saved on the stack.
+        __ Pop(a1, a0);
+
+        // Call the C function to handle the double operation.
+        FloatingPointHelper::CallCCodeForDoubleOperation(
+            masm, op_, heap_number_result, scratch1);
+        if (FLAG_debug_code) {
+          __ stop("Unreachable code.");
+        }
+
+        __ bind(&pop_and_call_runtime);
+        __ Drop(2);
+        __ Branch(&call_runtime);
+      }
+
+      break;
+    }
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
+      Label return_heap_number;
+      Register scratch3 = t1;
+      // Convert operands to 32-bit integers. Right in a2 and left in a3. The
+      // registers a0 and a1 (right and left) are preserved for the runtime
+      // call.
+      FloatingPointHelper::LoadNumberAsInt32(masm,
+                                             left,
+                                             a3,
+                                             heap_number_map,
+                                             scratch1,
+                                             scratch2,
+                                             scratch3,
+                                             f0,
+                                             &transition);
+      FloatingPointHelper::LoadNumberAsInt32(masm,
+                                             right,
+                                             a2,
+                                             heap_number_map,
+                                             scratch1,
+                                             scratch2,
+                                             scratch3,
+                                             f0,
+                                             &transition);
+
+      // The ECMA-262 standard specifies that, for shift operations, only the
+      // 5 least significant bits of the shift value should be used.
+      switch (op_) {
+        case Token::BIT_OR:
+          __ Or(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_XOR:
+          __ Xor(a2, a3, Operand(a2));
+          break;
+        case Token::BIT_AND:
+          __ And(a2, a3, Operand(a2));
+          break;
+        case Token::SAR:
+          __ And(a2, a2, Operand(0x1f));
+          __ srav(a2, a3, a2);
+          break;
+        case Token::SHR:
+          __ And(a2, a2, Operand(0x1f));
+          __ srlv(a2, a3, a2);
+          // SHR is special because it is required to produce a positive answer.
+          // We only get a negative result if the shift value (a2) is 0.
+          // This result cannot be represented as a signed 32-bit integer, so
+          // try to return a heap number if we can.
+          // The non-FPU code does not support this special case, so jump to
+          // the runtime if the FPU is not available.
+          if (CpuFeatures::IsSupported(FPU)) {
+            __ Branch((result_type_ <= BinaryOpIC::INT32)
+                        ? &transition
+                        : &return_heap_number,
+                       lt,
+                       a2,
+                       Operand(zero_reg));
+          } else {
+            __ Branch((result_type_ <= BinaryOpIC::INT32)
+                        ? &transition
+                        : &call_runtime,
+                       lt,
+                       a2,
+                       Operand(zero_reg));
+          }
+          break;
+        case Token::SHL:
+          __ And(a2, a2, Operand(0x1f));
+          __ sllv(a2, a3, a2);
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      // Check if the result fits in a smi.
+      __ Addu(scratch1, a2, Operand(0x40000000));
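+      // Adding 0x40000000 (2^30) sets the sign bit exactly when a2 is outside
+      // the 31-bit smi range, which the branch below detects.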
+      // If not, try to return a heap number. (We know the result is an int32.)
+      __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
+      // Tag the result and return.
+      __ SmiTag(v0, a2);
+      __ Ret();
+
+      __ bind(&return_heap_number);
+      heap_number_result = t1;
+      GenerateHeapResultAllocation(masm,
+                                   heap_number_result,
+                                   heap_number_map,
+                                   scratch1,
+                                   scratch2,
+                                   &call_runtime);
+
+      if (CpuFeatures::IsSupported(FPU)) {
+        CpuFeatures::Scope scope(FPU);
+
+        if (op_ != Token::SHR) {
+          // Convert the result to a floating point value.
+          __ mtc1(a2, double_scratch);
+          __ cvt_d_w(double_scratch, double_scratch);
+        } else {
+          // The result must be interpreted as an unsigned 32-bit integer.
+          __ mtc1(a2, double_scratch);
+          __ Cvt_d_uw(double_scratch, double_scratch);
+        }
+
+        // Store the result.
+        __ mov(v0, heap_number_result);
+        __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in a2 to the heap number in v0, using
+        // a3 and a1 as scratch. v0 is preserved and returned.
+        __ mov(a0, t1);
+        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
+        __ TailCallStub(&stub);
+      }
+
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+
+  if (transition.is_linked()) {
+    __ bind(&transition);
+    GenerateTypeTransition(masm);
+  }
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (op_ == Token::ADD) {
+    // Handle string addition here, because it is the only operation
+    // that does not do a ToNumber conversion on the operands.
+    GenerateAddStrings(masm);
+  }
+
+  // Convert oddball arguments to numbers.
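+  // ToNumber(undefined) is NaN, and for the bitwise operators ToInt32(NaN)
+  // and ToUint32(NaN) are 0, so undefined can be replaced by Smi 0 directly.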
+  Label check, done;
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&check, ne, a1, Operand(t0));
+  if (Token::IsBitOp(op_)) {
+    __ li(a1, Operand(Smi::FromInt(0)));
+  } else {
+    __ LoadRoot(a1, Heap::kNanValueRootIndex);
+  }
+  __ jmp(&done);
+  __ bind(&check);
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&done, ne, a0, Operand(t0));
+  if (Token::IsBitOp(op_)) {
+    __ li(a0, Operand(Smi::FromInt(0)));
+  } else {
+    __ LoadRoot(a0, Heap::kNanValueRootIndex);
+  }
+  __ bind(&done);
+
+  GenerateHeapNumberStub(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  Label call_runtime;
+  GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+  Label call_runtime, call_string_add_or_runtime;
+
+  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+
+  __ bind(&call_string_add_or_runtime);
+  if (op_ == Token::ADD) {
+    GenerateAddStrings(masm);
+  }
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+  ASSERT(op_ == Token::ADD);
+  Label left_not_string, call_runtime;
+
+  Register left = a1;
+  Register right = a0;
+
+  // Check if left argument is a string.
+  __ JumpIfSmi(left, &left_not_string);
+  __ GetObjectType(left, a2, a2);
+  __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_left_stub);
+
+  // Left operand is not a string, test right.
+  __ bind(&left_not_string);
+  __ JumpIfSmi(right, &call_runtime);
+  __ GetObjectType(right, a2, a2);
+  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
+
+  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_right_stub);
+
+  // At least one argument is not a string.
+  __ bind(&call_runtime);
+}
+
+
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+  GenerateRegisterArgsPush(masm);
+  switch (op_) {
+    case Token::ADD:
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void BinaryOpStub::GenerateHeapResultAllocation(
     MacroAssembler* masm,
     Register result,
     Register heap_number_map,
     Register scratch1,
     Register scratch2,
     Label* gc_required) {
-  UNIMPLEMENTED_MIPS();
+
+  // The code below will scratch the result register if allocation fails. To
+  // keep both arguments intact for the runtime call, result cannot be either
+  // of them.
+  ASSERT(!result.is(a0) && !result.is(a1));
+
+  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+    Label skip_allocation, allocated;
+    Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
+    // If the overwritable operand is already an object, we skip the
+    // allocation of a heap number.
+    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+    // Allocate a heap number for the result.
+    __ AllocateHeapNumber(
+        result, scratch1, scratch2, heap_number_map, gc_required);
+    __ Branch(&allocated);
+    __ bind(&skip_allocation);
+    // Use object holding the overwritable operand for result.
+    __ mov(result, overwritable_operand);
+    __ bind(&allocated);
+  } else {
+    ASSERT(mode_ == NO_OVERWRITE);
+    __ AllocateHeapNumber(
+        result, scratch1, scratch2, heap_number_map, gc_required);
+  }
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  __ Push(a1, a0);
 }
 
 
 
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Untagged case: double input in f4, double result goes
+  //   into f4.
+  // Tagged case: tagged input on top of stack and in a0,
+  //   tagged result (heap number) goes into v0.
+
+  Label input_not_smi;
+  Label loaded;
+  Label calculate;
+  Label invalid_cache;
+  const Register scratch0 = t5;
+  const Register scratch1 = t3;
+  const Register cache_entry = a0;
+  const bool tagged = (argument_type_ == TAGGED);
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    if (tagged) {
+      // Argument is a number and is on the stack and in a0.
+      // Load argument and check if it is a smi.
+      __ JumpIfNotSmi(a0, &input_not_smi);
+
+      // Input is a smi. Convert to double and load the low and high words
+      // of the double into a2, a3.
+      __ sra(t0, a0, kSmiTagSize);
+      __ mtc1(t0, f4);
+      __ cvt_d_w(f4, f4);
+      __ Move(a2, a3, f4);
+      __ Branch(&loaded);
+
+      __ bind(&input_not_smi);
+      // Check if input is a HeapNumber.
+      __ CheckMap(a0,
+                  a1,
+                  Heap::kHeapNumberMapRootIndex,
+                  &calculate,
+                  DONT_DO_SMI_CHECK);
+      // Input is a HeapNumber. Load the low and high words into a2, a3.
+      __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
+      __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
+    } else {
+      // Input is untagged double in f4. Output goes to f4.
+      __ Move(a2, a3, f4);
+    }
+    __ bind(&loaded);
+    // a2 = low 32 bits of double value.
+    // a3 = high 32 bits of double value.
+    // Compute hash (the shifts are arithmetic):
+    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+    __ Xor(a1, a2, a3);
+    __ sra(t0, a1, 16);
+    __ Xor(a1, a1, t0);
+    __ sra(t0, a1, 8);
+    __ Xor(a1, a1, t0);
+    ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+    __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
+
+    // a2 = low 32 bits of double value.
+    // a3 = high 32 bits of double value.
+    // a1 = TranscendentalCache::hash(double value).
+    __ li(cache_entry, Operand(
+        ExternalReference::transcendental_cache_array_address(
+            masm->isolate())));
+    // a0 points to cache array.
+    __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
+        Isolate::Current()->transcendental_cache()->caches_[0])));
+    // a0 points to the cache for the type type_.
+    // If NULL, the cache hasn't been initialized yet, so go through runtime.
+    __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
+
+#ifdef DEBUG
+    // Check that the layout of cache elements matches expectations.
+    { TranscendentalCache::SubCache::Element test_elem[2];
+      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+      CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
+      CHECK_EQ(0, elem_in0 - elem_start);
+      CHECK_EQ(kIntSize, elem_in1 - elem_start);
+      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+    }
+#endif
+
+    // Find the address of the a1'th entry in the cache, i.e., &a0[a1 * 12].
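+    // Each cache element is 12 bytes (see the CHECK_EQ above), so the byte
+    // offset is computed without a multiply as ((a1 + 2 * a1) << 2).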
+    __ sll(t0, a1, 1);
+    __ Addu(a1, a1, t0);
+    __ sll(t0, a1, 2);
+    __ Addu(cache_entry, cache_entry, t0);
+
+    // Check if cache matches: Double value is stored in uint32_t[2] array.
+    __ lw(t0, MemOperand(cache_entry, 0));
+    __ lw(t1, MemOperand(cache_entry, 4));
+    __ lw(t2, MemOperand(cache_entry, 8));
+    __ Addu(cache_entry, cache_entry, 12);
+    __ Branch(&calculate, ne, a2, Operand(t0));
+    __ Branch(&calculate, ne, a3, Operand(t1));
+    // Cache hit. Load result, cleanup and return.
+    if (tagged) {
+      // Pop input value from stack and load result into v0.
+      __ Drop(1);
+      __ mov(v0, t2);
+    } else {
+      // Load result into f4.
+      __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+    }
+    __ Ret();
+  }  // if (CpuFeatures::IsSupported(FPU))
+
+  __ bind(&calculate);
+  if (tagged) {
+    __ bind(&invalid_cache);
+    __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
+                                                   masm->isolate()),
+                                 1,
+                                 1);
+  } else {
+    if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
+    CpuFeatures::Scope scope(FPU);
+
+    Label no_update;
+    Label skip_cache;
+    const Register heap_number_map = t2;
+
+    // Call C function to calculate the result and update the cache.
+    // Register a0 holds precalculated cache entry address; preserve
+    // it on the stack and pop it into register cache_entry after the
+    // call.
+    __ push(cache_entry);
+    GenerateCallCFunction(masm, scratch0);
+    __ GetCFunctionDoubleResult(f4);
+
+    // Try to update the cache. If we cannot allocate a
+    // heap number, we return the result without updating.
+    __ pop(cache_entry);
+    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
+    __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+
+    __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
+    __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
+    __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
+
+    __ mov(v0, cache_entry);
+    __ Ret();
+
+    __ bind(&invalid_cache);
+    // The cache is invalid. Call runtime which will recreate the
+    // cache.
+    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
+    __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
+    __ EnterInternalFrame();
+    __ push(a0);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
+    __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
+    __ Ret();
+
+    __ bind(&skip_cache);
+    // Call C function to calculate the result and answer directly
+    // without updating the cache.
+    GenerateCallCFunction(masm, scratch0);
+    __ GetCFunctionDoubleResult(f4);
+    __ bind(&no_update);
+
+    // We return the value in f4 without adding it to the cache, but
+    // we cause a scavenging GC so that future allocations will succeed.
+    __ EnterInternalFrame();
+
+    // Allocate an aligned object larger than a HeapNumber.
+    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+    __ li(scratch0, Operand(4 * kPointerSize));
+    __ push(scratch0);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
+    __ Ret();
+  }
+}
+
+
+void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
+                                                    Register scratch) {
+  __ push(ra);
+  __ PrepareCallCFunction(2, scratch);
+  if (IsMipsSoftFloatABI) {
+    __ Move(v0, v1, f4);
+  } else {
+    __ mov_d(f12, f4);
+  }
+  switch (type_) {
+    case TranscendentalCache::SIN:
+      __ CallCFunction(
+          ExternalReference::math_sin_double_function(masm->isolate()), 2);
+      break;
+    case TranscendentalCache::COS:
+      __ CallCFunction(
+          ExternalReference::math_cos_double_function(masm->isolate()), 2);
+      break;
+    case TranscendentalCache::LOG:
+      __ CallCFunction(
+          ExternalReference::math_log_double_function(masm->isolate()), 2);
+      break;
+    default:
+      UNIMPLEMENTED();
+      break;
+  }
+  __ pop(ra);
 }
 
 
 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
-  UNIMPLEMENTED_MIPS();
-  return Runtime::kAbort;
+  switch (type_) {
+    // Add more cases when necessary.
+    case TranscendentalCache::SIN: return Runtime::kMath_sin;
+    case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::LOG: return Runtime::kMath_log;
+    default:
+      UNIMPLEMENTED();
+      return Runtime::kAbort;
+  }
 }
 
 
 void StackCheckStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
 }
 
 
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void MathPowStub::Generate(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    Label base_not_smi;
+    Label exponent_not_smi;
+    Label convert_exponent;
+
+    const Register base = a0;
+    const Register exponent = a2;
+    const Register heapnumbermap = t1;
+    const Register heapnumber = s0;  // Callee-saved register.
+    const Register scratch = t2;
+    const Register scratch2 = t3;
+
+    // Allocate FP values in the ABI parameter-passing registers.
+    const DoubleRegister double_base = f12;
+    const DoubleRegister double_exponent = f14;
+    const DoubleRegister double_result = f0;
+    const DoubleRegister double_scratch = f2;
+
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+    __ lw(base, MemOperand(sp, 1 * kPointerSize));
+    __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
+
+    // Convert base to a double value and store it in double_base (f12).
+    __ JumpIfNotSmi(base, &base_not_smi);
+    // Base is a Smi. Untag and convert it.
+    __ SmiUntag(base);
+    __ mtc1(base, double_scratch);
+    __ cvt_d_w(double_base, double_scratch);
+    __ Branch(&convert_exponent);
+
+    __ bind(&base_not_smi);
+    __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+    // Base is a heapnumber. Load it into double register.
+    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+
+    __ bind(&convert_exponent);
+    __ JumpIfNotSmi(exponent, &exponent_not_smi);
+    __ SmiUntag(exponent);
+
+    // The base is in a double register and the exponent is
+    // an untagged smi. Allocate a heap number and call a
+    // C function for integer exponents. The register containing
+    // the heap number is callee-saved.
+    __ AllocateHeapNumber(heapnumber,
+                          scratch,
+                          scratch2,
+                          heapnumbermap,
+                          &call_runtime);
+    __ push(ra);
+    __ PrepareCallCFunction(3, scratch);
+    __ SetCallCDoubleArguments(double_base, exponent);
+    __ CallCFunction(
+        ExternalReference::power_double_int_function(masm->isolate()), 3);
+    __ pop(ra);
+    __ GetCFunctionDoubleResult(double_result);
+    __ sdc1(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    __ mov(v0, heapnumber);
+    __ DropAndRet(2 * kPointerSize);
+
+    __ bind(&exponent_not_smi);
+    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+    // Exponent is a heapnumber. Load it into double register.
+    __ ldc1(double_exponent,
+            FieldMemOperand(exponent, HeapNumber::kValueOffset));
+
+    // The base and the exponent are in double registers.
+    // Allocate a heap number and call a C function for
+    // double exponents. The register containing
+    // the heap number is callee-saved.
+    __ AllocateHeapNumber(heapnumber,
+                          scratch,
+                          scratch2,
+                          heapnumbermap,
+                          &call_runtime);
+    __ push(ra);
+    __ PrepareCallCFunction(4, scratch);
+    // ABI (o32) for func(double a, double b): a in f12, b in f14.
+    ASSERT(double_base.is(f12));
+    ASSERT(double_exponent.is(f14));
+    __ SetCallCDoubleArguments(double_base, double_exponent);
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(masm->isolate()), 4);
+    __ pop(ra);
+    __ GetCFunctionDoubleResult(double_result);
+    __ sdc1(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    __ mov(v0, heapnumber);
+    __ DropAndRet(2 * kPointerSize);
+  }
+
+  __ bind(&call_runtime);
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 }
 
 
@@ -437,13 +3492,13 @@
 
 
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Throw(v0);
 }
 
 
 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
                                           UncatchableExceptionType type) {
-  UNIMPLEMENTED_MIPS();
+  __ ThrowUncatchable(type, v0);
 }
 
 
@@ -453,78 +3508,1427 @@
                               Label* throw_out_of_memory_exception,
                               bool do_gc,
                               bool always_allocate) {
-  UNIMPLEMENTED_MIPS();
+  // v0: result parameter for PerformGC, if any
+  // s0: number of arguments including receiver (C callee-saved)
+  // s1: pointer to the first argument          (C callee-saved)
+  // s2: pointer to builtin function            (C callee-saved)
+
+  if (do_gc) {
+    // Move result passed in v0 into a0 to call PerformGC.
+    __ mov(a0, v0);
+    __ PrepareCallCFunction(1, a1);
+    __ CallCFunction(
+        ExternalReference::perform_gc_function(masm->isolate()), 1);
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
+  if (always_allocate) {
+    __ li(a0, Operand(scope_depth));
+    __ lw(a1, MemOperand(a0));
+    __ Addu(a1, a1, Operand(1));
+    __ sw(a1, MemOperand(a0));
+  }
+
+  // Prepare arguments for C routine: a0 = argc, a1 = argv
+  __ mov(a0, s0);
+  __ mov(a1, s1);
+
+  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
+  // also need to reserve the 4 argument slots on the stack.
+
+  __ AssertStackIsAligned();
+
+  __ li(a2, Operand(ExternalReference::isolate_address()));
+
+  // From arm version of this function:
+  // TODO(1242173): To let the GC traverse the return address of the exit
+  // frames, we need to know where the return address is. Right now,
+  // we push it on the stack to be able to find it again, but we never
+  // restore from it in case of changes, which makes it impossible to
+  // support moving the C entry code stub. This should be fixed, but currently
+  // this is OK because the CEntryStub gets generated so early in the V8 boot
+  // sequence that it is not moving ever.
+
+  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+    // This branch-and-link sequence is needed to find the current PC on mips,
+    // saved to the ra register.
+    // Use masm-> here instead of the double-underscore macro since extra
+    // coverage code can interfere with the proper calculation of ra.
+    Label find_ra;
+    masm->bal(&find_ra);  // bal exposes branch delay slot.
+    masm->nop();  // Branch delay slot nop.
+    masm->bind(&find_ra);
+
+    // Adjust the value in ra to point to the correct return location, 2nd
+    // instruction past the real call into C code (the jalr(t9)), and push it.
+    // This is the return address of the exit frame.
+    const int kNumInstructionsToJump = 6;
+    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
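+    // kPointerSize equals the MIPS32 instruction size (4 bytes), so the Addu
+    // above advances ra by exactly kNumInstructionsToJump instructions.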
+    masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
+    masm->Subu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
+    // Stack is still aligned.
+
+    // Call the C routine.
+    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
+    masm->jalr(t9);
+    masm->nop();    // Branch delay slot nop.
+    // Make sure the stored 'ra' points to this position.
+    ASSERT_EQ(kNumInstructionsToJump,
+              masm->InstructionsGeneratedSince(&find_ra));
+  }
+
+  // Restore stack (remove arg slots).
+  __ Addu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
+
+  if (always_allocate) {
+    // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
+    __ li(a2, Operand(scope_depth));
+    __ lw(a3, MemOperand(a2));
+    __ Subu(a3, a3, Operand(1));
+    __ sw(a3, MemOperand(a2));
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
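+  // The failure tag fills the low bits covered by kFailureTagMask (the
+  // STATIC_ASSERT above relies on this), so v0 + 1 has those bits clear
+  // exactly when v0 is a failure object.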
+  __ addiu(a2, v0, 1);
+  __ andi(t0, a2, kFailureTagMask);
+  __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
+
+  // Exit C frame and return.
+  // v0:v1: result
+  // sp: stack pointer
+  // fp: frame pointer
+  __ LeaveExitFrame(save_doubles_, s0);
+  __ Ret();
+
+  // Check if we should retry or throw exception.
+  Label retry;
+  __ bind(&failure_returned);
+  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
+  __ Branch(&retry, eq, t0, Operand(zero_reg));
+
+  // Special handling of out of memory exceptions.
+  Failure* out_of_memory = Failure::OutOfMemoryException();
+  __ Branch(throw_out_of_memory_exception, eq,
+            v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+
+  // Retrieve the pending exception and clear the variable.
+  __ li(t0,
+        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(a3, MemOperand(t0));
+  __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+                                      masm->isolate())));
+  __ lw(v0, MemOperand(t0));
+  __ sw(a3, MemOperand(t0));
+
+  // Special handling of termination exceptions, which are uncatchable
+  // by JavaScript code.
+  __ Branch(throw_termination_exception, eq,
+            v0, Operand(masm->isolate()->factory()->termination_exception()));
+
+  // Handle normal exception.
+  __ jmp(throw_normal_exception);
+
+  __ bind(&retry);
+  // The last failure (v0) will be moved to a0 as the parameter when retrying.
 }
 
 
 void CEntryStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Called from JavaScript; parameters are on the stack as if calling a JS
+  // function.
+  // a0: number of arguments including receiver
+  // a1: pointer to builtin function
+  // fp: frame pointer    (restored after C call)
+  // sp: stack pointer    (restored as callee's sp after C call)
+  // cp: current context  (C callee-saved)
+
+  // NOTE: Invocations of builtins may return failure objects
+  // instead of a proper result. The builtin entry handles
+  // this by performing a garbage collection and retrying the
+  // builtin once.
+
+  // Compute the argv pointer in a callee-saved register.
+  __ sll(s1, a0, kPointerSizeLog2);
+  __ Addu(s1, sp, s1);
+  __ Subu(s1, s1, Operand(kPointerSize));
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  __ EnterExitFrame(save_doubles_);
+
+  // Setup argc and the builtin function in callee-saved registers.
+  __ mov(s0, a0);
+  __ mov(s2, a1);
+
+  // s0: number of arguments (C callee-saved)
+  // s1: pointer to first argument (C callee-saved)
+  // s2: pointer to builtin function (C callee-saved)
+
+  Label throw_normal_exception;
+  Label throw_termination_exception;
+  Label throw_out_of_memory_exception;
+
+  // Call into the runtime system.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               false,
+               false);
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  Failure* failure = Failure::InternalError();
+  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               true,
+               true);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+  __ bind(&throw_termination_exception);
+  GenerateThrowUncatchable(masm, TERMINATION);
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
 }
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  UNIMPLEMENTED_MIPS();
+  Label invoke, exit;
+
+  // Registers:
+  // a0: entry address
+  // a1: function
+  // a2: receiver
+  // a3: argc
+  //
+  // Stack:
+  // 4 args slots
+  // args
+
+  // Save callee saved registers on the stack.
+  __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
+
+  // Load argv in s0 register.
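+  // argv is the fifth argument; under the o32 ABI it arrives on the stack
+  // above the four argument slots, and the offset below also skips the
+  // registers saved by MultiPush above.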
+  __ lw(s0, MemOperand(sp, kNumCalleeSaved * kPointerSize +
+                           StandardFrameConstants::kCArgsSlotsSize));
+
+  // We build an EntryFrame.
+  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  __ li(t2, Operand(Smi::FromInt(marker)));
+  __ li(t1, Operand(Smi::FromInt(marker)));
+  __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
+                                      masm->isolate())));
+  __ lw(t0, MemOperand(t0));
+  __ Push(t3, t2, t1, t0);
+  // Setup frame pointer for the frame to be pushed.
+  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // caller fp          |
+  // function slot      | entry frame
+  // context slot       |
+  // bad fp (0xff...f)  |
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  #ifdef ENABLE_LOGGING_AND_PROFILING
+    // If this is the outermost JS call, set js_entry_sp value.
+    Label non_outermost_js;
+    ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
+                                  masm->isolate());
+    __ li(t1, Operand(ExternalReference(js_entry_sp)));
+    __ lw(t2, MemOperand(t1));
+    __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
+    __ sw(fp, MemOperand(t1));
+    __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+    Label cont;
+    __ b(&cont);
+    __ nop();   // Branch delay slot nop.
+    __ bind(&non_outermost_js);
+    __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+    __ bind(&cont);
+    __ push(t0);
+  #endif
+
+  // Call a faked try-block that does the invoke.
+  __ bal(&invoke);  // bal exposes branch delay slot.
+  __ nop();   // Branch delay slot nop.
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  // Coming in here the fp will be invalid because the PushTryHandler below
+  // sets it to 0 to signal the existence of the JSEntry frame.
+  __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+                                      masm->isolate())));
+  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
+  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+  __ b(&exit);  // b exposes branch delay slot.
+  __ nop();   // Branch delay slot nop.
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the bal(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+
+  // Clear any pending exceptions.
+  __ li(t0,
+        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(t1, MemOperand(t0));
+  __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
+                                      masm->isolate())));
+  __ sw(t1, MemOperand(t0));
+
+  // Invoke the function by calling through JS entry trampoline builtin.
+  // Notice that we cannot store a reference to the trampoline code directly in
+  // this stub, because runtime stubs are not traversed when doing GC.
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+                                      masm->isolate());
+    __ li(t0, Operand(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
+    __ li(t0, Operand(entry));
+  }
+  __ lw(t9, MemOperand(t0));  // Deref address.
+
+  // Call JSEntryTrampoline.
+  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+  __ Call(t9);
+
+  // Unlink this frame from the handler chain.
+  __ PopTryHandler();
+
+  __ bind(&exit);  // v0 holds result
+  #ifdef ENABLE_LOGGING_AND_PROFILING
+    // Check if the current stack frame is marked as the outermost JS frame.
+    Label non_outermost_js_2;
+    __ pop(t1);
+    __ Branch(&non_outermost_js_2, ne, t1,
+              Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+    __ li(t1, Operand(ExternalReference(js_entry_sp)));
+    __ sw(zero_reg, MemOperand(t1));
+    __ bind(&non_outermost_js_2);
+  #endif
+
+  // Restore the top frame descriptors from the stack.
+  __ pop(t1);
+  __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
+                                      masm->isolate())));
+  __ sw(t1, MemOperand(t0));
+
+  // Reset the stack to the callee saved registers.
+  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Restore callee saved registers from the stack.
+  __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
+  // Return.
+  __ Jump(ra);
 }
 
 
-// Uses registers a0 to t0. Expected input is
-// object in a0 (or at sp+1*kPointerSize) and function in
-// a1 (or at sp), depending on whether or not
-// args_in_registers() is true.
+// Uses registers a0 to t0.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: a0 or at sp + 1 * kPointerSize.
+// * function: a1 or at sp.
+//
+// Inlined call site patching is a crankshaft-specific feature that is not
+// implemented on MIPS.
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // This is a crankshaft-specific feature that has not been implemented yet.
+  ASSERT(!HasCallSiteInlineCheck());
+  // Call site inlining and patching implies arguments in registers.
+  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+  // ReturnTrueFalse is only implemented for inlined call sites.
+  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
+  // Fixed register usage throughout the stub:
+  const Register object = a0;  // Object (lhs).
+  Register map = a3;  // Map of the object.
+  const Register function = a1;  // Function (rhs).
+  const Register prototype = t0;  // Prototype of the function.
+  const Register inline_site = t5;
+  const Register scratch = a2;
+
+  Label slow, loop, is_instance, is_not_instance, not_js_object;
+
+  if (!HasArgsInRegisters()) {
+    __ lw(object, MemOperand(sp, 1 * kPointerSize));
+    __ lw(function, MemOperand(sp, 0));
+  }
+
+  // Check that the left hand is a JS object and load map.
+  __ JumpIfSmi(object, &not_js_object);
+  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+  // If there is a call site cache don't look in the global cache, but do the
+  // real lookup and update the call site cache.
+  if (!HasCallSiteInlineCheck()) {
+    Label miss;
+    __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
+    __ Branch(&miss, ne, function, Operand(t1));
+    __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
+    __ Branch(&miss, ne, map, Operand(t1));
+    __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+    __ bind(&miss);
+  }
+
+  // Get the prototype of the function.
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
+
+  // Check that the function prototype is a JS object.
+  __ JumpIfSmi(prototype, &slow);
+  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+  // Update the global instanceof or call site inlined cache with the current
+  // map and function. The cached answer will be set when it is known below.
+  if (!HasCallSiteInlineCheck()) {
+    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  // Register mapping: a3 is object map and t0 is function prototype.
+  // Get prototype of object into a2.
+  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+  // We don't need map any more. Use it as a scratch register.
+  Register scratch2 = map;
+  map = no_reg;
+
+  // Loop through the prototype chain looking for the function prototype.
+  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+  __ bind(&loop);
+  __ Branch(&is_instance, eq, scratch, Operand(prototype));
+  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
+  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+  __ Branch(&loop);
+
+  __ bind(&is_instance);
+  ASSERT(Smi::FromInt(0) == 0);
+  if (!HasCallSiteInlineCheck()) {
+    __ mov(v0, zero_reg);
+    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&is_not_instance);
+  if (!HasCallSiteInlineCheck()) {
+    __ li(v0, Operand(Smi::FromInt(1)));
+    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  Label object_not_null, object_not_null_or_smi;
+  __ bind(&not_js_object);
+  // Before null, smi and string value checks, check that the rhs is a
+  // function; for a non-function rhs an exception needs to be thrown.
+  __ JumpIfSmi(function, &slow);
+  __ GetObjectType(function, scratch2, scratch);
+  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Null is not instance of anything.
+  __ Branch(&object_not_null, ne, scratch,
+      Operand(masm->isolate()->factory()->null_value()));
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&object_not_null);
+  // Smi values are not instances of anything.
+  __ JumpIfNotSmi(object, &object_not_null_or_smi);
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&object_not_null_or_smi);
+  // String values are not instances of anything.
+  __ IsObjectJSStringType(object, scratch, &slow);
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  // Slow-case.  Tail call builtin.
+  __ bind(&slow);
+  if (!ReturnTrueFalseObject()) {
+    if (HasArgsInRegisters()) {
+      __ Push(a0, a1);
+    }
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+  } else {
+    __ EnterInternalFrame();
+    __ Push(a0, a1);
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    __ LeaveInternalFrame();
+    __ mov(a0, v0);
+    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+  }
 }
 
 
+Register InstanceofStub::left() { return a0; }
+
+
+Register InstanceofStub::right() { return a1; }
+
+
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // The displacement is the offset of the last parameter (if any)
+  // relative to the frame pointer.
+  static const int kDisplacement =
+      StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+  // Check that the key is a smi.
+  Label slow;
+  __ JumpIfNotSmi(a1, &slow);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor,
+            eq,
+            a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Check index (a1) against formal parameters count limit passed in
+  // through register a0. Use unsigned comparison to get negative
+  // check for free.
+  __ Branch(&slow, hs, a1, Operand(a0));
+
+  // Read the argument from the stack and return it.
+  __ subu(a3, a0, a1);
+  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
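+  // Both a0 and a1 hold smis, so the shift above by
+  // kPointerSizeLog2 - kSmiTagSize converts their difference straight from a
+  // smi-tagged count into a byte offset.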
+  __ Addu(a3, fp, Operand(t3));
+  __ lw(v0, MemOperand(a3, kDisplacement));
+  __ Ret();
+
+  // Arguments adaptor case: Check index (a1) against actual arguments
+  // limit found in the arguments adaptor frame. Use unsigned
+  // comparison to get negative check for free.
+  __ bind(&adaptor);
+  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
+
+  // Read the argument from the adaptor frame and return it.
+  __ subu(a3, a0, a1);
+  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, a2, Operand(t3));
+  __ lw(v0, MemOperand(a3, kDisplacement));
+  __ Ret();
+
+  // Slow-case: Handle non-smi or out-of-bounds access to arguments
+  // by calling the runtime system.
+  __ bind(&slow);
+  __ push(a1);
+  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
 }
 
 
 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // sp[0] : number of parameters
+  // sp[4] : receiver displacement
+  // sp[8] : function
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor_frame,
+            eq,
+            a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Get the length from the frame.
+  __ lw(a1, MemOperand(sp, 0));
+  __ Branch(&try_allocate);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ sw(a1, MemOperand(sp, 0));
+  __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a3, a2, Operand(at));
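+  // a1 holds the length as a smi, so the shift above converts it directly
+  // into a byte offset from the adaptor frame pointer in a2.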
+
+  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+  // Try the new space allocation. Start out with computing the size
+  // of the arguments object and the elements array in words.
+  Label add_arguments_object;
+  __ bind(&try_allocate);
+  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
+  __ srl(a1, a1, kSmiTagSize);
+
+  __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+  __ bind(&add_arguments_object);
+  __ Addu(a1, a1, Operand(GetArgumentsObjectSize() / kPointerSize));
+
+  // Do the allocation of both objects in one go.
+  __ AllocateInNewSpace(
+      a1,
+      v0,
+      a2,
+      a3,
+      &runtime,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+  // Get the arguments boilerplate from the current (global) context.
+  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+  __ lw(t0, MemOperand(t0,
+                       Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+
+  // Copy the JS object part.
+  __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
+
+  if (type_ == NEW_NON_STRICT) {
+    // Setup the callee in-object property.
+    STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+    __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+    const int kCalleeOffset = JSObject::kHeaderSize +
+                              Heap::kArgumentsCalleeIndex * kPointerSize;
+    __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
+  }
+
+  // Get the length (smi tagged) and set that as an in-object property too.
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+  __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
+                                Heap::kArgumentsLengthIndex * kPointerSize));
+
+  Label done;
+  __ Branch(&done, eq, a1, Operand(zero_reg));
+
+  // Get the parameters pointer from the stack.
+  __ lw(a2, MemOperand(sp, 1 * kPointerSize));
+
+  // Setup the elements pointer in the allocated arguments object and
+  // initialize the header in the elements fixed array.
+  __ Addu(t0, v0, Operand(GetArgumentsObjectSize()));
+  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
+  __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
+  __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  __ srl(a1, a1, kSmiTagSize);  // Untag the length for the loop.
+
+  // Copy the fixed array slots.
+  Label loop;
+  // Setup t0 to point to the first array slot.
+  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ bind(&loop);
+  // Pre-decrement a2 with kPointerSize on each iteration.
+  // Pre-decrement in order to skip receiver.
+  __ Addu(a2, a2, Operand(-kPointerSize));
+  __ lw(a3, MemOperand(a2));
+  // Post-increment t0 with kPointerSize on each iteration.
+  __ sw(a3, MemOperand(t0));
+  __ Addu(t0, t0, Operand(kPointerSize));
+  __ Subu(a1, a1, Operand(1));
+  __ Branch(&loop, ne, a1, Operand(zero_reg));
+
+  // Return and remove the on-stack parameters.
+  __ bind(&done);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
 }
 
 
 void RegExpExecStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Just jump directly to the runtime if native RegExp is not selected at
+  // compile time, or if the regexp entry in generated code has been turned
+  // off by a runtime switch or at compilation.
+#ifdef V8_INTERPRETED_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else  // V8_INTERPRETED_REGEXP
+  if (!FLAG_regexp_entry_native) {
+    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+    return;
+  }
+
+  // Stack frame on entry.
+  //  sp[0]: last_match_info (expected JSArray)
+  //  sp[4]: previous index
+  //  sp[8]: subject string
+  //  sp[12]: JSRegExp object
+
+  static const int kLastMatchInfoOffset = 0 * kPointerSize;
+  static const int kPreviousIndexOffset = 1 * kPointerSize;
+  static const int kSubjectOffset = 2 * kPointerSize;
+  static const int kJSRegExpOffset = 3 * kPointerSize;
+
+  Label runtime, invoke_regexp;
+
+  // Allocation of registers for this function. These are in callee save
+  // registers and will be preserved by the call to the native RegExp code, as
+  // this code is called using the normal C calling convention. When calling
+  // directly from generated code the native RegExp code will not do a GC and
+  // therefore the contents of these registers are safe to use after the call.
+  // MIPS - using s0..s2, since we are not using CEntry Stub.
+  Register subject = s0;
+  Register regexp_data = s1;
+  Register last_match_info_elements = s2;
+
+  // Ensure that a RegExp stack is allocated.
+  ExternalReference address_of_regexp_stack_memory_address =
+      ExternalReference::address_of_regexp_stack_memory_address(
+          masm->isolate());
+  ExternalReference address_of_regexp_stack_memory_size =
+      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
+  __ li(a0, Operand(address_of_regexp_stack_memory_size));
+  __ lw(a0, MemOperand(a0, 0));
+  __ Branch(&runtime, eq, a0, Operand(zero_reg));
+
+  // Check that the first argument is a JSRegExp object.
+  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfSmi(a0, &runtime);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
+
+  // Check that the RegExp has been compiled (data contains a fixed array).
+  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
+  if (FLAG_debug_code) {
+    __ And(t0, regexp_data, Operand(kSmiTagMask));
+    __ Check(nz,
+             "Unexpected type for RegExp data, FixedArray expected",
+             t0,
+             Operand(zero_reg));
+    __ GetObjectType(regexp_data, a0, a0);
+    __ Check(eq,
+             "Unexpected type for RegExp data, FixedArray expected",
+             a0,
+             Operand(FIXED_ARRAY_TYPE));
+  }
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the number of captures fits in the static offsets vector
+  // buffer.
+  __ lw(a2,
+         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2. This
+  // uses the assumption that smis are 2 * their untagged value.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+  __ Addu(a2, a2, Operand(2));  // a2 was a smi.
+  // Check that the static offsets vector buffer is large enough.
+  __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+
+  // a2: Number of capture registers
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the second argument is a string.
+  __ lw(subject, MemOperand(sp, kSubjectOffset));
+  __ JumpIfSmi(subject, &runtime);
+  __ GetObjectType(subject, a0, a0);
+  __ And(a0, a0, Operand(kIsNotStringMask));
+  STATIC_ASSERT(kStringTag == 0);
+  __ Branch(&runtime, ne, a0, Operand(zero_reg));
+
+  // Get the length of the string into a3.
+  __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
+
+  // a2: Number of capture registers
+  // a3: Length of subject string as a smi
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the third argument is a positive smi less than the subject
+  // string length. A negative value will be greater (unsigned comparison).
+  __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
+  __ And(at, a0, Operand(kSmiTagMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ Branch(&runtime, ls, a3, Operand(a0));
+
+  // a2: Number of capture registers
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the fourth object is a JSArray object.
+  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(a0, &runtime);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
+  // Check that the JSArray is in fast case.
+  __ lw(last_match_info_elements,
+         FieldMemOperand(a0, JSArray::kElementsOffset));
+  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+  __ Branch(&runtime, ne, a0, Operand(
+      masm->isolate()->factory()->fixed_array_map()));
+  // Check that the last match info has space for the capture registers and the
+  // additional information.
+  __ lw(a0,
+         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+  __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
+  __ sra(at, a0, kSmiTagSize);  // Untag length for comparison.
+  __ Branch(&runtime, gt, a2, Operand(at));
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check the representation and encoding of the subject string.
+  Label seq_string;
+  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  // First check for flat string.
+  __ And(at, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+  __ Branch(&seq_string, eq, at, Operand(zero_reg));
+
+  // subject: Subject string
+  // a0: instance type of subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check for flat cons string.
+  // A flat cons string is a cons string where the second part is the empty
+  // string. In that case the subject string is just the first part of the cons
+  // string. Also in this case the first part of the cons string is known to be
+  // a sequential string or an external string.
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
+  __ And(at, a0, Operand(kIsNotStringMask | kExternalStringTag));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
+  __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
+  __ Branch(&runtime, ne, a0, Operand(a1));
+  __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  // Is first part a flat string?
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(at, a0, Operand(kStringRepresentationMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+  __ bind(&seq_string);
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // a0: Instance type of subject string
+  STATIC_ASSERT(kStringEncodingMask == 4);
+  STATIC_ASSERT(kAsciiStringTag == 4);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  // Find the code object based on the assumptions above.
+  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ascii.
+  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+  __ sra(a3, a0, 2);  // a3 is 1 for ascii, 0 for UC16 (used below).
+  __ lw(t0, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+  __ movz(t9, t0, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
+
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains the hole.
+  __ GetObjectType(t9, a0, a0);
+  __ Branch(&runtime, ne, a0, Operand(CODE_TYPE));
+
+  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // t9: code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Load used arguments before starting to push arguments for the call to the
+  // native RegExp code, to avoid handling a changing stack height.
+  __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
+  __ sra(a1, a1, kSmiTagSize);  // Untag the Smi.
+
+  // a1: previous index
+  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // t9: code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // All checks done. Now push arguments for native regexp code.
+  __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
+                      1, a0, a2);
+
+  // Isolates: note we add an additional parameter here (isolate pointer).
+  static const int kRegExpExecuteArguments = 8;
+  static const int kParameterRegisters = 4;
+  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+  // Stack pointer now points to cell where return address is to be written.
+  // Arguments are before that on the stack or in registers, meaning we
+  // treat the return address as argument 5. Thus every argument after that
+  // needs to be shifted back by 1. Since DirectCEntryStub will handle
+  // allocating space for the c argument slots, we don't need to calculate
+  // that into the argument positions on the stack. This is how the stack will
+  // look (sp meaning the value of sp at this moment):
+  // [sp + 4] - Argument 8
+  // [sp + 3] - Argument 7
+  // [sp + 2] - Argument 6
+  // [sp + 1] - Argument 5
+  // [sp + 0] - saved ra
+
+  // Argument 8: Pass current isolate address.
+  // CFunctionArgumentOperand handles MIPS stack argument slots.
+  __ li(a0, Operand(ExternalReference::isolate_address()));
+  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
+
+  // Argument 7: Indicate that this is a direct call from JavaScript.
+  __ li(a0, Operand(1));
+  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
+
+  // Argument 6: Start (high end) of backtracking stack memory area.
+  __ li(a0, Operand(address_of_regexp_stack_memory_address));
+  __ lw(a0, MemOperand(a0, 0));
+  __ li(a2, Operand(address_of_regexp_stack_memory_size));
+  __ lw(a2, MemOperand(a2, 0));
+  __ addu(a0, a0, a2);
+  __ sw(a0, MemOperand(sp, 2 * kPointerSize));
+
+  // Argument 5: static offsets vector buffer.
+  __ li(a0, Operand(
+        ExternalReference::address_of_static_offsets_vector(masm->isolate())));
+  __ sw(a0, MemOperand(sp, 1 * kPointerSize));
+
+  // For arguments 4 and 3 get string length, calculate start of string data
+  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+  __ lw(a0, FieldMemOperand(subject, String::kLengthOffset));
+  __ sra(a0, a0, kSmiTagSize);
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ Addu(t0, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
+  // Argument 4 (a3): End of string data
+  // Argument 3 (a2): Start of string data
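+  // Shift the index and the length by a3 (0 for ASCII, 1 for two-byte) to
+  // convert character counts into byte offsets from the start of string data.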
+  __ sllv(t1, a1, a3);
+  __ addu(a2, t0, t1);
+  __ sllv(t1, a0, a3);
+  __ addu(a3, t0, t1);
+
+  // Argument 2 (a1): Previous index.
+  // Already there
+
+  // Argument 1 (a0): Subject string.
+  __ mov(a0, subject);
+
+  // Locate the code entry and call it.
+  __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+  DirectCEntryStub stub;
+  stub.GenerateCall(masm, t9);
+
+  __ LeaveExitFrame(false, no_reg);
+
+  // v0: result
+  // subject: subject string (callee saved)
+  // regexp_data: RegExp data (callee saved)
+  // last_match_info_elements: Last match info elements (callee saved)
+
+  // Check the result.
+
+  Label success;
+  __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+  Label failure;
+  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+  // If it is not an exception, it can only be a retry. Handle that in the
+  // runtime system.
+  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  // Result must now be exception. If there is no pending exception already, a
+  // stack overflow (on the backtrack stack) was detected in RegExp code but
+  // the exception has not been created yet. Handle that in the runtime system.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+  __ li(a1, Operand(
+      ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(a1, MemOperand(a1, 0));
+  __ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+                                      masm->isolate())));
+  __ lw(v0, MemOperand(a2, 0));
+  __ Branch(&runtime, eq, v0, Operand(a1));
+
+  __ sw(a1, MemOperand(a2, 0));  // Clear pending exception.
+
+  // Check if the exception is a termination. If so, throw as uncatchable.
+  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
+  Label termination_exception;
+  __ Branch(&termination_exception, eq, v0, Operand(a0));
+
+  __ Throw(a0);  // Expects thrown value in v0.
+
+  __ bind(&termination_exception);
+  __ ThrowUncatchable(TERMINATION, v0);  // Expects thrown value in v0.
+
+  __ bind(&failure);
+  // For failure and exception return null.
+  __ li(v0, Operand(masm->isolate()->factory()->null_value()));
+  __ Addu(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  // Process the result from the native regexp code.
+  __ bind(&success);
+  __ lw(a1,
+         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+  __ Addu(a1, a1, Operand(2));  // a1 was a smi.
+
+  // a1: number of capture registers
+  // subject: subject string
+  // Store the capture count.
+  __ sll(a2, a1, kSmiTagSize + kSmiShiftSize);  // To smi.
+  __ sw(a2, FieldMemOperand(last_match_info_elements,
+                             RegExpImpl::kLastCaptureCountOffset));
+  // Store last subject and last input.
+  __ mov(a3, last_match_info_elements);  // Moved up to reduce latency.
+  __ sw(subject,
+         FieldMemOperand(last_match_info_elements,
+                         RegExpImpl::kLastSubjectOffset));
+  __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
+  __ sw(subject,
+         FieldMemOperand(last_match_info_elements,
+                         RegExpImpl::kLastInputOffset));
+  __ mov(a3, last_match_info_elements);
+  __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+
+  // Get the static offsets vector filled by the native regexp code.
+  ExternalReference address_of_static_offsets_vector =
+      ExternalReference::address_of_static_offsets_vector(masm->isolate());
+  __ li(a2, Operand(address_of_static_offsets_vector));
+
+  // a1: number of capture registers
+  // a2: offsets vector
+  Label next_capture, done;
+  // Capture register counter starts from number of capture registers and
+  // counts down until wrapping after zero.
+  __ Addu(a0,
+         last_match_info_elements,
+         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+  __ bind(&next_capture);
+  __ Subu(a1, a1, Operand(1));
+  __ Branch(&done, lt, a1, Operand(zero_reg));
+  // Read the value from the static offsets vector buffer.
+  __ lw(a3, MemOperand(a2, 0));
+  __ addiu(a2, a2, kPointerSize);
+  // Store the smi value in the last match info.
+  __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
+  __ sw(a3, MemOperand(a0, 0));
+  __ Branch(&next_capture, USE_DELAY_SLOT);
+  __ addiu(a0, a0, kPointerSize);   // In branch delay slot.
+
+  __ bind(&done);
+
+  // Return last match info.
+  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
+  __ Addu(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to execute the regexp.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif  // V8_INTERPRETED_REGEXP
 }
 
 
 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  const int kMaxInlineLength = 100;
+  Label slowcase;
+  Label done;
+  __ lw(a1, MemOperand(sp, kPointerSize * 2));
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ JumpIfNotSmi(a1, &slowcase);
+  __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
+  // Smi-tagging is equivalent to multiplying by 2.
+  // Allocate RegExpResult followed by FixedArray with size in a2.
+  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+  // Elements:  [Map][Length][..elements..]
+  // Size of JSArray with two in-object properties and the header of a
+  // FixedArray.
+  int objects_size =
+      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
+  __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
+  __ Addu(a2, t1, Operand(objects_size));
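+  // t1: untagged number of elements; a2: total allocation size in words.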
+  __ AllocateInNewSpace(
+      a2,  // In: Size, in words.
+      v0,  // Out: Start of allocation (tagged).
+      a3,  // Scratch register.
+      t0,  // Scratch register.
+      &slowcase,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+  // v0: Start of allocated area, object-tagged.
+  // a1: Number of elements in array, as smi.
+  // t1: Number of elements, untagged.
+
+  // Set JSArray map to global.regexp_result_map().
+  // Set empty properties FixedArray.
+  // Set elements to point to FixedArray allocated right after the JSArray.
+  // Interleave operations for better latency.
+  __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
+  __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
+  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
+  __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+  // Set input, index and length fields from arguments.
+  __ lw(a1, MemOperand(sp, kPointerSize * 0));
+  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
+  __ lw(a1, MemOperand(sp, kPointerSize * 1));
+  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
+  __ lw(a1, MemOperand(sp, kPointerSize * 2));
+  __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
+
+  // Fill out the elements FixedArray.
+  // v0: JSArray, tagged.
+  // a3: FixedArray, tagged.
+  // t1: Number of elements in array, untagged.
+
+  // Set map.
+  __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+  // Set FixedArray length.
+  __ sll(t2, t1, kSmiTagSize);
+  __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+  // Fill contents of fixed-array with the-hole.
+  __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
+  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // Fill fixed array elements with hole.
+  // v0: JSArray, tagged.
+  // a2: the hole.
+  // a3: Start of elements in FixedArray.
+  // t1: Number of elements to fill.
+  Label loop;
+  __ sll(t1, t1, kPointerSizeLog2);  // Convert num elements to num bytes.
+  __ addu(t1, t1, a3);  // Point past last element to store.
+  __ bind(&loop);
+  __ Branch(&done, ge, a3, Operand(t1));  // Break when a3 past end of elem.
+  __ sw(a2, MemOperand(a3));
+  __ Branch(&loop, USE_DELAY_SLOT);
+  __ addiu(a3, a3, kPointerSize);  // In branch delay slot.
+
+  __ bind(&done);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slowcase);
+  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
 }
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label slow;
+
+  // The receiver might implicitly be the global object. This is
+  // indicated by passing the hole as the receiver to the call
+  // function stub.
+  if (ReceiverMightBeImplicit()) {
+    Label call;
+    // Get the receiver from the stack.
+    // function, receiver [, arguments]
+    __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
+    // Call as function is indicated with the hole.
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&call, ne, t0, Operand(at));
+    // Patch the receiver on the stack with the global receiver object.
+    __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+    __ bind(&call);
+  }
+
+  // Get the function to call from the stack.
+  // function, receiver [, arguments]
+  __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+  // Check that the function is really a JavaScript function.
+  // a1: pushed function (to be verified)
+  __ JumpIfSmi(a1, &slow);
+  // Get the map of the function object.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // Fast-case: Invoke the function now.
+  // a1: pushed function
+  ParameterCount actual(argc_);
+
+  if (ReceiverMightBeImplicit()) {
+    Label call_as_function;
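+    // t0 still holds the receiver loaded above; the hole value indicates an
+    // implicit receiver, in which case the call is made as a function call.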
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&call_as_function, eq, t0, Operand(at));
+    __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+    __ bind(&call_as_function);
+  }
+  __ InvokeFunction(a1,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    CALL_AS_FUNCTION);
+
+  // Slow-case: Non-function called.
+  __ bind(&slow);
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+  __ li(a0, Operand(argc_));  // Set up the number of arguments.
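+  // Set the expected number of arguments (a2) to zero for the adaptor.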
+  __ mov(a2, zero_reg);
+  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
 }
 
 
 // Unfortunately you have to run without snapshots to see most of these
 // names in the profile since most compare stubs end up in the snapshot.
 const char* CompareStub::GetName() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+         (lhs_.is(a1) && rhs_.is(a0)));
+
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+      kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
+  switch (cc_) {
+    case lt: cc_name = "LT"; break;
+    case gt: cc_name = "GT"; break;
+    case le: cc_name = "LE"; break;
+    case ge: cc_name = "GE"; break;
+    case eq: cc_name = "EQ"; break;
+    case ne: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
+  }
+
+  const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1";
+  const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1";
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == eq || cc_ == ne)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  const char* include_smi_compare_name = "";
+  if (!include_smi_compare_) {
+    include_smi_compare_name = "_NO_SMI";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s%s%s",
+               cc_name,
+               lhs_name,
+               rhs_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name,
+               include_smi_compare_name);
   return name_;
 }
 
 
 int CompareStub::MinorKey() {
-  UNIMPLEMENTED_MIPS();
-  return 0;
+  // Encode the two parameters in a unique 16 bit value.
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+         (lhs_.is(a1) && rhs_.is(a0)));
+  return ConditionField::encode(static_cast<unsigned>(cc_))
+         | RegisterField::encode(lhs_.is(a0))
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+         | IncludeSmiCompareField::encode(include_smi_compare_);
 }
 
 
-// StringCharCodeAtGenerator
-
+// StringCharCodeAtGenerator.
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label flat_string;
+  Label ascii_string;
+  Label got_char_code;
+
+  ASSERT(!t0.is(scratch_));
+  ASSERT(!t0.is(index_));
+  ASSERT(!t0.is(result_));
+  ASSERT(!t0.is(object_));
+
+  // If the receiver is a smi trigger the non-string case.
+  __ JumpIfSmi(object_, receiver_not_string_);
+
+  // Fetch the instance type of the receiver into result register.
+  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the non-string case.
+  __ And(t0, result_, Operand(kIsNotStringMask));
+  __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
+
+  // If the index is non-smi trigger the non-smi case.
+  __ JumpIfNotSmi(index_, &index_not_smi_);
+
+  // Put smi-tagged index into scratch register.
+  __ mov(scratch_, index_);
+  __ bind(&got_smi_index_);
+
+  // Check for index out of range.
+  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
+  __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
+
+  // We need special handling for non-flat strings.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(t0, result_, Operand(kStringRepresentationMask));
+  __ Branch(&flat_string, eq, t0, Operand(zero_reg));
+
+  // Handle non-flat strings.
+  __ And(t0, result_, Operand(kIsConsStringMask));
+  __ Branch(&call_runtime_, eq, t0, Operand(zero_reg));
+
+  // ConsString.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+  __ Branch(&call_runtime_, ne, result_, Operand(t0));
+
+  // Get the first of the two strings and load its instance type.
+  __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // If the first cons component is also non-flat, then go to runtime.
+  STATIC_ASSERT(kSeqStringTag == 0);
+
+  __ And(t0, result_, Operand(kStringRepresentationMask));
+  __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
+
+  // Check for 1-byte or 2-byte string.
+  __ bind(&flat_string);
+  STATIC_ASSERT(kAsciiStringTag != 0);
+  __ And(t0, result_, Operand(kStringEncodingMask));
+  __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
+
+  // 2-byte string.
+  // Load the 2-byte character code into the result register. We can
+  // add without shifting since the smi tag size is the log2 of the
+  // number of bytes in a two-byte character.
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+  __ Addu(scratch_, object_, Operand(scratch_));
+  __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+  __ Branch(&got_char_code);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+
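+  // Untag the smi index before using it as a byte offset into the string.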
+  __ srl(t0, scratch_, kSmiTagSize);
+  __ Addu(scratch_, object_, t0);
+
+  __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+  __ bind(&got_char_code);
+  __ sll(result_, result_, kSmiTagSize);
+  __ bind(&exit_);
 }
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+  // Index is not a smi.
+  __ bind(&index_not_smi_);
+  // If index is a heap number, try converting it to an integer.
+  __ CheckMap(index_,
+              scratch_,
+              Heap::kHeapNumberMapRootIndex,
+              index_not_number_,
+              DONT_DO_SMI_CHECK);
+  call_helper.BeforeCall(masm);
+  // Consumed by runtime conversion function:
+  __ Push(object_, index_, index_);
+  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+  } else {
+    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    // NumberToSmi discards numbers that are not exact integers.
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
+  }
+
+  // Save the conversion result before the pop instructions below
+  // have a chance to overwrite it.
+
+  __ Move(scratch_, v0);
+
+  __ pop(index_);
+  __ pop(object_);
+  // Reload the instance type.
+  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  call_helper.AfterCall(masm);
+  // If index is still not a smi, it must be out of range.
+  __ JumpIfNotSmi(scratch_, index_out_of_range_);
+  // Otherwise, return to the fast path.
+  __ Branch(&got_smi_index_);
+
+  // Call runtime. We get here when the receiver is a string and the
+  // index is a number, but the code of getting the actual character
+  // is too complex (e.g., when the string needs to be flattened).
+  __ bind(&call_runtime_);
+  call_helper.BeforeCall(masm);
+  __ Push(object_, index_);
+  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+
+  __ Move(result_, v0);
+
+  call_helper.AfterCall(masm);
+  __ jmp(&exit_);
+
+  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
 }
 
 
@@ -532,13 +4936,46 @@
 // StringCharFromCodeGenerator
 
 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+
+  ASSERT(!t0.is(result_));
+  ASSERT(!t0.is(code_));
+
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiShiftSize == 0);
+  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
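+  // The mask below is non-zero if code_ is not a smi or its value is outside
+  // the ASCII range, in which case we fall through to the slow case.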
+  __ And(t0,
+         code_,
+         Operand(kSmiTagMask |
+                 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
+
+  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+  // At this point code register contains smi tagged ASCII char code.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(result_, result_, t0);
+  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&slow_case_, eq, result_, Operand(t0));
+  __ bind(&exit_);
 }
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  UNIMPLEMENTED_MIPS();
+  __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+  __ bind(&slow_case_);
+  call_helper.BeforeCall(masm);
+  __ push(code_);
+  __ CallRuntime(Runtime::kCharFromCode, 1);
+  __ Move(result_, v0);
+
+  call_helper.AfterCall(masm);
+  __ Branch(&exit_);
+
+  __ Abort("Unexpected fallthrough from CharFromCode slow case");
 }
 
 
@@ -546,13 +4983,15 @@
 // StringCharAtGenerator
 
 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  char_code_at_generator_.GenerateFast(masm);
+  char_from_code_generator_.GenerateFast(masm);
 }
 
 
 void StringCharAtGenerator::GenerateSlow(
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  UNIMPLEMENTED_MIPS();
+  char_code_at_generator_.GenerateSlow(masm, call_helper);
+  char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
 
 
@@ -626,7 +5065,24 @@
                                           Register count,
                                           Register scratch,
                                           bool ascii) {
-  UNIMPLEMENTED_MIPS();
+  Label loop;
+  Label done;
+  // This loop just copies one character at a time, as it is only used for
+  // very short strings.
+  if (!ascii) {
+    __ addu(count, count, count);
+  }
+  __ Branch(&done, eq, count, Operand(zero_reg));
+  __ addu(count, dest, count);  // count now points past the last dest byte.
+
+  __ bind(&loop);
+  __ lbu(scratch, MemOperand(src));
+  __ addiu(src, src, 1);
+  __ sb(scratch, MemOperand(dest));
+  __ addiu(dest, dest, 1);
+  __ Branch(&loop, lt, dest, Operand(count));
+
+  __ bind(&done);
 }
 
 
@@ -646,7 +5102,105 @@
                                               Register scratch4,
                                               Register scratch5,
                                               int flags) {
-  UNIMPLEMENTED_MIPS();
+  bool ascii = (flags & COPY_ASCII) != 0;
+  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
+
+  if (dest_always_aligned && FLAG_debug_code) {
+    // Check that destination is actually word aligned if the flag says
+    // that it is.
+    __ And(scratch4, dest, Operand(kPointerAlignmentMask));
+    __ Check(eq,
+             "Destination of copy not aligned.",
+             scratch4,
+             Operand(zero_reg));
+  }
+
+  const int kReadAlignment = 4;
+  const int kReadAlignmentMask = kReadAlignment - 1;
+  // Ensure that reading an entire aligned word containing the last character
+  // of a string will not read outside the allocated area (because we pad up
+  // to kObjectAlignment).
+  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
+  // Assumes word reads and writes are little endian.
+  // Nothing to do for zero characters.
+  Label done;
+
+  if (!ascii) {
+    __ addu(count, count, count);
+  }
+  __ Branch(&done, eq, count, Operand(zero_reg));
+
+  Label byte_loop;
+  // Must copy at least eight bytes, otherwise just do it one byte at a time.
+  __ Subu(scratch1, count, Operand(8));
+  __ Addu(count, dest, Operand(count));
+  Register limit = count;  // Copy until dest equals this.
+  __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
+
+  if (!dest_always_aligned) {
+    // Align dest by byte copying. Copies between zero and three bytes.
+    __ And(scratch4, dest, Operand(kReadAlignmentMask));
+    Label dest_aligned;
+    __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
+    Label aligned_loop;
+    __ bind(&aligned_loop);
+    __ lbu(scratch1, MemOperand(src));
+    __ addiu(src, src, 1);
+    __ sb(scratch1, MemOperand(dest));
+    __ addiu(dest, dest, 1);
+    __ addiu(scratch4, scratch4, 1);
+    __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
+    __ bind(&dest_aligned);
+  }
+
+  Label simple_loop;
+
+  __ And(scratch4, src, Operand(kReadAlignmentMask));
+  __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
+
+  // Loop for src/dst that are not aligned the same way.
+  // This loop uses lwl and lwr instructions. These instructions
+  // depend on the endianness, and the implementation assumes little-endian.
+  {
+    Label loop;
+    __ bind(&loop);
+    __ lwr(scratch1, MemOperand(src));
+    __ Addu(src, src, Operand(kReadAlignment));
+    __ lwl(scratch1, MemOperand(src, -1));
+    __ sw(scratch1, MemOperand(dest));
+    __ Addu(dest, dest, Operand(kReadAlignment));
+    __ Subu(scratch2, limit, dest);
+    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+  }
+
+  __ Branch(&byte_loop);
+
+  // Simple loop.
+  // Copy words from src to dest, until less than four bytes left.
+  // Both src and dest are word aligned.
+  __ bind(&simple_loop);
+  {
+    Label loop;
+    __ bind(&loop);
+    __ lw(scratch1, MemOperand(src));
+    __ Addu(src, src, Operand(kReadAlignment));
+    __ sw(scratch1, MemOperand(dest));
+    __ Addu(dest, dest, Operand(kReadAlignment));
+    __ Subu(scratch2, limit, dest);
+    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
+  }
+
+  // Copy bytes from src to dest until dest hits limit.
+  __ bind(&byte_loop);
+  // Test if dest has already reached the limit.
+  __ Branch(&done, ge, dest, Operand(limit));
+  __ lbu(scratch1, MemOperand(src));
+  __ addiu(src, src, 1);
+  __ sb(scratch1, MemOperand(dest));
+  __ addiu(dest, dest, 1);
+  __ Branch(&byte_loop);
+
+  __ bind(&done);
 }
 
 
@@ -659,88 +5213,1439 @@
                                                         Register scratch4,
                                                         Register scratch5,
                                                         Label* not_found) {
-  UNIMPLEMENTED_MIPS();
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that both characters are not digits, as such strings have a
+  // different hash algorithm. Don't try to look for these in the symbol table.
+  Label not_array_index;
+  __ Subu(scratch, c1, Operand(static_cast<int>('0')));
+  __ Branch(&not_array_index,
+            Ugreater,
+            scratch,
+            Operand(static_cast<int>('9' - '0')));
+  __ Subu(scratch, c2, Operand(static_cast<int>('0')));
+
+  // If the check failed, combine both characters into a single halfword.
+  // This is required by the contract of the method: code at the
+  // not_found branch expects this combination in the c1 register.
+  Label tmp;
+  __ sll(scratch1, c2, kBitsPerByte);
+  __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
+  __ Or(c1, c1, scratch1);
+  __ bind(&tmp);
+  __ Branch(not_found,
+            Uless_equal,
+            scratch,
+            Operand(static_cast<int>('9' - '0')));
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  StringHelper::GenerateHashInit(masm, hash, c1);
+  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+  StringHelper::GenerateHashGetHash(masm, hash);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ sll(scratch, c2, kBitsPerByte);
+  __ Or(chars, chars, scratch);
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string.
+
+  // Load symbol table.
+  // Load address of first element of the symbol table.
+  Register symbol_table = c2;
+  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+  Register undefined = scratch4;
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
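+  // The capacity is a smi; shift right by one to untag it, then subtract one
+  // to form the probe mask.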
+  __ sra(mask, mask, 1);
+  __ Addu(mask, mask, -1);
+
+  // Calculate untagged address of the first element of the symbol table.
+  Register first_symbol_table_element = symbol_table;
+  __ Addu(first_symbol_table_element, symbol_table,
+         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+  // Registers.
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string
+  // mask:  capacity mask
+  // first_symbol_table_element: address of the first element of
+  //                             the symbol table
+  // undefined: the undefined object
+  // scratch: -
+
+  // Perform a number of probes in the symbol table.
+  static const int kProbes = 4;
+  Label found_in_symbol_table;
+  Label next_probe[kProbes];
+  Register candidate = scratch5;  // Scratch register contains candidate.
+  for (int i = 0; i < kProbes; i++) {
+    // Calculate entry in symbol table.
+    if (i > 0) {
+      __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+    } else {
+      __ mov(candidate, hash);
+    }
+
+    __ And(candidate, candidate, Operand(mask));
+
+    // Load the entry from the symbol table.
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+    __ sll(scratch, candidate, kPointerSizeLog2);
+    __ Addu(scratch, scratch, first_symbol_table_element);
+    __ lw(candidate, MemOperand(scratch));
+
+    // If entry is undefined no string with this hash can be found.
+    Label is_string;
+    __ GetObjectType(candidate, scratch, scratch);
+    __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
+
+    __ Branch(not_found, eq, undefined, Operand(candidate));
+    // Must be null (deleted entry).
+    if (FLAG_debug_code) {
+      __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+      __ Assert(eq, "oddball in symbol table is not undefined or null",
+          scratch, Operand(candidate));
+    }
+    __ jmp(&next_probe[i]);
+
+    __ bind(&is_string);
+
+    // Check that the candidate is a non-external ASCII string.  The instance
+    // type is still in the scratch register from the GetObjectType
+    // operation.
+    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
+
+    // If length is not 2 the string is not a candidate.
+    __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
+    __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
+
+    // Check if the two characters match.
+    // Assumes that word load is little endian.
+    __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+    __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
+    __ bind(&next_probe[i]);
+  }
+
+  // No matching 2 character string found by probing.
+  __ jmp(not_found);
+
+  // Scratch register contains result when we fall through to here.
+  Register result = candidate;
+  __ bind(&found_in_symbol_table);
+  __ mov(v0, result);
 }
 
 
 void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                       Register hash,
                                       Register character) {
-  UNIMPLEMENTED_MIPS();
+  // hash = character + (character << 10);
+  __ sll(hash, character, 10);
+  __ addu(hash, hash, character);
+  // hash ^= hash >> 6;
+  __ sra(at, hash, 6);
+  __ xor_(hash, hash, at);
 }
 
 
 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
                                               Register hash,
                                               Register character) {
-  UNIMPLEMENTED_MIPS();
+  // hash += character;
+  __ addu(hash, hash, character);
+  // hash += hash << 10;
+  __ sll(at, hash, 10);
+  __ addu(hash, hash, at);
+  // hash ^= hash >> 6;
+  __ sra(at, hash, 6);
+  __ xor_(hash, hash, at);
 }
 
 
 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
                                          Register hash) {
-  UNIMPLEMENTED_MIPS();
+  // hash += hash << 3;
+  __ sll(at, hash, 3);
+  __ addu(hash, hash, at);
+  // hash ^= hash >> 11;
+  __ sra(at, hash, 11);
+  __ xor_(hash, hash, at);
+  // hash += hash << 15;
+  __ sll(at, hash, 15);
+  __ addu(hash, hash, at);
+
+  // if (hash == 0) hash = 27;
+  __ ori(at, zero_reg, 27);
+  __ movz(hash, at, hash);
 }
 
 
 void SubStringStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label sub_string_runtime;
+  // Stack frame on entry.
+  //  ra: return address
+  //  sp[0]: to
+  //  sp[4]: from
+  //  sp[8]: string
+
+  // This stub is called from the native-call %_SubString(...), so
+  // nothing can be assumed about the arguments. It is tested that:
+  //  "string" is a sequential string,
+  //  both "from" and "to" are smis, and
+  //  0 <= from <= to <= string.length.
+  // If any of these assumptions fail, we call the runtime system.
+
+  static const int kToOffset = 0 * kPointerSize;
+  static const int kFromOffset = 1 * kPointerSize;
+  static const int kStringOffset = 2 * kPointerSize;
+
+  Register to = t2;
+  Register from = t3;
+
+  // Check bounds and smi-ness.
+  __ lw(to, MemOperand(sp, kToOffset));
+  __ lw(from, MemOperand(sp, kFromOffset));
+  STATIC_ASSERT(kFromOffset == kToOffset + 4);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+
+  __ JumpIfNotSmi(from, &sub_string_runtime);
+  __ JumpIfNotSmi(to, &sub_string_runtime);
+
+  __ sra(a3, from, kSmiTagSize);  // Remove smi tag.
+  __ sra(t5, to, kSmiTagSize);  // Remove smi tag.
+
+  // a3: from index (untagged smi)
+  // t5: to index (untagged smi)
+
+  __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg));  // From < 0.
+
+  __ subu(a2, t5, a3);
+  __ Branch(&sub_string_runtime, gt, a3, Operand(t5));  // Fail if from > to.
+
+  // Special handling of sub-strings of length 1 and 2. One character strings
+  // are handled in the runtime system (looked up in the single character
+  // cache). Two character strings are looked for in the symbol cache.
+  __ Branch(&sub_string_runtime, lt, a2, Operand(2));
+
+  // Both to and from are smis.
+
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  // Make sure first argument is a sequential (or flat) string.
+  __ lw(t1, MemOperand(sp, kStringOffset));
+  __ Branch(&sub_string_runtime, eq, t1, Operand(kSmiTagMask));
+
+  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+  __ And(t4, a1, Operand(kIsNotStringMask));
+
+  __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
+
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t1: string
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  Label seq_string;
+  __ And(t0, a1, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+
+  // External strings go to runtime.
+  __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
+
+  // Sequential strings are handled directly.
+  __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
+
+  // Cons string. Try to recurse (once) on the first substring.
+  // (This adds a little more generality than necessary to handle flattened
+  // cons strings, but not much).
+  __ lw(t1, FieldMemOperand(t1, ConsString::kFirstOffset));
+  __ lw(t0, FieldMemOperand(t1, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSeqStringTag == 0);
+  // Cons and External strings go to runtime.
+  __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
+
+  // Definitely a sequential string.
+  __ bind(&seq_string);
+
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t1: string
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  __ lw(t0, FieldMemOperand(t1, String::kLengthOffset));
+  __ Branch(&sub_string_runtime, lt, t0, Operand(to));  // Fail if to > length.
+  to = no_reg;
+
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t1: string
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  // Check for flat ASCII string.
+  Label non_ascii_flat;
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+
+  __ And(t4, a1, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
+
+  Label result_longer_than_two;
+  __ Branch(&result_longer_than_two, gt, a2, Operand(2));
+
+  // Sub string of length 2 requested.
+  // Get the two characters forming the sub string.
+  __ Addu(t1, t1, Operand(a3));
+  __ lbu(a3, FieldMemOperand(t1, SeqAsciiString::kHeaderSize));
+  __ lbu(t0, FieldMemOperand(t1, SeqAsciiString::kHeaderSize + 1));
+
+  // Try to look up the two-character string in the symbol table.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+
+  // a2: result string length.
+  // a3: two characters combined into halfword in little endian byte order.
+  __ bind(&make_two_character_string);
+  __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
+  __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&result_longer_than_two);
+
+  // Allocate the result.
+  __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
+
+  // v0: result string.
+  // a2: result string length.
+  // a3: from index (untagged smi)
+  // t1: string.
+  // t3: (a.k.a. from): from offset (smi)
+  // Locate first character of result.
+  __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate 'from' character of string.
+  __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Addu(t1, t1, Operand(a3));
+
+  // v0: result string.
+  // a1: first character of result string.
+  // a2: result string length.
+  // t1: first character of sub string to copy.
+  STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharactersLong(
+      masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
+  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii_flat);
+  // a2: result string length.
+  // t1: string.
+  // t3: (a.k.a. from): from offset (smi)
+  // Check for flat two byte string.
+
+  // Allocate the result.
+  __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
+
+  // v0: result string.
+  // a2: result string length.
+  // t1: string.
+  // Locate first character of result.
+  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate 'from' character of string.
+  __ Addu(t1, t1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // As "from" is a smi it is 2 times the value which matches the size of a two
+  // byte character.
+  __ Addu(t1, t1, Operand(from));
+  from = no_reg;
+
+  // v0: result string.
+  // a1: first character of result.
+  // a2: result length.
+  // t1: first character of string to copy.
+  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharactersLong(
+      masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
+  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Just jump to runtime to create the sub string.
+  __ bind(&sub_string_runtime);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                                      Register left,
+                                                      Register right,
+                                                      Register scratch1,
+                                                      Register scratch2,
+                                                      Register scratch3) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label strings_not_equal, check_zero_length;
+  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
+  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
+  __ bind(&strings_not_equal);
+  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
+  __ Ret();
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+
+  // Compare characters.
+  __ bind(&compare_chars);
+
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, length, scratch2, scratch3, v0,
+                                &strings_not_equal);
+
+  // Characters are equal.
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
 }
 
 
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                                        Register right,
                                                         Register left,
+                                                        Register right,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3,
                                                         Register scratch4) {
-  UNIMPLEMENTED_MIPS();
+  Label result_not_equal, compare_lengths;
+  // Find minimum length and length difference.
+  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
+  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ Subu(scratch3, scratch1, Operand(scratch2));
+  Register length_delta = scratch3;
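+  // If the right string is shorter (scratch2 < scratch1), conditionally move
+  // its length into scratch1 so that scratch1 holds the minimum length.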
+  __ slt(scratch4, scratch2, scratch1);
+  __ movn(scratch1, scratch2, scratch4);
+  Register min_length = scratch1;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
+
+  // Compare loop.
+  GenerateAsciiCharsCompareLoop(masm,
+                                left, right, min_length, scratch2, scratch4, v0,
+                                &result_not_equal);
+
+  // Compare lengths - strings up to min-length are equal.
+  __ bind(&compare_lengths);
+  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  // Use length_delta as result if it's zero.
+  __ mov(scratch2, length_delta);
+  __ mov(scratch4, zero_reg);
+  __ mov(v0, zero_reg);
+
+  __ bind(&result_not_equal);
+  // Conditionally update the result based either on length_delta or
+  // the last comparison performed in the loop above.
+  Label ret;
+  __ Branch(&ret, eq, scratch2, Operand(scratch4));
+  __ li(v0, Operand(Smi::FromInt(GREATER)));
+  __ Branch(&ret, gt, scratch2, Operand(scratch4));
+  __ li(v0, Operand(Smi::FromInt(LESS)));
+  __ bind(&ret);
+  __ Ret();
+}
+
+
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+    MacroAssembler* masm,
+    Register left,
+    Register right,
+    Register length,
+    Register scratch1,
+    Register scratch2,
+    Register scratch3,
+    Label* chars_not_equal) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that the loop ends when the index reaches zero, so no
+  // additional compare is needed.
+  __ SmiUntag(length);
+  __ Addu(scratch1, length,
+          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Addu(left, left, Operand(scratch1));
+  __ Addu(right, right, Operand(scratch1));
+  __ Subu(length, zero_reg, length);
+  Register index = length;  // index = -length;
+
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ Addu(scratch3, left, index);
+  __ lbu(scratch1, MemOperand(scratch3));
+  __ Addu(scratch3, right, index);
+  __ lbu(scratch2, MemOperand(scratch3));
+  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
+  __ Addu(index, index, 1);
+  __ Branch(&loop, ne, index, Operand(zero_reg));
 }
 
 
 void StringCompareStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label runtime;
+
+  Counters* counters = masm->isolate()->counters();
+
+  // Stack frame on entry.
+  //  sp[0]: right string
+  //  sp[4]: left string
+  __ lw(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
+  __ lw(a0, MemOperand(sp, 0 * kPointerSize));  // Right.
+
+  Label not_same;
+  __ Branch(&not_same, ne, a0, Operand(a1));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&not_same);
+
+  // Check that both objects are sequential ASCII strings.
+  __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+
+  // Compare flat ASCII strings natively. Remove arguments from stack first.
+  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
+
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  Label string_add_runtime, call_builtin;
+  Builtins::JavaScript builtin_id = Builtins::ADD;
+
+  Counters* counters = masm->isolate()->counters();
+
+  // Stack on entry:
+  // sp[0]: second argument (right).
+  // sp[4]: first argument (left).
+
+  // Load the two arguments.
+  __ lw(a0, MemOperand(sp, 1 * kPointerSize));  // First argument.
+  __ lw(a1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
+
+  // Make sure that both arguments are strings if not known in advance.
+  if (flags_ == NO_STRING_ADD_FLAGS) {
+    __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
+    // Load instance types.
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+    STATIC_ASSERT(kStringTag == 0);
+    // If either is not a string, go to runtime.
+    __ Or(t4, t0, Operand(t1));
+    __ And(t4, t4, Operand(kIsNotStringMask));
+    __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+  } else {
+    // Here at least one of the arguments is definitely a string.
+    // We convert the one that is not known to be a string.
+    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+      GenerateConvertArgument(
+          masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
+      builtin_id = Builtins::STRING_ADD_RIGHT;
+    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+      GenerateConvertArgument(
+          masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
+      builtin_id = Builtins::STRING_ADD_LEFT;
+    }
+  }
+
+  // Both arguments are strings.
+  // a0: first string
+  // a1: second string
+  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  {
+    Label strings_not_empty;
+    // Check if either of the strings is empty. In that case return the other.
+    // These tests use a zero-length check on the string length, which is a
+    // Smi.
+    // Assert that Smi::FromInt(0) is really 0.
+    STATIC_ASSERT(kSmiTag == 0);
+    ASSERT(Smi::FromInt(0) == 0);
+    __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
+    __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
+    __ mov(v0, a0);       // Assume we'll return first string (from a0).
+    __ movz(v0, a1, a2);  // If first is empty, return second (from a1).
+    __ slt(t4, zero_reg, a2);   // if (a2 > 0) t4 = 1.
+    __ slt(t5, zero_reg, a3);   // if (a3 > 0) t5 = 1.
+    __ and_(t4, t4, t5);        // Branch if both strings were non-empty.
+    __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
+
+    __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+    __ Addu(sp, sp, Operand(2 * kPointerSize));
+    __ Ret();
+
+    __ bind(&strings_not_empty);
+  }
+
+  // Untag both string-lengths.
+  __ sra(a2, a2, kSmiTagSize);
+  __ sra(a3, a3, kSmiTagSize);
+
+  // Both strings are non-empty.
+  // a0: first string
+  // a1: second string
+  // a2: length of first string
+  // a3: length of second string
+  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // Look at the length of the result of adding the two strings.
+  Label string_add_flat_result, longer_than_two;
+  // Adding two lengths can't overflow.
+  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
+  __ Addu(t2, a2, Operand(a3));
+  // Use the symbol table when adding two one-character strings, as it
+  // helps later optimizations to return a symbol here.
+  __ Branch(&longer_than_two, ne, t2, Operand(2));
+
+  // Check that both strings are non-external ASCII strings.
+  if (flags_ != NO_STRING_ADD_FLAGS) {
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  }
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
+                                                 &string_add_runtime);
+
+  // Get the two characters forming the new string.
+  __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
+  __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
+
+  // Try to look up the two-character string in the symbol table. If it is
+  // not found, just allocate a new one.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&make_two_character_string);
+  // The resulting string has length 2, and the first characters of the two
+  // strings have been combined into a single halfword in register a2. We can
+  // therefore fill the resulting string with a single halfword store
+  // instruction (which assumes that the processor is in little-endian mode).
+  __ li(t2, Operand(2));
+  __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
+  __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&longer_than_two);
+  // Check if resulting string will be flat.
+  __ Branch(&string_add_flat_result, lt, t2,
+           Operand(String::kMinNonFlatLength));
+  // Handle exceptionally long strings in the runtime system.
+  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
+  ASSERT(IsPowerOf2(String::kMaxLength + 1));
+  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
+  __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
+
+  // If result is not supposed to be flat, allocate a cons string object.
+  // If both strings are ASCII the result is an ASCII cons string.
+  if (flags_ != NO_STRING_ADD_FLAGS) {
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  }
+  Label non_ascii, allocated, ascii_data;
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  // Branch to non_ascii if either string-encoding field is zero (non-ascii).
+  __ And(t4, t0, Operand(t1));
+  __ And(t4, t4, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
+
+  // Allocate an ASCII cons string.
+  __ bind(&ascii_data);
+  __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
+  __ bind(&allocated);
+  // Fill the fields of the cons string.
+  __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
+  __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
+  __ mov(v0, t3);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii);
+  // At least one of the strings is two-byte. Check whether it happens
+  // to contain only ASCII characters.
+  // t0: first instance type.
+  // t1: second instance type.
+  // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
+  __ And(at, t0, Operand(kAsciiDataHintMask));
+  __ and_(at, at, t1);
+  __ Branch(&ascii_data, ne, at, Operand(zero_reg));
+
+  __ xor_(t0, t0, t1);
+  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+  __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+
+  // Allocate a two byte cons string.
+  __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
+  __ Branch(&allocated);
+
+  // Handle creating a flat result. First check that both strings are
+  // sequential and that they have the same encoding.
+  // a0: first string
+  // a1: second string
+  // a2: length of first string
+  // a3: length of second string
+  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
+  // t2: sum of lengths.
+  __ bind(&string_add_flat_result);
+  if (flags_ != NO_STRING_ADD_FLAGS) {
+    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+  }
+  // Check that both strings are sequential, meaning that we
+  // branch to runtime if either string tag is non-zero.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ Or(t4, t0, Operand(t1));
+  __ And(t4, t4, Operand(kStringRepresentationMask));
+  __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+
+  // Now check if both strings have the same encoding (ASCII/Two-byte).
+  // a0: first string
+  // a1: second string
+  // a2: length of first string
+  // a3: length of second string
+  // t0: first string instance type
+  // t1: second string instance type
+  // t2: sum of lengths.
+  Label non_ascii_string_add_flat_result;
+  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
+  __ xor_(t3, t1, t0);
+  __ And(t3, t3, Operand(kStringEncodingMask));
+  __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
+  // And see if it's ASCII (0) or two-byte (1).
+  __ And(t3, t0, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
+
+  // Both strings are sequential ASCII strings. We also know that they are
+  // short (since the sum of the lengths is less than kMinNonFlatLength).
+  // t2: length of resulting flat string
+  __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
+  // Locate first character of result.
+  __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // a0: first character of first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t2: first character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
+
+  // Load second argument and locate first character.
+  __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // a1: first character of second string.
+  // a3: length of second string.
+  // t2: next character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
+  __ mov(v0, t3);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&non_ascii_string_add_flat_result);
+  // Both strings are sequential two byte strings.
+  // a0: first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t2: sum of length of strings.
+  __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
+  // a0: first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t3: result string.
+
+  // Locate first character of result.
+  __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // a0: first character of first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t2: first character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
+
+  // Locate first character of second argument.
+  __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // a1: first character of second string.
+  // a3: length of second string.
+  // t2: next character of result (after copy of first string).
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
+
+  __ mov(v0, t3);
+  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  // Just jump to runtime to add the two strings.
+  __ bind(&string_add_runtime);
+  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+  if (call_builtin.is_linked()) {
+    __ bind(&call_builtin);
+    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+  }
+}
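
A minimal C++ sketch (editorial, not part of this patch) of the little-endian trick noted in the two-character fast path above: both ASCII characters are packed into one halfword so a single 16-bit store fills the first two bytes of the new string. StoreTwoAsciiChars is a hypothetical helper, not a V8 function.

#include <cstdint>
#include <cstring>

static void StoreTwoAsciiChars(char* dest, char first, char second) {
  // On a little-endian core the low byte of the halfword ends up at dest[0],
  // so the packed value plays the role of register a2 before the sh store.
  uint16_t packed = static_cast<uint16_t>(
      static_cast<uint8_t>(first) | (static_cast<uint8_t>(second) << 8));
  std::memcpy(dest, &packed, sizeof(packed));
}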
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+                                            int stack_offset,
+                                            Register arg,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            Register scratch4,
+                                            Label* slow) {
+  // First check if the argument is already a string.
+  Label not_string, done;
+  __ JumpIfSmi(arg, &not_string);
+  __ GetObjectType(arg, scratch1, scratch1);
+  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
+
+  // Check the number to string cache.
+  Label not_cached;
+  __ bind(&not_string);
+  // Puts the cached result into scratch1.
+  NumberToStringStub::GenerateLookupNumberStringCache(masm,
+                                                      arg,
+                                                      scratch1,
+                                                      scratch2,
+                                                      scratch3,
+                                                      scratch4,
+                                                      false,
+                                                      &not_cached);
+  __ mov(arg, scratch1);
+  __ sw(arg, MemOperand(sp, stack_offset));
+  __ jmp(&done);
+
+  // Check if the argument is a safe string wrapper.
+  __ bind(&not_cached);
+  __ JumpIfSmi(arg, slow);
+  __ GetObjectType(arg, scratch1, scratch2);  // map -> scratch1.
+  __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
+  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+  __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ And(scratch2, scratch2, scratch4);
+  __ Branch(slow, ne, scratch2, Operand(scratch4));
+  __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
+  __ sw(arg, MemOperand(sp, stack_offset));
+
+  __ bind(&done);
 }
 
 
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(state_ == CompareIC::SMIS);
+  Label miss;
+  __ Or(a2, a1, a0);
+  __ JumpIfNotSmi(a2, &miss);
+
+  if (GetCondition() == eq) {
+    // For equality we do not care about the sign of the result.
+    __ Subu(v0, a0, a1);
+  } else {
+    // Untag before subtracting to avoid handling overflow.
+    __ SmiUntag(a1);
+    __ SmiUntag(a0);
+    __ Subu(v0, a1, a0);
+  }
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
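
A minimal C++ sketch (editorial, not part of this patch) of why GenerateSmis above can use plain subtraction: with kSmiTag == 0 and a one-bit tag, a smi is simply the value shifted left by one, so the difference of two tagged smis is zero exactly when the values are equal; for ordering, the operands are untagged first so the subtraction cannot overflow.

#include <cstdint>

static int32_t SmiTag(int32_t value) { return value * 2; }  // value << 1
static int32_t SmiUntag(int32_t smi) { return smi / 2; }     // smi >> 1

static bool SmiEquals(int32_t left_smi, int32_t right_smi) {
  // The sign of the difference does not matter for equality.
  return (left_smi - right_smi) == 0;
}

static int32_t SmiCompare(int32_t left_smi, int32_t right_smi) {
  // Untag before subtracting so the result stays within 32 bits.
  return SmiUntag(left_smi) - SmiUntag(right_smi);
}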
 
 
 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+  Label generic_stub;
+  Label unordered;
+  Label miss;
+  __ And(a2, a1, Operand(a0));
+  __ JumpIfSmi(a2, &generic_stub);
+
+  __ GetObjectType(a0, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
+
+  // Inlining the double comparison and falling back to the general compare
+  // stub if NaN is involved or FPU is unsupported.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    // Load left and right operand.
+    __ Subu(a2, a1, Operand(kHeapObjectTag));
+    __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+    __ Subu(a2, a0, Operand(kHeapObjectTag));
+    __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+
+    Label fpu_eq, fpu_lt, fpu_gt;
+    // Compare operands (test if unordered).
+    __ c(UN, D, f0, f2);
+    // Don't base result on status bits when a NaN is involved.
+    __ bc1t(&unordered);
+    __ nop();
+
+    // Test if equal.
+    __ c(EQ, D, f0, f2);
+    __ bc1t(&fpu_eq);
+    __ nop();
+
+    // Test if unordered or less (unordered case is already handled).
+    __ c(ULT, D, f0, f2);
+    __ bc1t(&fpu_lt);
+    __ nop();
+
+    // Otherwise it's greater.
+    __ bc1f(&fpu_gt);
+    __ nop();
+
+    // Return a result of -1, 0, or 1.
+    __ bind(&fpu_eq);
+    __ li(v0, Operand(EQUAL));
+    __ Ret();
+
+    __ bind(&fpu_lt);
+    __ li(v0, Operand(LESS));
+    __ Ret();
+
+    __ bind(&fpu_gt);
+    __ li(v0, Operand(GREATER));
+    __ Ret();
+
+    __ bind(&unordered);
+  }
+
+  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+  __ bind(&generic_stub);
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
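
A minimal C++ sketch (editorial, not part of this patch) of the ordering the FPU path above implements: NaN operands are "unordered" and must not be classified as less, equal, or greater, which is why the c(UN, ...) test is taken first.

#include <cmath>

enum DoubleOutcome { D_LESS, D_EQUAL, D_GREATER, D_UNORDERED };

static DoubleOutcome CompareDoubles(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) return D_UNORDERED;  // c(UN, D)
  if (left == right) return D_EQUAL;                              // c(EQ, D)
  if (left < right) return D_LESS;                                // c(ULT, D)
  return D_GREATER;
}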
+
+
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SYMBOLS);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+  Register tmp1 = a2;
+  Register tmp2 = a3;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are symbols.
+  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(tmp1, tmp1, Operand(tmp2));
+  __ And(tmp1, tmp1, kIsSymbolMask);
+  __ Branch(&miss, eq, tmp1, Operand(zero_reg));
+  // Make sure a0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(a0));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(v0, right);
+  // Symbols are compared by identity.
+  __ Ret(ne, left, Operand(right));
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::STRINGS);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+  Register tmp1 = a2;
+  Register tmp2 = a3;
+  Register tmp3 = t0;
+  Register tmp4 = t1;
+  Register tmp5 = t2;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ Or(tmp3, tmp1, tmp2);
+  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
+  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
+
+  // Fast check for identical strings.
+  Label left_ne_right;
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
+  __ mov(v0, zero_reg);  // In the delay slot.
+  __ Ret();
+  __ bind(&left_ne_right);
+
+  // Handle not identical strings.
+
+  // Check that both strings are symbols. If they are, we're done
+  // because we already know they are not identical.
+  ASSERT(GetCondition() == eq);
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(tmp3, tmp1, Operand(tmp2));
+  __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+  Label is_symbol;
+  __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
+  __ mov(v0, a0);  // In the delay slot.
+  // Make sure a0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(a0));
+  __ Ret();
+  __ bind(&is_symbol);
+
+  // Check that both strings are sequential ASCII.
+  Label runtime;
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+                                                  &runtime);
+
+  // Compare flat ASCII strings. Returns when done.
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2, tmp3);
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ Push(left, right);
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(state_ == CompareIC::OBJECTS);
+  Label miss;
+  __ And(a2, a1, Operand(a0));
+  __ JumpIfSmi(a2, &miss);
+
+  __ GetObjectType(a0, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+
+  ASSERT(GetCondition() == eq);
+  __ Subu(v0, a0, Operand(a1));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ Push(a1, a0);
+  __ push(ra);
+
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+                                             masm->isolate());
+  __ EnterInternalFrame();
+  __ Push(a1, a0);
+  __ li(t0, Operand(Smi::FromInt(op_)));
+  __ push(t0);
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
+  // Compute the entry point of the rewritten stub.
+  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Restore registers.
+  __ pop(ra);
+  __ pop(a0);
+  __ pop(a1);
+  __ Jump(a2);
+}
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+  // No need to pop or drop anything; LeaveExitFrame will restore the old
+  // stack, thus dropping the allocated space for the return value.
+  // The saved ra is after the reserved stack space for the 4 args.
+  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
+
+  if (FLAG_debug_code && EnableSlowAsserts()) {
+    // In case of an error the return address may point to a memory area
+    // filled with kZapValue by the GC.
+    // Dereference the address and check for this.
+    __ lw(t0, MemOperand(t9));
+    __ Assert(ne, "Received invalid return address.", t0,
+        Operand(reinterpret_cast<uint32_t>(kZapValue)));
+  }
+  __ Jump(t9);
 }
 
 
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
-                                Register receiver,
-                                Register key,
-                                Register elements_map,
-                                Register elements,
-                                Register scratch1,
-                                Register scratch2,
-                                Register result,
-                                Label* not_pixel_array,
-                                Label* key_not_smi,
-                                Label* out_of_range) {
-  UNIMPLEMENTED_MIPS();
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+                                    ExternalReference function) {
+  __ li(t9, Operand(function));
+  this->GenerateCall(masm, t9);
+}
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+                                    Register target) {
+  __ Move(t9, target);
+  __ AssertStackIsAligned();
+  // Allocate space for arg slots.
+  __ Subu(sp, sp, kCArgsSlotsSize);
+
+  // Block the trampoline pool through the whole function to make sure the
+  // number of generated instructions is constant.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+
+  // We need to get the current 'pc' value, which is not available on MIPS.
+  Label find_ra;
+  masm->bal(&find_ra);  // ra = pc + 8.
+  masm->nop();  // Branch delay slot nop.
+  masm->bind(&find_ra);
+
+  const int kNumInstructionsToJump = 6;
+  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
+  // Push return address (accessible to GC through exit frame pc).
+  // This spot for ra was reserved in EnterExitFrame.
+  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
+  masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+                    RelocInfo::CODE_TARGET), true);
+  // Call the function.
+  masm->Jump(t9);
+  // Make sure the stored 'ra' points to this position.
+  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+}
+
+
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register receiver,
+    Register properties,
+    String* name,
+    Register scratch0) {
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
+  // not equal to the name and kProbes-th slot is not used (its name is the
+  // undefined value), it guarantees the hash table doesn't contain the
+  // property. It's true even if some slots represent deleted properties
+  // (their names are the null value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // scratch0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = scratch0;
+    // Capacity is smi 2^n.
+    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
+    __ Subu(index, index, Operand(1));
+    __ And(index, index, Operand(
+         Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // index *= 3.
+    __ mov(at, index);
+    __ sll(index, index, 1);
+    __ Addu(index, index, at);
+
+    Register entity_name = scratch0;
+    // Having undefined at this place means the name is not contained.
+    ASSERT_EQ(kSmiTagSize, 1);
+    Register tmp = properties;
+
+    __ sll(scratch0, index, 1);
+    __ Addu(tmp, properties, scratch0);
+    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+    ASSERT(!tmp.is(entity_name));
+    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+    __ Branch(done, eq, entity_name, Operand(tmp));
+
+    if (i != kInlinedProbes - 1) {
+      // Stop if found the property.
+      __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
+
+      // Check if the entry name is not a symbol.
+      __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+      __ lbu(entity_name,
+             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+      __ And(scratch0, entity_name, Operand(kIsSymbolMask));
+      __ Branch(miss, eq, scratch0, Operand(zero_reg));
+
+      // Restore the properties.
+      __ lw(properties,
+            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+    }
+  }
+
+  const int spill_mask =
+      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
+       a2.bit() | a1.bit() | a0.bit());
+
+  __ MultiPush(spill_mask);
+  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ li(a1, Operand(Handle<String>(name)));
+  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
+  __ MultiPop(spill_mask);
+
+  __ Branch(done, eq, v0, Operand(zero_reg));
+  __ Branch(miss, ne, v0, Operand(zero_reg));
+  return result;
+}
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If lookup was successful |scratch2| will be equal to elements + 4 * index.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register elements,
+                                                        Register name,
+                                                        Register scratch1,
+                                                        Register scratch2) {
+  // Assert that name contains a string.
+  if (FLAG_debug_code) __ AbortIfNotString(name);
+
+  // Compute the capacity mask.
+  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
+  __ sra(scratch1, scratch1, kSmiTagSize);  // convert smi to int
+  __ Subu(scratch1, scratch1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the following and instruction.
+      ASSERT(StringDictionary::GetProbeOffset(i) <
+             1 << (32 - String::kHashFieldOffset));
+      __ Addu(scratch2, scratch2, Operand(
+           StringDictionary::GetProbeOffset(i) << String::kHashShift));
+    }
+    __ srl(scratch2, scratch2, String::kHashShift);
+    __ And(scratch2, scratch1, scratch2);
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // scratch2 = scratch2 * 3.
+
+    __ mov(at, scratch2);
+    __ sll(scratch2, scratch2, 1);
+    __ Addu(scratch2, scratch2, at);
+
+    // Check if the key is identical to the name.
+    __ sll(at, scratch2, 2);
+    __ Addu(scratch2, elements, at);
+    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
+    __ Branch(done, eq, name, Operand(at));
+  }
+
+  const int spill_mask =
+      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
+       a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
+      ~(scratch1.bit() | scratch2.bit());
+
+  __ MultiPush(spill_mask);
+  __ Move(a0, elements);
+  __ Move(a1, name);
+  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ mov(scratch2, a2);
+  __ MultiPop(spill_mask);
+
+  __ Branch(done, ne, v0, Operand(zero_reg));
+  __ Branch(miss, eq, v0, Operand(zero_reg));
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Registers:
+  //  result (v0): holds the lookup result.
+  //  dictionary (a0): StringDictionary to probe.
+  //  key (a1): the name being looked up.
+  //  index (a2): will hold the index of the entry if the lookup is
+  //              successful; might alias with result.
+  // Returns:
+  //  result is zero if the lookup failed, non-zero otherwise.
+
+  Register result = v0;
+  Register dictionary = a0;
+  Register key = a1;
+  Register index = a2;
+  Register mask = a3;
+  Register hash = t0;
+  Register undefined = t1;
+  Register entry_key = t2;
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
+  __ sra(mask, mask, kSmiTagSize);
+  __ Subu(mask, mask, Operand(1));
+
+  __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    // Capacity is smi 2^n.
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the following and instruction.
+      ASSERT(StringDictionary::GetProbeOffset(i) <
+             1 << (32 - String::kHashFieldOffset));
+      __ Addu(index, hash, Operand(
+           StringDictionary::GetProbeOffset(i) << String::kHashShift));
+    } else {
+      __ mov(index, hash);
+    }
+    __ srl(index, index, String::kHashShift);
+    __ And(index, mask, index);
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    // index *= 3.
+    __ mov(at, index);
+    __ sll(index, index, 1);
+    __ Addu(index, index, at);
+
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ sll(index, index, 2);
+    __ Addu(index, index, dictionary);
+    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+    // Having undefined at this place means the name is not contained.
+    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
+
+    // Stop if found the property.
+    __ Branch(&in_dictionary, eq, entry_key, Operand(key));
+
+    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+      // Check if the entry name is not a symbol.
+      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+      __ lbu(entry_key,
+             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+      __ And(result, entry_key, Operand(kIsSymbolMask));
+      __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing negative lookup then probing failure should be
+  // treated as a lookup success. For positive lookup probing failure
+  // should be treated as lookup failure.
+  if (mode_ == POSITIVE_LOOKUP) {
+    __ mov(result, zero_reg);
+    __ Ret();
+  }
+
+  __ bind(&in_dictionary);
+  __ li(result, 1);
+  __ Ret();
+
+  __ bind(&not_in_dictionary);
+  __ mov(result, zero_reg);
+  __ Ret();
 }
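
A minimal C++ sketch (editorial, not part of this patch) of the probing scheme the lookup stubs above describe: the capacity is a power of two, each probe masks a quadratically growing offset onto the hash, an undefined slot proves the key is absent, and a matching slot ends the search. The container and names here are illustrative only.

#include <cstdint>
#include <string>
#include <vector>

struct Slot { bool used; std::string key; };

// Returns the slot index of |key|, or -1 if it is provably not in the table.
static int ProbeDictionary(const std::vector<Slot>& table,
                           const std::string& key, uint32_t hash,
                           int max_probes) {
  uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // capacity is 2^n
  for (int i = 0; i < max_probes; i++) {
    uint32_t index = (hash + i + i * i) & mask;  // masked quadratic probe
    const Slot& slot = table[index];
    if (!slot.used) return -1;        // undefined slot: key cannot be present
    if (slot.key == key) return static_cast<int>(index);
  }
  return -1;  // inlined probes exhausted; the stub falls back to a full probe
}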
 
 
@@ -749,4 +6654,3 @@
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
-
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index 675730a..356aa97 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -39,13 +39,22 @@
 // TranscendentalCache runtime function.
 class TranscendentalCacheStub: public CodeStub {
  public:
-  explicit TranscendentalCacheStub(TranscendentalCache::Type type)
-      : type_(type) {}
+  enum ArgumentType {
+    TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
+    UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+  };
+
+  TranscendentalCacheStub(TranscendentalCache::Type type,
+                          ArgumentType argument_type)
+      : type_(type), argument_type_(argument_type) { }
   void Generate(MacroAssembler* masm);
  private:
   TranscendentalCache::Type type_;
+  ArgumentType argument_type_;
+  void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
+
   Major MajorKey() { return TranscendentalCache; }
-  int MinorKey() { return type_; }
+  int MinorKey() { return type_ | argument_type_; }
   Runtime::FunctionId RuntimeFunction();
 };
 
@@ -63,176 +72,108 @@
 };
 
 
-class GenericBinaryOpStub : public CodeStub {
+class UnaryOpStub: public CodeStub {
  public:
-  static const int kUnknownIntValue = -1;
-
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      Register lhs,
-                      Register rhs,
-                      int constant_rhs = kUnknownIntValue)
+  UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
       : op_(op),
         mode_(mode),
-        lhs_(lhs),
-        rhs_(rhs),
-        constant_rhs_(constant_rhs),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
-        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
-        name_(NULL) { }
+        operand_type_(UnaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+  }
 
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+  UnaryOpStub(
+      int key,
+      UnaryOpIC::TypeInfo operand_type)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
-        lhs_(LhsRegister(RegisterBits::decode(key))),
-        rhs_(RhsRegister(RegisterBits::decode(key))),
-        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
-        runtime_operands_type_(type_info),
-        name_(NULL) { }
+        operand_type_(operand_type),
+        name_(NULL) {
+  }
 
  private:
   Token::Value op_;
-  OverwriteMode mode_;
-  Register lhs_;
-  Register rhs_;
-  int constant_rhs_;
-  bool specialized_on_rhs_;
-  BinaryOpIC::TypeInfo runtime_operands_type_;
+  UnaryOverwriteMode mode_;
+
+  // Operand type information determined at runtime.
+  UnaryOpIC::TypeInfo operand_type_;
+
   char* name_;
 
-  static const int kMaxKnownRhs = 0x40000000;
-  static const int kKnownRhsKeyBits = 6;
-
-  // Minor key encoding in 16 bits.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 6> {};
-  class TypeInfoBits: public BitField<int, 8, 3> {};
-  class RegisterBits: public BitField<bool, 11, 1> {};
-  class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
-
-  Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() {
-    ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
-           (lhs_.is(a1) && rhs_.is(a0)));
-    // Encode the parameters in a unique 16 bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | KnownIntBits::encode(MinorKeyForKnownInt())
-           | TypeInfoBits::encode(runtime_operands_type_)
-           | RegisterBits::encode(lhs_.is(a0));
-  }
-
-  void Generate(MacroAssembler* masm);
-  void HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                             Register lhs,
-                             Register rhs);
-  void HandleBinaryOpSlowCases(MacroAssembler* masm,
-                               Label* not_smi,
-                               Register lhs,
-                               Register rhs,
-                               const Builtins::JavaScript& builtin);
-  void GenerateTypeTransition(MacroAssembler* masm);
-
-  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
-    if (constant_rhs == kUnknownIntValue) return false;
-    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
-    if (op == Token::MOD) {
-      if (constant_rhs <= 1) return false;
-      if (constant_rhs <= 10) return true;
-      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
-      return false;
-    }
-    return false;
-  }
-
-  int MinorKeyForKnownInt() {
-    if (!specialized_on_rhs_) return 0;
-    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
-    ASSERT(IsPowerOf2(constant_rhs_));
-    int key = 12;
-    int d = constant_rhs_;
-    while ((d & 1) == 0) {
-      key++;
-      d >>= 1;
-    }
-    ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
-    return key;
-  }
-
-  int KnownBitsForMinorKey(int key) {
-    if (!key) return 0;
-    if (key <= 11) return key - 1;
-    int d = 1;
-    while (key != 12) {
-      key--;
-      d <<= 1;
-    }
-    return d;
-  }
-
-  Register LhsRegister(bool lhs_is_a0) {
-    return lhs_is_a0 ? a0 : a1;
-  }
-
-  Register RhsRegister(bool lhs_is_a0) {
-    return lhs_is_a0 ? a1 : a0;
-  }
-
-  bool HasSmiSmiFastPath() {
-    return op_ != Token::DIV;
-  }
-
-  bool ShouldGenerateSmiCode() {
-    return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
-  }
-
   const char* GetName();
 
-  virtual void FinishCode(Code* code) {
-    code->set_binary_op_type(runtime_operands_type_);
-  }
-
 #ifdef DEBUG
   void Print() {
-    if (!specialized_on_rhs_) {
-      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
-    } else {
-      PrintF("GenericBinaryOpStub (%s by %d)\n",
-             Token::String(op_),
-             constant_rhs_);
-    }
+    PrintF("UnaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           UnaryOpIC::GetName(operand_type_));
   }
 #endif
-};
 
-class TypeRecordingBinaryOpStub: public CodeStub {
- public:
-  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
-      : op_(op),
-        mode_(mode),
-        operands_type_(TRBinaryOpIC::UNINITIALIZED),
-        result_type_(TRBinaryOpIC::UNINITIALIZED),
-        name_(NULL) {
-    UNIMPLEMENTED_MIPS();
+  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+  class OpBits: public BitField<Token::Value, 1, 7> {};
+  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+  Major MajorKey() { return UnaryOp; }
+  int MinorKey() {
+    return ModeBits::encode(mode_)
+           | OpBits::encode(op_)
+           | OperandTypeInfoBits::encode(operand_type_);
   }
 
-  TypeRecordingBinaryOpStub(
+  // Note: A lot of the helper functions below will vanish when we use virtual
+  // functions instead of switches more often.
+  void Generate(MacroAssembler* masm);
+
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateSmiStubSub(MacroAssembler* masm);
+  void GenerateSmiStubBitNot(MacroAssembler* masm);
+  void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
+  void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateHeapNumberStubSub(MacroAssembler* masm);
+  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateGenericStubSub(MacroAssembler* masm);
+  void GenerateGenericStubBitNot(MacroAssembler* masm);
+  void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return UnaryOpIC::ToState(operand_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_unary_op_type(operand_type_);
+  }
+};
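
A minimal C++ sketch (editorial, not part of this patch) of the BitField-style packing used by MinorKey above: each field occupies a fixed bit range of the key, so several small enums can be encoded into, and decoded from, one integer. The BitField template and enums here are simplified stand-ins for V8's.

#include <cassert>

template <class T, int kShift, int kSize>
struct BitField {
  static int encode(T value) {
    assert((static_cast<int>(value) >> kSize) == 0);  // value must fit
    return static_cast<int>(value) << kShift;
  }
  static T decode(int key) {
    return static_cast<T>((key >> kShift) & ((1 << kSize) - 1));
  }
};

enum DemoMode { MODE_A = 0, MODE_B = 1 };
enum DemoOp { OP_SUB = 0, OP_BIT_NOT = 1 };

typedef BitField<DemoMode, 0, 1> DemoModeBits;  // bit 0
typedef BitField<DemoOp, 1, 7> DemoOpBits;      // bits 1..7

static int MakeMinorKey(DemoMode mode, DemoOp op) {
  return DemoModeBits::encode(mode) | DemoOpBits::encode(op);
}
// MakeMinorKey(MODE_B, OP_BIT_NOT) yields 0b11; DemoOpBits::decode recovers op.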
+
+
+class BinaryOpStub: public CodeStub {
+ public:
+  BinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(BinaryOpIC::UNINITIALIZED),
+        result_type_(BinaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+    use_fpu_ = CpuFeatures::IsSupported(FPU);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  BinaryOpStub(
       int key,
-      TRBinaryOpIC::TypeInfo operands_type,
-      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      BinaryOpIC::TypeInfo operands_type,
+      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         use_fpu_(FPUBits::decode(key)),
@@ -251,8 +192,8 @@
   bool use_fpu_;
 
   // Operand type information determined at runtime.
-  TRBinaryOpIC::TypeInfo operands_type_;
-  TRBinaryOpIC::TypeInfo result_type_;
+  BinaryOpIC::TypeInfo operands_type_;
+  BinaryOpIC::TypeInfo result_type_;
 
   char* name_;
 
@@ -260,12 +201,12 @@
 
 #ifdef DEBUG
   void Print() {
-    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+    PrintF("BinaryOpStub %d (op %s), "
            "(mode %d, runtime_type_info %s)\n",
            MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
-           TRBinaryOpIC::GetName(operands_type_));
+           BinaryOpIC::GetName(operands_type_));
   }
 #endif
 
@@ -273,10 +214,10 @@
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class FPUBits: public BitField<bool, 9, 1> {};
-  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
-  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
 
-  Major MajorKey() { return TypeRecordingBinaryOp; }
+  Major MajorKey() { return BinaryOp; }
   int MinorKey() {
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
@@ -293,6 +234,7 @@
                            Label* not_numbers,
                            Label* gc_required);
   void GenerateSmiCode(MacroAssembler* masm,
+                       Label* use_runtime,
                        Label* gc_required,
                        SmiCodeGenerateHeapNumberResults heapnumber_results);
   void GenerateLoadArguments(MacroAssembler* masm);
@@ -301,7 +243,9 @@
   void GenerateSmiStub(MacroAssembler* masm);
   void GenerateInt32Stub(MacroAssembler* masm);
   void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
   void GenerateAddStrings(MacroAssembler* masm);
   void GenerateCallRuntime(MacroAssembler* masm);
@@ -316,15 +260,15 @@
   void GenerateTypeTransition(MacroAssembler* masm);
   void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
 
-  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
 
   virtual InlineCacheState GetICState() {
-    return TRBinaryOpIC::ToState(operands_type_);
+    return BinaryOpIC::ToState(operands_type_);
   }
 
   virtual void FinishCode(Code* code) {
-    code->set_type_recording_binary_op_type(operands_type_);
-    code->set_type_recording_binary_op_result_type(result_type_);
+    code->set_binary_op_type(operands_type_);
+    code->set_binary_op_result_type(result_type_);
   }
 
   friend class CodeGenerator;
@@ -334,24 +278,36 @@
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
-  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+  // Omit left string check in stub (left is definitely a string).
+  NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+  // Omit right string check in stub (right is definitely a string).
+  NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+  // Omit both string checks in stub.
+  NO_STRING_CHECK_IN_STUB =
+      NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
 };
 
 
 class StringAddStub: public CodeStub {
  public:
-  explicit StringAddStub(StringAddFlags flags) {
-    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
-  }
+  explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
  private:
   Major MajorKey() { return StringAdd; }
-  int MinorKey() { return string_check_ ? 0 : 1; }
+  int MinorKey() { return flags_; }
 
   void Generate(MacroAssembler* masm);
 
-  // Should the stub check whether arguments are strings?
-  bool string_check_;
+  void GenerateConvertArgument(MacroAssembler* masm,
+                               int stack_offset,
+                               Register arg,
+                               Register scratch1,
+                               Register scratch2,
+                               Register scratch3,
+                               Register scratch4,
+                               Label* slow);
+
+  const StringAddFlags flags_;
 };
 
 
@@ -372,7 +328,6 @@
   StringCompareStub() { }
 
   // Compare two flat ASCII strings and returns result in v0.
-  // Does not use the stack.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                               Register left,
                                               Register right,
@@ -381,11 +336,28 @@
                                               Register scratch3,
                                               Register scratch4);
 
- private:
-  Major MajorKey() { return StringCompare; }
-  int MinorKey() { return 0; }
+  // Compares two flat ASCII strings for equality and returns result
+  // in v0.
+  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3);
 
-  void Generate(MacroAssembler* masm);
+ private:
+  virtual Major MajorKey() { return StringCompare; }
+  virtual int MinorKey() { return 0; }
+  virtual void Generate(MacroAssembler* masm);
+
+  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register length,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            Label* chars_not_equal);
 };
 
 
@@ -484,26 +456,225 @@
   const char* GetName() { return "RegExpCEntryStub"; }
 };
 
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of compacting GC (which can move code objects), we need to
+// keep the code that called into native code pinned in memory. Currently the
+// simplest approach is to generate such a stub early enough that it can never
+// be moved by the GC.
+class DirectCEntryStub: public CodeStub {
+ public:
+  DirectCEntryStub() {}
+  void Generate(MacroAssembler* masm);
+  void GenerateCall(MacroAssembler* masm,
+                                ExternalReference function);
+  void GenerateCall(MacroAssembler* masm, Register target);
 
-// Generate code the to load an element from a pixel array. The receiver is
-// assumed to not be a smi and to have elements, the caller must guarantee this
-// precondition. If the receiver does not have elements that are pixel arrays,
-// the generated code jumps to not_pixel_array. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated . If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
-                                Register receiver,
-                                Register key,
-                                Register elements_map,
-                                Register elements,
+ private:
+  Major MajorKey() { return DirectCEntry; }
+  int MinorKey() { return 0; }
+
+  bool NeedsImmovableCode() { return true; }
+
+  const char* GetName() { return "DirectCEntryStub"; }
+};
+
+class FloatingPointHelper : public AllStatic {
+ public:
+
+  enum Destination {
+    kFPURegisters,
+    kCoreRegisters
+  };
+
+  // Loads smis from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 will be
+  // scratched.
+  static void LoadSmis(MacroAssembler* masm,
+                       Destination destination,
+                       Register scratch1,
+                       Register scratch2);
+
+  // Loads objects from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 will still be
+  // scratched. If either a0 or a1 is not a number (neither a smi nor a heap
+  // number object), the not_number label is jumped to with a0 and a1 intact.
+  static void LoadOperands(MacroAssembler* masm,
+                           FloatingPointHelper::Destination destination,
+                           Register heap_number_map,
+                           Register scratch1,
+                           Register scratch2,
+                           Label* not_number);
+
+  // Convert the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript section 9.5: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1.
+  static void ConvertNumberToInt32(MacroAssembler* masm,
+                                   Register object,
+                                   Register dst,
+                                   Register heap_number_map,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   FPURegister double_scratch,
+                                   Label* not_int32);
+
+  // Converts the integer (untagged smi) in |int_scratch| to a double, storing
+  // the result either in |double_dst| or |dst2:dst1|, depending on
+  // |destination|.
+  // Warning: The value in |int_scratch| will be changed in the process!
+  static void ConvertIntToDouble(MacroAssembler* masm,
+                                 Register int_scratch,
+                                 Destination destination,
+                                 FPURegister double_dst,
+                                 Register dst1,
+                                 Register dst2,
+                                 Register scratch2,
+                                 FPURegister single_scratch);
+
+  // Load the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+  static void LoadNumberAsInt32Double(MacroAssembler* masm,
+                                      Register object,
+                                      Destination destination,
+                                      FPURegister double_dst,
+                                      Register dst1,
+                                      Register dst2,
+                                      Register heap_number_map,
+                                      Register scratch1,
+                                      Register scratch2,
+                                      FPURegister single_scratch,
+                                      Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+  // scratch3 is not used when FPU is supported.
+  static void LoadNumberAsInt32(MacroAssembler* masm,
+                                Register object,
+                                Register dst,
+                                Register heap_number_map,
                                 Register scratch1,
                                 Register scratch2,
-                                Register result,
-                                Label* not_pixel_array,
-                                Label* key_not_smi,
-                                Label* out_of_range);
+                                Register scratch3,
+                                FPURegister double_scratch,
+                                Label* not_int32);
+
+  // Generate non-FPU code to check if a double can be exactly represented by a
+  // 32-bit integer. This does not check for 0 or -0, which need
+  // to be checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+  // through otherwise.
+  // src1 and src2 will be clobbered.
+  //
+  // Expected input:
+  // - src1: higher (exponent) part of the double value.
+  // - src2: lower (mantissa) part of the double value.
+  // Output status:
+  // - dst: the high 32 bits of the mantissa (mantissa[51:20]).
+  // - src2: contains 1.
+  // - other registers are clobbered.
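+  // For illustration (assuming the IEEE-754 double layout): 5.0 arrives as
+  // src1 = 0x40140000, src2 = 0x00000000 and falls through, while
+  // 5.5 (src1 = 0x40160000, src2 = 0x00000000) jumps to not_int32.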
+  static void DoubleIs32BitInteger(MacroAssembler* masm,
+                                   Register src1,
+                                   Register src2,
+                                   Register dst,
+                                   Register scratch,
+                                   Label* not_int32);
+
+  // Generates code to call a C function to do a double operation using core
+  // registers. (Used when FPU is not supported.)
+  // This code never falls through, but returns with a heap number containing
+  // the result in v0.
+  // Register heap_number_result must be a heap number in which the
+  // result of the operation will be stored.
+  // Requires the following layout on entry:
+  // a0: Left value (least significant part of mantissa).
+  // a1: Left value (sign, exponent, top of mantissa).
+  // a2: Right value (least significant part of mantissa).
+  // a3: Right value (sign, exponent, top of mantissa).
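+  // For example, a left operand of 1.0 (IEEE-754 bits 0x3FF0000000000000)
+  // would be passed as a0 = 0x00000000 and a1 = 0x3FF00000.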
+  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+                                          Token::Value op,
+                                          Register heap_number_result,
+                                          Register scratch);
+
+ private:
+  static void LoadNumber(MacroAssembler* masm,
+                         FloatingPointHelper::Destination destination,
+                         Register object,
+                         FPURegister dst,
+                         Register dst1,
+                         Register dst2,
+                         Register heap_number_map,
+                         Register scratch1,
+                         Register scratch2,
+                         Label* not_number);
+};
+
+
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+  void Generate(MacroAssembler* masm);
+
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register receiver,
+      Register properties,
+      String* name,
+      Register scratch0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register elements,
+                                     Register name,
+                                     Register r0,
+                                     Register r1);
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("StringDictionaryLookupStub\n");
+  }
+#endif
+
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+  int MinorKey() {
+    return LookupModeBits::encode(mode_);
+  }
+
+  class LookupModeBits: public BitField<LookupMode, 0, 1> {};
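+  // With the enum ordering above, POSITIVE_LOOKUP encodes as 0 and
+  // NEGATIVE_LOOKUP as 1 in bit 0 of the minor key.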
+
+  LookupMode mode_;
+};
 
 
 } }  // namespace v8::internal
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
deleted file mode 100644
index be9ae9e..0000000
--- a/src/mips/codegen-mips-inl.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
-#define V8_MIPS_CODEGEN_MIPS_INL_H_
-
-#include "virtual-frame-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() {
-  __ b(&entry_label_);
-  __ nop();
-}
-
-
-// Note: this has been hacked for submisson. Mips branches require two
-//  additional operands: Register src1, const Operand& src2.
-void DeferredCode::Branch(Condition cond) {
-  __ Branch(&entry_label_, cond, zero_reg, Operand(0));
-}
-
-
-void Reference::GetValueAndSpill() {
-  GetValue();
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_MIPS_CODEGEN_MIPS_INL_H_
-
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index c1149df..4400b64 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,61 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 #include "v8.h"
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "jsregexp.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-#include "virtual-frame-mips-inl.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
 
-
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
-  // On MIPS you either have a completely spilled frame or you
-  // handle it yourself, but at the moment there's no automation
-  // of registers and deferred code.
-}
-
-
-void DeferredCode::RestoreRegisters() {
-}
-
-
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->frame()->AssertIsSpilled();
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-}
-
-
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
@@ -90,1124 +47,6 @@
 }
 
 
-// -----------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
-    : owner_(owner),
-      previous_(owner->state()) {
-  owner->set_state(this);
-}
-
-
-ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
-                           JumpTarget* true_target,
-                           JumpTarget* false_target)
-    : CodeGenState(owner),
-      true_target_(true_target),
-      false_target_(false_target) {
-  owner->set_state(this);
-}
-
-
-TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
-                                           Slot* slot,
-                                           TypeInfo type_info)
-    : CodeGenState(owner),
-      slot_(slot) {
-  owner->set_state(this);
-  old_type_info_ = owner->set_type_info(slot, type_info);
-}
-
-
-CodeGenState::~CodeGenState() {
-  ASSERT(owner_->state() == this);
-  owner_->set_state(previous_);
-}
-
-
-TypeInfoCodeGenState::~TypeInfoCodeGenState() {
-  owner()->set_type_info(slot_, old_type_info_);
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
-    : deferred_(8),
-      masm_(masm),
-      info_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      cc_reg_(cc_always),
-      state_(NULL),
-      loop_nesting_(0),
-      type_info_(NULL),
-      function_return_(JumpTarget::BIDIRECTIONAL),
-      function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// a1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-int CodeGenerator::NumberOfSlot(Slot* slot) {
-  UNIMPLEMENTED_MIPS();
-  return 0;
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
-
-
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
-    Slot* slot,
-    Register tmp,
-    Register tmp2,
-    JumpTarget* slow) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
-
-
-void CodeGenerator::LoadCondition(Expression* x,
-                                  JumpTarget* true_target,
-                                  JumpTarget* false_target,
-                                  bool force_cc) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::Load(Expression* x) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadGlobal() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
-  UNIMPLEMENTED_MIPS();
-  return EAGER_ARGUMENTS_ALLOCATION;
-}
-
-
-void CodeGenerator::StoreArgumentsObject(bool initial) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Reference::~Reference() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(JumpTarget* true_target,
-                              JumpTarget* false_target) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
-                                           OverwriteMode overwrite_mode,
-                                           GenerateInlineSmi inline_smi,
-                                           int constant_rhs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
-  DeferredInlineSmiOperation(Token::Value op,
-                             int value,
-                             bool reversed,
-                             OverwriteMode overwrite_mode,
-                             Register tos)
-      : op_(op),
-        value_(value),
-        reversed_(reversed),
-        overwrite_mode_(overwrite_mode),
-        tos_register_(tos) {
-    set_comment("[ DeferredInlinedSmiOperation");
-  }
-
-  virtual void Generate();
-  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
-  // Exit(). Currently on MIPS SaveRegisters() and RestoreRegisters() are empty
-  // methods, it is the responsibility of the deferred code to save and restore
-  // registers.
-  virtual bool AutoSaveAndRestore() { return false; }
-
-  void JumpToNonSmiInput(Condition cond, Register cmp1, const Operand& cmp2);
-  void JumpToAnswerOutOfRange(Condition cond,
-                              Register cmp1,
-                              const Operand& cmp2);
-
- private:
-  void GenerateNonSmiInput();
-  void GenerateAnswerOutOfRange();
-  void WriteNonSmiAnswer(Register answer,
-                         Register heap_number,
-                         Register scratch);
-
-  Token::Value op_;
-  int value_;
-  bool reversed_;
-  OverwriteMode overwrite_mode_;
-  Register tos_register_;
-  Label non_smi_input_;
-  Label answer_out_of_range_;
-};
-
-
-// For bit operations we try harder and handle the case where the input is not
-// a Smi but a 32bits integer without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond,
-                                                   Register cmp1,
-                                                   const Operand& cmp2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// For bit operations the result is always 32bits so we handle the case where
-// the result does not fit in a Smi without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond,
-                                                        Register cmp1,
-                                                        const Operand& cmp2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// On entry the non-constant side of the binary operation is in tos_register_
-// and the constant smi side is nowhere.  The tos_register_ is not used by the
-// virtual frame.  On exit the answer is in the tos_register_ and the virtual
-// frame is unchanged.
-void DeferredInlineSmiOperation::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// Convert and write the integer answer into heap_number.
-void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
-                                                   Register heap_number,
-                                                   Register scratch) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredInlineSmiOperation::GenerateNonSmiInput() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::SmiOperation(Token::Value op,
-                                 Handle<Object> value,
-                                 bool reversed,
-                                 OverwriteMode mode) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// On MIPS we load registers condReg1 and condReg2 with the values which should
-// be compared. With the CodeGenerator::cc_reg_ condition, functions will be
-// able to evaluate correctly the condition. (eg CodeGenerator::Branch)
-void CodeGenerator::Comparison(Condition cc,
-                               Expression* left,
-                               Expression* right,
-                               bool strict) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
-                                      CallFunctionFlags flags,
-                                      int position) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
-                                  Expression* receiver,
-                                  VariableProxy* arguments,
-                                  int position) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CheckStack() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateReturnSequence() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::InstantiateFunction(
-    Handle<SharedFunctionInfo> function_info,
-    bool pretenure) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                                      TypeofState typeof_state,
-                                                      JumpTarget* slow) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                                    TypeofState typeof_state,
-                                                    JumpTarget* slow,
-                                                    JumpTarget* done) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
-  DeferredStringCharCodeAt(Register object,
-                           Register index,
-                           Register scratch,
-                           Register result)
-      : result_(result),
-        char_code_at_generator_(object,
-                                index,
-                                scratch,
-                                result,
-                                &need_conversion_,
-                                &need_conversion_,
-                                &index_out_of_range_,
-                                STRING_INDEX_IS_NUMBER) {}
-
-  StringCharCodeAtGenerator* fast_case_generator() {
-    return &char_code_at_generator_;
-  }
-
-  virtual void Generate() {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
-  DeferredStringCharFromCode(Register code,
-                             Register result)
-      : char_from_code_generator_(code, result) {}
-
-  StringCharFromCodeGenerator* fast_case_generator() {
-    return &char_from_code_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_from_code_generator_.GenerateSlow(masm(), call_helper);
-  }
-
- private:
-  StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
-  DeferredStringCharAt(Register object,
-                       Register index,
-                       Register scratch1,
-                       Register scratch2,
-                       Register result)
-      : result_(result),
-        char_at_generator_(object,
-                           index,
-                           scratch1,
-                           scratch2,
-                           result,
-                           &need_conversion_,
-                           &need_conversion_,
-                           &index_out_of_range_,
-                           STRING_INDEX_IS_NUMBER) {}
-
-  StringCharAtGenerator* fast_case_generator() {
-    return &char_at_generator_;
-  }
-
-  virtual void Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharAtGenerator char_at_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
-  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
-                                               Register map_result,
-                                               Register scratch1,
-                                               Register scratch2)
-      : object_(object),
-        map_result_(map_result),
-        scratch1_(scratch1),
-        scratch2_(scratch2) { }
-
-  virtual void Generate() {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  Register object_;
-  Register map_result_;
-  Register scratch1_;
-  Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
-    ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
-  DeferredSearchCache(Register dst, Register cache, Register key)
-      : dst_(dst), cache_(cache), key_(key) {
-    set_comment("[ DeferredSearchCache");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_, cache_, key_;
-};
-
-
-void DeferredSearchCache::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
-  DeferredSwapElements(Register object, Register index1, Register index2)
-      : object_(object), index1_(index1), index2_(index2) {
-    set_comment("[ DeferredSwapElements");
-  }
-
-  virtual void Generate();
-
- private:
-  Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredCountOperation: public DeferredCode {
- public:
-  DeferredCountOperation(Register value,
-                         bool is_increment,
-                         bool is_postfix,
-                         int target_size)
-      : value_(value),
-        is_increment_(is_increment),
-        is_postfix_(is_postfix),
-        target_size_(target_size) {}
-
-  virtual void Generate() {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  Register value_;
-  bool is_increment_;
-  bool is_postfix_;
-  int target_size_;
-};
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
-  explicit DeferredReferenceGetNamedValue(Register receiver,
-                                          Handle<String> name,
-                                          bool is_contextual)
-      : receiver_(receiver),
-        name_(name),
-        is_contextual_(is_contextual),
-        is_dont_delete_(false) {
-    set_comment(is_contextual
-                ? "[ DeferredReferenceGetNamedValue (contextual)"
-                : "[ DeferredReferenceGetNamedValue");
-  }
-
-  virtual void Generate();
-
-  void set_is_dont_delete(bool value) {
-    ASSERT(is_contextual_);
-    is_dont_delete_ = value;
-  }
-
- private:
-  Register receiver_;
-  Handle<String> name_;
-  bool is_contextual_;
-  bool is_dont_delete_;
-};
-
-
-
-void DeferredReferenceGetNamedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceGetKeyedValue(Register key, Register receiver)
-      : key_(key), receiver_(receiver) {
-    set_comment("[ DeferredReferenceGetKeyedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register key_;
-  Register receiver_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceSetKeyedValue(Register value,
-                                 Register key,
-                                 Register receiver)
-      : value_(value), key_(key), receiver_(receiver) {
-    set_comment("[ DeferredReferenceSetKeyedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register value_;
-  Register key_;
-  Register receiver_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceSetNamedValue: public DeferredCode {
- public:
-  DeferredReferenceSetNamedValue(Register value,
-                                 Register receiver,
-                                 Handle<String> name)
-      : value_(value), receiver_(receiver), name_(name) {
-    set_comment("[ DeferredReferenceSetNamedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register value_;
-  Register receiver_;
-  Handle<String> name_;
-};
-
-
-void DeferredReferenceSetNamedValue::Generate() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedLoad() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedStore(StaticType* key_type,
-                                   WriteBarrierCharacter wb_info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-// -----------------------------------------------------------------------------
-// Reference support.
-
-
-Handle<String> Reference::GetName() {
-  UNIMPLEMENTED_MIPS();
-  return Handle<String>();
-}
-
-
-void Reference::DupIfPersist() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void Reference::GetValue() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
-  UNIMPLEMENTED_MIPS();
-  return name_;
-}
-
-
-#undef __
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 0a2cd45..fecd321 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,204 +37,16 @@
 namespace v8 {
 namespace internal {
 
-#if(defined(__mips_hard_float) && __mips_hard_float != 0)
-// Use floating-point coprocessor instructions. This flag is raised when
-// -mhard-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = false;
-#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
-// Not using floating-point coprocessor instructions. This flag is raised when
-// -msoft-float is passed to the compiler.
-static const bool IsMipsSoftFloatABI = true;
-#else
-static const bool IsMipsSoftFloatABI = true;
-#endif
-
 // Forward declarations
 class CompilationInfo;
-class DeferredCode;
-class JumpTarget;
-class RegisterAllocator;
-class RegisterFile;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
-enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
-
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types is important, see size().
-  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen,
-            Expression* expression,
-            bool persist_after_get = false);
-  ~Reference();
-
-  Expression* expression() const { return expression_; }
-  Type type() const { return type_; }
-  void set_type(Type value) {
-    ASSERT_EQ(ILLEGAL, type_);
-    type_ = value;
-  }
-
-  void set_unloaded() {
-    ASSERT_NE(ILLEGAL, type_);
-    ASSERT_NE(UNLOADED, type_);
-    type_ = UNLOADED;
-  }
-  // The size the reference takes up on the stack.
-  int size() const {
-    return (type_ < SLOT) ? 0 : type_;
-  }
-
-  bool is_illegal() const { return type_ == ILLEGAL; }
-  bool is_slot() const { return type_ == SLOT; }
-  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
-  bool is_unloaded() const { return type_ == UNLOADED; }
-
-  // Return the name. Only valid for named property references.
-  Handle<String> GetName();
-
-  // Generate code to push the value of the reference on top of the
-  // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is consumed by the call unless the
-  // reference is for a compound assignment.
-  // If the reference is not consumed, it is left in place under its value.
-  void GetValue();
-
-  // Generate code to pop a reference, push the value of the reference,
-  // and then spill the stack frame.
-  inline void GetValueAndSpill();
-
-  // Generate code to store the value on top of the expression stack in the
-  // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The  value is stored in the location specified
-  // by the reference, and is left on top of the stack, after the reference
-  // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state, WriteBarrierCharacter wb);
-
-  // This is in preparation for something that uses the reference on the stack.
-  // If we need this reference afterwards get then dup it now.  Otherwise mark
-  // it as used.
-  inline void DupIfPersist();
-
- private:
-  CodeGenerator* cgen_;
-  Expression* expression_;
-  Type type_;
-  // Keep the reference on the stack after get, so it can be used by set later.
-  bool persist_after_get_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair).  It is threaded through the
-// call stack.  Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
-  // Create an initial code generator state.  Destroying the initial state
-  // leaves the code generator with a NULL state.
-  explicit CodeGenState(CodeGenerator* owner);
-
-
-
-  // Destroy a code generator state and restore the owning code generator's
-  // previous state.
-  virtual ~CodeGenState();
-
-  virtual JumpTarget* true_target() const { return NULL; }
-  virtual JumpTarget* false_target() const { return NULL; }
-
- protected:
-  inline CodeGenerator* owner() { return owner_; }
-  inline CodeGenState* previous() const { return previous_; }
-
- private:
-  // The owning code generator.
-  CodeGenerator* owner_;
-
-
-
-  // The previous state of the owning code generator, restored when
-  // this state is destroyed.
-  CodeGenState* previous_;
-};
-
-
-class ConditionCodeGenState : public CodeGenState {
- public:
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own pair of branch labels.
-  ConditionCodeGenState(CodeGenerator* owner,
-                        JumpTarget* true_target,
-                        JumpTarget* false_target);
-
-  virtual JumpTarget* true_target() const { return true_target_; }
-  virtual JumpTarget* false_target() const { return false_target_; }
-
- private:
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
-};
-
-
-class TypeInfoCodeGenState : public CodeGenState {
- public:
-  TypeInfoCodeGenState(CodeGenerator* owner,
-                       Slot* slot_number,
-                       TypeInfo info);
-  virtual ~TypeInfoCodeGenState();
-
-  virtual JumpTarget* true_target() const { return previous()->true_target(); }
-  virtual JumpTarget* false_target() const {
-    return previous()->false_target();
-  }
-
- private:
-  Slot* slot_;
-  TypeInfo old_type_info_;
-};
-
 
 // -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
-  NO_ARGUMENTS_ALLOCATION,
-  EAGER_ARGUMENTS_ALLOCATION,
-  LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -----------------------------------------------------------------------------
 // CodeGenerator
 
 class CodeGenerator: public AstVisitor {
  public:
-  // Compilation mode.  Either the compiler is used as the primary
-  // compiler and needs to setup everything or the compiler is used as
-  // the secondary compiler for split compilation and has to handle
-  // bailouts.
-  enum Mode {
-    PRIMARY,
-    SECONDARY
-  };
-
   static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
@@ -261,50 +73,14 @@
                               int pos,
                               bool right_here = false);
 
-  // Accessors
-  MacroAssembler* masm() { return masm_; }
-  VirtualFrame* frame() const { return frame_; }
-  inline Handle<Script> script();
-
-  bool has_valid_frame() const { return frame_ != NULL; }
-
-  // Set the virtual frame to be new_frame, with non-frame register
-  // reference counts given by non_frame_registers.  The non-frame
-  // register reference counts of the old frame are returned in
-  // non_frame_registers.
-  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
-  void DeleteFrame();
-
-  RegisterAllocator* allocator() const { return allocator_; }
-
-  CodeGenState* state() { return state_; }
-  void set_state(CodeGenState* state) { state_ = state; }
-
-  TypeInfo type_info(Slot* slot) {
-    int index = NumberOfSlot(slot);
-    if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
-    return (*type_info_)[index];
-  }
-
-  TypeInfo set_type_info(Slot* slot, TypeInfo info) {
-    int index = NumberOfSlot(slot);
-    ASSERT(index >= kInvalidSlotNumber);
-    if (index != kInvalidSlotNumber) {
-      TypeInfo previous_value = (*type_info_)[index];
-      (*type_info_)[index] = info;
-      return previous_value;
-    }
-    return TypeInfo::Unknown();
-  }
-  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
   // Constants related to patching of inlined load/store.
   static int GetInlinedKeyedLoadInstructionsAfterPatch() {
     // This is in correlation with the padding in MacroAssembler::Abort.
     return FLAG_debug_code ? 45 : 20;
   }
-  static const int kInlinedKeyedStoreInstructionsAfterPatch = 9;
+
+  static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
+
   static int GetInlinedNamedStoreInstructionsAfterPatch() {
     ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
     // Magic number 5: instruction count after patched map load:
@@ -313,317 +89,6 @@
   }
 
  private:
-  // Type of a member function that generates inline code for a native function.
-  typedef void (CodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
-
-  static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-
-  // Construction/Destruction.
-  explicit CodeGenerator(MacroAssembler* masm);
-
-  // Accessors.
-  inline bool is_eval();
-  inline Scope* scope();
-  inline bool is_strict_mode();
-  inline StrictModeFlag strict_mode_flag();
-
-  // Generating deferred code.
-  void ProcessDeferred();
-
-  static const int kInvalidSlotNumber = -1;
-
-  int NumberOfSlot(Slot* slot);
-  // State
-  bool has_cc() const { return cc_reg_ != cc_always; }
-
-  JumpTarget* true_target() const { return state_->true_target(); }
-  JumpTarget* false_target() const { return state_->false_target(); }
-
-  // Track loop nesting level.
-  int loop_nesting() const { return loop_nesting_; }
-  void IncrementLoopNesting() { loop_nesting_++; }
-  void DecrementLoopNesting() { loop_nesting_--; }
-
-  // Node visitors.
-  void VisitStatements(ZoneList<Statement*>* statements);
-
-  virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  // Main code generation function
-  void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which can not be done more than once).  The return value should
-  // be in v0.
-  void GenerateReturnSequence();
-
-  // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode();
-
-  // Store the arguments object and allocate it if necessary.
-  void StoreArgumentsObject(bool initial);
-
-  // The following are used by class Reference.
-  void LoadReference(Reference* ref);
-  void UnloadReference(Reference* ref);
-
-  MemOperand SlotOperand(Slot* slot, Register tmp);
-
-  MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
-                                               Register tmp,
-                                               Register tmp2,
-                                               JumpTarget* slow);
-
-  void LoadCondition(Expression* x,
-                     JumpTarget* true_target,
-                     JumpTarget* false_target,
-                     bool force_cc);
-  void Load(Expression* x);
-  void LoadGlobal();
-  void LoadGlobalReceiver(Register scratch);
-
-
-  // Special code for typeof expressions: Unfortunately, we must
-  // be careful when loading the expression in 'typeof'
-  // expressions. We are not allowed to throw reference errors for
-  // non-existing properties of the global object, so we must make it
-  // look like an explicit property access, instead of an access
-  // through the context chain.
-  void LoadTypeofExpression(Expression* x);
-
-  // Store a keyed property. Key and receiver are on the stack and the value is
-  // in a0. Result is returned in r0.
-  void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
-
-  // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                         TypeofState typeof_state,
-                                         JumpTarget* slow);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-
-  // Support for loading from local/global variables and arguments
-  // whose location is known unless they are shadowed by
-  // eval-introduced bindings. Generates no code for unsupported slot
-  // types and therefore expects to fall through to the slow jump target.
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       JumpTarget* slow,
-                                       JumpTarget* done);
-
-  // Store the value on top of the stack to a slot.
-  void StoreToSlot(Slot* slot, InitState init_state);
-
-  // Support for compiling assignment expressions.
-  void EmitSlotAssignment(Assignment* node);
-  void EmitNamedPropertyAssignment(Assignment* node);
-  void EmitKeyedPropertyAssignment(Assignment* node);
-
-  // Load a named property, returning it in v0. The receiver is passed on the
-  // stack, and remains there.
-  void EmitNamedLoad(Handle<String> name, bool is_contextual);
-
-  // Store to a named property. If the store is contextual, value is passed on
-  // the frame and consumed. Otherwise, receiver and value are passed on the
-  // frame and consumed. The result is returned in v0.
-  void EmitNamedStore(Handle<String> name, bool is_contextual);
-
-  // Load a keyed property, leaving it in v0. The receiver and key are
-  // passed on the stack, and remain there.
-  void EmitKeyedLoad();
-
-  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
-
-  // Generate code that computes a shortcutting logical operation.
-  void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
-  void GenericBinaryOperation(Token::Value op,
-                              OverwriteMode overwrite_mode,
-                              GenerateInlineSmi inline_smi,
-                              int known_rhs =
-                                GenericBinaryOpStub::kUnknownIntValue);
-
-  void VirtualFrameBinaryOperation(Token::Value op,
-                                   OverwriteMode overwrite_mode,
-                                   int known_rhs =
-                                      GenericBinaryOpStub::kUnknownIntValue);
-
-  void SmiOperation(Token::Value op,
-                    Handle<Object> value,
-                    bool reversed,
-                    OverwriteMode mode);
-
-  void Comparison(Condition cc,
-                  Expression* left,
-                  Expression* right,
-                  bool strict = false);
-
-  void CallWithArguments(ZoneList<Expression*>* arguments,
-                         CallFunctionFlags flags,
-                         int position);
-
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).  We call x the applicand and y the receiver.
-  // The optimization avoids allocating an arguments object if possible.
-  void CallApplyLazy(Expression* applicand,
-                     Expression* receiver,
-                     VariableProxy* arguments,
-                     int position);
-
-  // Control flow
-  void Branch(bool if_true, JumpTarget* target);
-  void CheckStack();
-
-  bool CheckForInlineRuntimeCall(CallRuntime* node);
-
-  static Handle<Code> ComputeLazyCompile(int argc);
-  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
-  // Declare global variables and functions in the given array of
-  // name/value pairs.
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Instantiate the function based on the shared function info.
-  void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
-                           bool pretenure);
-
-  // Support for type checks.
-  void GenerateIsSmi(ZoneList<Expression*>* args);
-  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
-  void GenerateIsArray(ZoneList<Expression*>* args);
-  void GenerateIsRegExp(ZoneList<Expression*>* args);
-
-  // Support for construct call checks.
-  void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
-  // Support for arguments.length and arguments[?].
-  void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArguments(ZoneList<Expression*>* args);
-
-  // Support for accessing the class and value fields of an object.
-  void GenerateClassOf(ZoneList<Expression*>* args);
-  void GenerateValueOf(ZoneList<Expression*>* args);
-  void GenerateSetValueOf(ZoneList<Expression*>* args);
-
-  // Fast support for charCodeAt(n).
-  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharAt(ZoneList<Expression*>* args);
-
-  // Fast support for object equality testing.
-  void GenerateObjectEquals(ZoneList<Expression*>* args);
-
-  void GenerateLog(ZoneList<Expression*>* args);
-
-  // Fast support for Math.random().
-  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
-  void GenerateIsObject(ZoneList<Expression*>* args);
-  void GenerateIsSpecObject(ZoneList<Expression*>* args);
-  void GenerateIsFunction(ZoneList<Expression*>* args);
-  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
-  void GenerateStringAdd(ZoneList<Expression*>* args);
-  void GenerateSubString(ZoneList<Expression*>* args);
-  void GenerateStringCompare(ZoneList<Expression*>* args);
-  void GenerateIsStringWrapperSafeForDefaultValueOf(
-      ZoneList<Expression*>* args);
-
-  // Support for direct calls from JavaScript to native RegExp code.
-  void GenerateRegExpExec(ZoneList<Expression*>* args);
-
-  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
-  // Support for fast native caches.
-  void GenerateGetFromCache(ZoneList<Expression*>* args);
-
-  // Fast support for number to string.
-  void GenerateNumberToString(ZoneList<Expression*>* args);
-
-  // Fast swapping of elements.
-  void GenerateSwapElements(ZoneList<Expression*>* args);
-
-  // Fast call for custom callbacks.
-  void GenerateCallFunction(ZoneList<Expression*>* args);
-
-  // Fast call to math functions.
-  void GenerateMathPow(ZoneList<Expression*>* args);
-  void GenerateMathSin(ZoneList<Expression*>* args);
-  void GenerateMathCos(ZoneList<Expression*>* args);
-  void GenerateMathSqrt(ZoneList<Expression*>* args);
-  void GenerateMathLog(ZoneList<Expression*>* args);
-
-  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
-  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
-  // Simple condition analysis.
-  enum ConditionAnalysis {
-    ALWAYS_TRUE,
-    ALWAYS_FALSE,
-    DONT_KNOW
-  };
-  ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to indicate which source code is generated for. Source
-  // positions are collected by the assembler and emitted with the relocation
-  // information.
-  void CodeForFunctionPosition(FunctionLiteral* fun);
-  void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Statement* node);
-  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
-  void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
-  // True if the registers are valid for entry to a block.
-  bool HasValidEntryRegisters();
-#endif
-
-  List<DeferredCode*> deferred_;
-
-  // Assembler
-  MacroAssembler* masm_;  // to generate code
-
-  CompilationInfo* info_;
-
-  // Code generation state
-  VirtualFrame* frame_;
-  RegisterAllocator* allocator_;
-  Condition cc_reg_;
-  CodeGenState* state_;
-  int loop_nesting_;
-
-  Vector<TypeInfo>* type_info_;
-  // Jump targets
-  BreakTarget function_return_;
-
-  // True if the function return is shadowed (ie, jumping to the target
-  // function_return_ does not jump to the true function return, but rather
-  // to some unlinking code).
-  bool function_return_is_shadowed_;
-
-  friend class VirtualFrame;
-  friend class Isolate;
-  friend class JumpTarget;
-  friend class Reference;
-  friend class FastCodeGenerator;
-  friend class FullCodeGenerator;
-  friend class FullCodeGenSyntaxChecker;
-  friend class InlineRuntimeFunctionsTable;
-  friend class LCodeGen;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index 16e49c9..96a2333 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,7 +36,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Registers
+// Registers.
 
 
 // These register names are defined in a way to match the native disassembler
@@ -145,7 +145,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Instruction
+// Instructions.
 
 bool Instruction::IsForbiddenInBranchDelay() const {
   const int op = OpcodeFieldRaw();
@@ -293,15 +293,15 @@
           UNREACHABLE();
       };
       break;
-    case COP1:    // Coprocessor instructions
+    case COP1:    // Coprocessor instructions.
       switch (RsFieldRawNoAssert()) {
-        case BC1:   // branch on coprocessor condition
+        case BC1:   // Branch on coprocessor condition.
           return kImmediateType;
         default:
           return kRegisterType;
       };
       break;
-    // 16 bits Immediate type instructions. eg: addi dest, src, imm16
+    // 16 bits Immediate type instructions. eg: addi dest, src, imm16.
     case REGIMM:
     case BEQ:
     case BNE:
@@ -336,7 +336,7 @@
     case SWC1:
     case SDC1:
       return kImmediateType;
-    // 26 bits immediate type instructions. eg: j imm26
+    // 26 bits immediate type instructions. eg: j imm26.
     case J:
     case JAL:
       return kJumpType;
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index b20e9a2..2567330 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -47,6 +47,19 @@
 #endif
 
 
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+// Use floating-point coprocessor instructions. This flag is raised when
+// -mhard-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = false;
+#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+// Not using floating-point coprocessor instructions. This flag is raised when
+// -msoft-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = true;
+#else
+static const bool IsMipsSoftFloatABI = true;
+#endif
+
+
 // Defines constants and accessor classes to assemble, disassemble and
 // simulate MIPS32 instructions.
 //
@@ -58,7 +71,7 @@
 namespace internal {
 
 // -----------------------------------------------------------------------------
-// Registers and FPURegister.
+// Registers and FPURegisters.
 
 // Number of general purpose registers.
 static const int kNumRegisters = 32;
@@ -82,6 +95,11 @@
 // FCSR constants.
 static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
 static const uint32_t kFCSRFlagShift = 2;
+static const uint32_t kFCSRInexactFlagBit = 1 << 0;
+static const uint32_t kFCSRUnderflowFlagBit = 1 << 1;
+static const uint32_t kFCSROverflowFlagBit = 1 << 2;
+static const uint32_t kFCSRDivideByZeroFlagBit = 1 << 3;
+static const uint32_t kFCSRInvalidOpFlagBit = 1 << 4;
 
 // Helper functions for converting between register numbers and names.
 class Registers {
@@ -133,8 +151,6 @@
 // On MIPS all instructions are 32 bits.
 typedef int32_t Instr;
 
-typedef unsigned char byte_;
-
 // Special Software Interrupt codes when used in the presence of the MIPS
 // simulator.
 enum SoftwareInterruptCodes {
@@ -175,7 +191,7 @@
 static const int kFBtrueShift   = 16;
 static const int kFBtrueBits    = 1;
 
-// ----- Miscellianous useful masks.
+// ----- Miscellaneous useful masks.
 // Instruction bit masks.
 static const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
 static const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
@@ -215,7 +231,7 @@
   XORI      =   ((1 << 3) + 6) << kOpcodeShift,
   LUI       =   ((1 << 3) + 7) << kOpcodeShift,
 
-  COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class
+  COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class.
   BEQL      =   ((2 << 3) + 4) << kOpcodeShift,
   BNEL      =   ((2 << 3) + 5) << kOpcodeShift,
   BLEZL     =   ((2 << 3) + 6) << kOpcodeShift,
@@ -393,7 +409,7 @@
 
   cc_always     = 16,
 
-  // aliases
+  // Aliases.
   carry         = Uless,
   not_carry     = Ugreater_equal,
   zero          = equal,
@@ -455,14 +471,14 @@
 
 // ----- Coprocessor conditions.
 enum FPUCondition {
-  F,    // False
-  UN,   // Unordered
-  EQ,   // Equal
-  UEQ,  // Unordered or Equal
-  OLT,  // Ordered or Less Than
-  ULT,  // Unordered or Less Than
-  OLE,  // Ordered or Less Than or Equal
-  ULE   // Unordered or Less Than or Equal
+  F,    // False.
+  UN,   // Unordered.
+  EQ,   // Equal.
+  UEQ,  // Unordered or Equal.
+  OLT,  // Ordered or Less Than.
+  ULT,  // Unordered or Less Than.
+  OLE,  // Ordered or Less Than or Equal.
+  ULE   // Unordered or Less Than or Equal.
 };
 
 
@@ -494,7 +510,7 @@
 extern const Instr kPushInstruction;
 // sw(r, MemOperand(sp, 0))
 extern const Instr kPushRegPattern;
-//  lw(r, MemOperand(sp, 0))
+// lw(r, MemOperand(sp, 0))
 extern const Instr kPopRegPattern;
 extern const Instr kLwRegFpOffsetPattern;
 extern const Instr kSwRegFpOffsetPattern;
@@ -687,7 +703,7 @@
   // reference to an instruction is to convert a pointer. There is no way
   // to allocate or create instances of class Instruction.
   // Use the At(pc) function to create references to Instruction.
-  static Instruction* At(byte_* pc) {
+  static Instruction* At(byte* pc) {
     return reinterpret_cast<Instruction*>(pc);
   }
 
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index 36f577b..26e95fb 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -48,19 +48,25 @@
 
 
 void CPU::Setup() {
-  CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
-  cpu_features->Probe(true);
-  if (!cpu_features->IsSupported(FPU) || Serializer::enabled()) {
-    V8::DisableCrankshaft();
-  }
+  CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+  return CpuFeatures::IsSupported(FPU);
 }
 
 
 void CPU::FlushICache(void* start, size_t size) {
+  // Nothing to do, flushing no instructions.
+  if (size == 0) {
+    return;
+  }
+
 #if !defined (USE_SIMULATOR)
   int res;
 
-  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
+  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
   res = syscall(__NR_cacheflush, start, size, ICACHE);
 
   if (res) {
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index 35df69b..e323c50 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 
 namespace v8 {
@@ -40,106 +40,259 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 bool BreakLocationIterator::IsDebugBreakAtReturn() {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  return Debug::IsDebugBreakAtReturn(rinfo());
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtReturn() {
-  UNIMPLEMENTED_MIPS();
+  // Mips return sequence:
+  // mov sp, fp
+  // lw fp, sp(0)
+  // lw ra, sp(4)
+  // addiu sp, sp, 8
+  // addiu sp, sp, N
+  // jr ra
+  // nop (in branch delay slot)
+
+  // Make sure this constant matches the number of instructions we emit.
+  ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
+  CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+  // li and Call pseudo-instructions emit two instructions each.
+  patcher.masm()->li(v8::internal::t9,
+      Operand(reinterpret_cast<int32_t>(
+          Isolate::Current()->debug()->debug_break_return()->entry())));
+  patcher.masm()->Call(v8::internal::t9);
+  patcher.masm()->nop();
+  patcher.masm()->nop();
+  patcher.masm()->nop();
+
+  // TODO(mips): Open issue about using breakpoint instruction instead of nops.
+  // patcher.masm()->bkpt(0);
 }
 
 
 // Restore the JS frame exit code.
 void BreakLocationIterator::ClearDebugBreakAtReturn() {
-  UNIMPLEMENTED_MIPS();
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Assembler::kJSReturnSequenceInstructions);
 }
 
 
 // A debug break in the exit code is identified by the JS frame exit code
-// having been patched with li/call psuedo-instrunction (liu/ori/jalr)
+// having been patched with li/call pseudo-instruction (lui/ori/jalr).
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  return rinfo->IsPatchedReturnSequence();
 }
 
 
 bool BreakLocationIterator::IsDebugBreakAtSlot() {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  ASSERT(IsDebugBreakSlot());
+  // Check whether the debug break slot instructions have been patched.
+  return rinfo()->IsPatchedDebugBreakSlotSequence();
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtSlot() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(IsDebugBreakSlot());
+  // Patch the code changing the debug break slot code from:
+  //   nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
+  //   nop(DEBUG_BREAK_NOP)
+  //   nop(DEBUG_BREAK_NOP)
+  //   nop(DEBUG_BREAK_NOP)
+  // to a call to the debug break slot code.
+  //   li t9, address   (lui t9 / ori t9 instruction pair)
+  //   call t9          (jalr t9 / nop instruction pair)
+  CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+  patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
+      Isolate::Current()->debug()->debug_break_slot()->entry())));
+  patcher.masm()->Call(v8::internal::t9);
 }
 
 
 void BreakLocationIterator::ClearDebugBreakAtSlot() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(IsDebugBreakSlot());
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Assembler::kDebugBreakSlotInstructions);
 }
 
 
 #define __ ACCESS_MASM(masm)
 
 
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+                                          RegList object_regs,
+                                          RegList non_object_regs) {
+  __ EnterInternalFrame();
+
+  // Store the registers containing live values on the expression stack to
+  // make sure that these are correctly updated during GC. Non-object values
+  // are stored as smis, causing them to be untouched by GC.
+  ASSERT((object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((object_regs & non_object_regs) == 0);
+  if ((object_regs | non_object_regs) != 0) {
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        if (FLAG_debug_code) {
+          __ And(at, reg, 0xc0000000);
+          __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+        }
+        __ sll(reg, reg, kSmiTagSize);
+      }
+    }
+    __ MultiPush(object_regs | non_object_regs);
+  }
+
+#ifdef DEBUG
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+  __ mov(a0, zero_reg);  // No arguments.
+  __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+
+  CEntryStub ceb(1);
+  __ CallStub(&ceb);
+
+  // Restore the register values from the expression stack.
+  if ((object_regs | non_object_regs) != 0) {
+    __ MultiPop(object_regs | non_object_regs);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ srl(reg, reg, kSmiTagSize);
+      }
+      if (FLAG_debug_code &&
+          (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+        __ li(reg, kDebugZapValue);
+      }
+    }
+  }
+
+  __ LeaveInternalFrame();
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller and that was
+  // overwritten by the address of DebugBreakXXX.
+  __ li(t9, Operand(
+      ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate())));
+  __ lw(t9, MemOperand(t9));
+  __ Jump(t9);
+}
+
+
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for IC load (from ic-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  // Registers a0 and a2 contain objects that need to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a2.bit(), 0);
 }
 
 
 void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for IC store (from ic-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  // Registers a0, a1, and a2 contain objects that need to be pushed on the
+  // expression stack of the fake JS frame.
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
 }
 
 
 void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra  : return address
+  //  -- a0  : key
+  //  -- a1  : receiver
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit(), 0);
 }
 
 
 void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
 }
 
 
 void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for IC call (from ic-mips.cc).
+  // ----------- S t a t e -------------
+  //  -- a2: name
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a2.bit(), 0);
 }
 
 
 void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Calling convention for construct call (from builtins-mips.cc).
+  //  -- a0     : number of arguments (not smi)
+  //  -- a1     : constructor function
+  Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
 }
 
 
 void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // In places other than IC call sites it is expected that v0 is TOS, which
+  // is an object. This is not generally the case, so this should be used
+  // with care.
+  Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
 }
 
 
 void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, 0);
 }
 
 
 void Debug::GenerateSlot(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // Generate enough nop's to make space for a call instruction. Avoid emitting
+  // the trampoline pool in the debug break slot code.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+  Label check_codesize;
+  __ bind(&check_codesize);
+  __ RecordDebugBreakSlot();
+  for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+    __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+  }
+  ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+            masm->InstructionsGeneratedSince(&check_codesize));
 }
 
 
 void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // In the places where a debug break slot is inserted no registers can
+  // contain object pointers.
+  Generate_DebugBreakCallHelper(masm, 0, 0);
 }
 
 
 void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  masm->Abort("LiveEdit frame dropping is not supported on mips");
 }
 
 
 void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  masm->Abort("LiveEdit frame dropping is not supported on mips");
 }
 
 
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index b7ceb2b..7df5c41 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,7 +33,7 @@
 //
 //   NameConverter converter;
 //   Disassembler d(converter);
-//   for (byte_* pc = begin; pc < end;) {
+//   for (byte* pc = begin; pc < end;) {
 //     v8::internal::EmbeddedVector<char, 256> buffer;
 //     byte* prev_pc = pc;
 //     pc += d.InstructionDecode(buffer, pc);
@@ -85,7 +85,7 @@
 
   // Writes one disassembled instruction into 'buffer' (0-terminated).
   // Returns the length of the disassembled machine instruction in bytes.
-  int InstructionDecode(byte_* instruction);
+  int InstructionDecode(byte* instruction);
 
  private:
   // Bottleneck functions to print into the out_buffer.
@@ -103,6 +103,8 @@
   void PrintFd(Instruction* instr);
   void PrintSa(Instruction* instr);
   void PrintSd(Instruction* instr);
+  void PrintSs1(Instruction* instr);
+  void PrintSs2(Instruction* instr);
   void PrintBc(Instruction* instr);
   void PrintCc(Instruction* instr);
   void PrintFunction(Instruction* instr);
@@ -212,13 +214,29 @@
 }
 
 
-// Print the integer value of the rd field, (when it is not used as reg).
+// Print the integer value of the rd field, when it is not used as reg.
 void Decoder::PrintSd(Instruction* instr) {
   int sd = instr->RdValue();
   out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
 }
 
 
+// Print the integer value of the rd field, when used as 'ext' size.
+void Decoder::PrintSs1(Instruction* instr) {
+  int ss = instr->RdValue();
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+}
+
+
+// Print the integer value of the rd field, when used as 'ins' size.
+void Decoder::PrintSs2(Instruction* instr) {
+  int ss = instr->RdValue();
+  int pos = instr->SaValue();
+  out_buffer_pos_ +=
+      OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+}
+
+
 // Print the integer value of the cc field for the bc1t/f instructions.
 void Decoder::PrintBc(Instruction* instr) {
   int cc = instr->FBccValue();
@@ -242,7 +260,7 @@
 
 // Print 16-bit signed immediate value.
 void Decoder::PrintSImm16(Instruction* instr) {
-  int32_t imm = ((instr->Imm16Value())<<16)>>16;
+  int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
   out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
 }
 
@@ -298,15 +316,15 @@
 // complexity of FormatOption.
 int Decoder::FormatRegister(Instruction* instr, const char* format) {
   ASSERT(format[0] == 'r');
-  if (format[1] == 's') {  // 'rs: Rs register
+  if (format[1] == 's') {  // 'rs: Rs register.
     int reg = instr->RsValue();
     PrintRegister(reg);
     return 2;
-  } else if (format[1] == 't') {  // 'rt: rt register
+  } else if (format[1] == 't') {  // 'rt: rt register.
     int reg = instr->RtValue();
     PrintRegister(reg);
     return 2;
-  } else if (format[1] == 'd') {  // 'rd: rd register
+  } else if (format[1] == 'd') {  // 'rd: rd register.
     int reg = instr->RdValue();
     PrintRegister(reg);
     return 2;
@@ -320,15 +338,15 @@
 // complexity of FormatOption.
 int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
   ASSERT(format[0] == 'f');
-  if (format[1] == 's') {  // 'fs: fs register
+  if (format[1] == 's') {  // 'fs: fs register.
     int reg = instr->FsValue();
     PrintFPURegister(reg);
     return 2;
-  } else if (format[1] == 't') {  // 'ft: ft register
+  } else if (format[1] == 't') {  // 'ft: ft register.
     int reg = instr->FtValue();
     PrintFPURegister(reg);
     return 2;
-  } else if (format[1] == 'd') {  // 'fd: fd register
+  } else if (format[1] == 'd') {  // 'fd: fd register.
     int reg = instr->FdValue();
     PrintFPURegister(reg);
     return 2;
@@ -345,12 +363,12 @@
 // characters that were consumed from the formatting string.
 int Decoder::FormatOption(Instruction* instr, const char* format) {
   switch (format[0]) {
-    case 'c': {   // 'code for break or trap instructions
+    case 'c': {   // 'code for break or trap instructions.
       ASSERT(STRING_STARTS_WITH(format, "code"));
       PrintCode(instr);
       return 4;
     }
-    case 'i': {   // 'imm16u or 'imm26
+    case 'i': {   // 'imm16u or 'imm26.
       if (format[3] == '1') {
         ASSERT(STRING_STARTS_WITH(format, "imm16"));
         if (format[5] == 's') {
@@ -370,13 +388,13 @@
         return 5;
       }
     }
-    case 'r': {   // 'r: registers
+    case 'r': {   // 'r: registers.
       return FormatRegister(instr, format);
     }
-    case 'f': {   // 'f: FPUregisters
+    case 'f': {   // 'f: FPUregisters.
       return FormatFPURegister(instr, format);
     }
-    case 's': {   // 'sa
+    case 's': {   // 'sa.
       switch (format[1]) {
         case 'a': {
           ASSERT(STRING_STARTS_WITH(format, "sa"));
@@ -388,6 +406,17 @@
           PrintSd(instr);
           return 2;
         }
+        case 's': {
+          if (format[2] == '1') {
+              ASSERT(STRING_STARTS_WITH(format, "ss1"));  /* ext size */
+              PrintSs1(instr);
+              return 3;
+          } else {
+              ASSERT(STRING_STARTS_WITH(format, "ss2"));  /* ins size */
+              PrintSs2(instr);
+              return 3;
+          }
+        }
       }
     }
     case 'b': {   // 'bc - Special for bc1 cc field.
@@ -432,29 +461,29 @@
 
 void Decoder::DecodeTypeRegister(Instruction* instr) {
   switch (instr->OpcodeFieldRaw()) {
-    case COP1:    // Coprocessor instructions
+    case COP1:    // Coprocessor instructions.
       switch (instr->RsFieldRaw()) {
         case BC1:   // bc1 handled in DecodeTypeImmediate.
           UNREACHABLE();
           break;
         case MFC1:
-          Format(instr, "mfc1   'rt, 'fs");
+          Format(instr, "mfc1    'rt, 'fs");
           break;
         case MFHC1:
-          Format(instr, "mfhc1  'rt, 'fs");
+          Format(instr, "mfhc1   'rt, 'fs");
           break;
         case MTC1:
-          Format(instr, "mtc1   'rt, 'fs");
+          Format(instr, "mtc1    'rt, 'fs");
           break;
         // These are called "fs" too, although they are not FPU registers.
         case CTC1:
-          Format(instr, "ctc1   'rt, 'fs");
+          Format(instr, "ctc1    'rt, 'fs");
           break;
         case CFC1:
-          Format(instr, "cfc1   'rt, 'fs");
+          Format(instr, "cfc1    'rt, 'fs");
           break;
         case MTHC1:
-          Format(instr, "mthc1  'rt, 'fs");
+          Format(instr, "mthc1   'rt, 'fs");
           break;
         case D:
           switch (instr->FunctionFieldRaw()) {
@@ -480,7 +509,7 @@
               Format(instr, "neg.d   'fd, 'fs");
               break;
             case SQRT_D:
-              Format(instr, "sqrt.d   'fd, 'fs");
+              Format(instr, "sqrt.d  'fd, 'fs");
               break;
             case CVT_W_D:
               Format(instr, "cvt.w.d 'fd, 'fs");
@@ -592,134 +621,134 @@
     case SPECIAL:
       switch (instr->FunctionFieldRaw()) {
         case JR:
-          Format(instr, "jr   'rs");
+          Format(instr, "jr      'rs");
           break;
         case JALR:
-          Format(instr, "jalr 'rs");
+          Format(instr, "jalr    'rs");
           break;
         case SLL:
           if ( 0x0 == static_cast<int>(instr->InstructionBits()))
             Format(instr, "nop");
           else
-            Format(instr, "sll  'rd, 'rt, 'sa");
+            Format(instr, "sll     'rd, 'rt, 'sa");
           break;
         case SRL:
           if (instr->RsValue() == 0) {
-            Format(instr, "srl  'rd, 'rt, 'sa");
+            Format(instr, "srl     'rd, 'rt, 'sa");
           } else {
             if (mips32r2) {
-              Format(instr, "rotr  'rd, 'rt, 'sa");
+              Format(instr, "rotr    'rd, 'rt, 'sa");
             } else {
               Unknown(instr);
             }
           }
           break;
         case SRA:
-          Format(instr, "sra  'rd, 'rt, 'sa");
+          Format(instr, "sra     'rd, 'rt, 'sa");
           break;
         case SLLV:
-          Format(instr, "sllv 'rd, 'rt, 'rs");
+          Format(instr, "sllv    'rd, 'rt, 'rs");
           break;
         case SRLV:
           if (instr->SaValue() == 0) {
-            Format(instr, "srlv 'rd, 'rt, 'rs");
+            Format(instr, "srlv    'rd, 'rt, 'rs");
           } else {
             if (mips32r2) {
-              Format(instr, "rotrv 'rd, 'rt, 'rs");
+              Format(instr, "rotrv   'rd, 'rt, 'rs");
             } else {
               Unknown(instr);
             }
           }
           break;
         case SRAV:
-          Format(instr, "srav 'rd, 'rt, 'rs");
+          Format(instr, "srav    'rd, 'rt, 'rs");
           break;
         case MFHI:
-          Format(instr, "mfhi 'rd");
+          Format(instr, "mfhi    'rd");
           break;
         case MFLO:
-          Format(instr, "mflo 'rd");
+          Format(instr, "mflo    'rd");
           break;
         case MULT:
-          Format(instr, "mult 'rs, 'rt");
+          Format(instr, "mult    'rs, 'rt");
           break;
         case MULTU:
-          Format(instr, "multu  'rs, 'rt");
+          Format(instr, "multu   'rs, 'rt");
           break;
         case DIV:
-          Format(instr, "div  'rs, 'rt");
+          Format(instr, "div     'rs, 'rt");
           break;
         case DIVU:
-          Format(instr, "divu 'rs, 'rt");
+          Format(instr, "divu    'rs, 'rt");
           break;
         case ADD:
-          Format(instr, "add  'rd, 'rs, 'rt");
+          Format(instr, "add     'rd, 'rs, 'rt");
           break;
         case ADDU:
-          Format(instr, "addu 'rd, 'rs, 'rt");
+          Format(instr, "addu    'rd, 'rs, 'rt");
           break;
         case SUB:
-          Format(instr, "sub  'rd, 'rs, 'rt");
+          Format(instr, "sub     'rd, 'rs, 'rt");
           break;
         case SUBU:
-          Format(instr, "sub  'rd, 'rs, 'rt");
+          Format(instr, "subu    'rd, 'rs, 'rt");
           break;
         case AND:
-          Format(instr, "and  'rd, 'rs, 'rt");
+          Format(instr, "and     'rd, 'rs, 'rt");
           break;
         case OR:
           if (0 == instr->RsValue()) {
-            Format(instr, "mov  'rd, 'rt");
+            Format(instr, "mov     'rd, 'rt");
           } else if (0 == instr->RtValue()) {
-            Format(instr, "mov  'rd, 'rs");
+            Format(instr, "mov     'rd, 'rs");
           } else {
-            Format(instr, "or   'rd, 'rs, 'rt");
+            Format(instr, "or      'rd, 'rs, 'rt");
           }
           break;
         case XOR:
-          Format(instr, "xor  'rd, 'rs, 'rt");
+          Format(instr, "xor     'rd, 'rs, 'rt");
           break;
         case NOR:
-          Format(instr, "nor  'rd, 'rs, 'rt");
+          Format(instr, "nor     'rd, 'rs, 'rt");
           break;
         case SLT:
-          Format(instr, "slt  'rd, 'rs, 'rt");
+          Format(instr, "slt     'rd, 'rs, 'rt");
           break;
         case SLTU:
-          Format(instr, "sltu 'rd, 'rs, 'rt");
+          Format(instr, "sltu    'rd, 'rs, 'rt");
           break;
         case BREAK:
           Format(instr, "break, code: 'code");
           break;
         case TGE:
-          Format(instr, "tge  'rs, 'rt, code: 'code");
+          Format(instr, "tge     'rs, 'rt, code: 'code");
           break;
         case TGEU:
-          Format(instr, "tgeu 'rs, 'rt, code: 'code");
+          Format(instr, "tgeu    'rs, 'rt, code: 'code");
           break;
         case TLT:
-          Format(instr, "tlt  'rs, 'rt, code: 'code");
+          Format(instr, "tlt     'rs, 'rt, code: 'code");
           break;
         case TLTU:
-          Format(instr, "tltu 'rs, 'rt, code: 'code");
+          Format(instr, "tltu    'rs, 'rt, code: 'code");
           break;
         case TEQ:
-          Format(instr, "teq  'rs, 'rt, code: 'code");
+          Format(instr, "teq     'rs, 'rt, code: 'code");
           break;
         case TNE:
-          Format(instr, "tne  'rs, 'rt, code: 'code");
+          Format(instr, "tne     'rs, 'rt, code: 'code");
           break;
         case MOVZ:
-          Format(instr, "movz 'rd, 'rs, 'rt");
+          Format(instr, "movz    'rd, 'rs, 'rt");
           break;
         case MOVN:
-          Format(instr, "movn 'rd, 'rs, 'rt");
+          Format(instr, "movn    'rd, 'rs, 'rt");
           break;
         case MOVCI:
           if (instr->Bit(16)) {
-            Format(instr, "movt 'rd, 'rs, 'Cc");
+            Format(instr, "movt    'rd, 'rs, 'bc");
           } else {
-            Format(instr, "movf 'rd, 'rs, 'Cc");
+            Format(instr, "movf    'rd, 'rs, 'bc");
           }
           break;
         default:
@@ -729,10 +758,10 @@
     case SPECIAL2:
       switch (instr->FunctionFieldRaw()) {
         case MUL:
-          Format(instr, "mul  'rd, 'rs, 'rt");
+          Format(instr, "mul     'rd, 'rs, 'rt");
           break;
         case CLZ:
-          Format(instr, "clz  'rd, 'rs");
+          Format(instr, "clz     'rd, 'rs");
           break;
         default:
           UNREACHABLE();
@@ -742,7 +771,7 @@
       switch (instr->FunctionFieldRaw()) {
         case INS: {
           if (mips32r2) {
-            Format(instr, "ins  'rt, 'rs, 'sd, 'sa");
+            Format(instr, "ins     'rt, 'rs, 'sa, 'ss2");
           } else {
             Unknown(instr);
           }
@@ -750,7 +779,7 @@
         }
         case EXT: {
           if (mips32r2) {
-            Format(instr, "ext  'rt, 'rs, 'sd, 'sa");
+            Format(instr, "ext     'rt, 'rs, 'sa, 'ss1");
           } else {
             Unknown(instr);
           }
@@ -785,16 +814,16 @@
     case REGIMM:
       switch (instr->RtFieldRaw()) {
         case BLTZ:
-          Format(instr, "bltz 'rs, 'imm16u");
+          Format(instr, "bltz    'rs, 'imm16u");
           break;
         case BLTZAL:
-          Format(instr, "bltzal 'rs, 'imm16u");
+          Format(instr, "bltzal  'rs, 'imm16u");
           break;
         case BGEZ:
-          Format(instr, "bgez 'rs, 'imm16u");
+          Format(instr, "bgez    'rs, 'imm16u");
           break;
         case BGEZAL:
-          Format(instr, "bgezal 'rs, 'imm16u");
+          Format(instr, "bgezal  'rs, 'imm16u");
           break;
         default:
           UNREACHABLE();
@@ -802,90 +831,90 @@
     break;  // Case REGIMM.
     // ------------- Branch instructions.
     case BEQ:
-      Format(instr, "beq  'rs, 'rt, 'imm16u");
+      Format(instr, "beq     'rs, 'rt, 'imm16u");
       break;
     case BNE:
-      Format(instr, "bne  'rs, 'rt, 'imm16u");
+      Format(instr, "bne     'rs, 'rt, 'imm16u");
       break;
     case BLEZ:
-      Format(instr, "blez 'rs, 'imm16u");
+      Format(instr, "blez    'rs, 'imm16u");
       break;
     case BGTZ:
-      Format(instr, "bgtz 'rs, 'imm16u");
+      Format(instr, "bgtz    'rs, 'imm16u");
       break;
     // ------------- Arithmetic instructions.
     case ADDI:
-      Format(instr, "addi   'rt, 'rs, 'imm16s");
+      Format(instr, "addi    'rt, 'rs, 'imm16s");
       break;
     case ADDIU:
-      Format(instr, "addiu  'rt, 'rs, 'imm16s");
+      Format(instr, "addiu   'rt, 'rs, 'imm16s");
       break;
     case SLTI:
-      Format(instr, "slti   'rt, 'rs, 'imm16s");
+      Format(instr, "slti    'rt, 'rs, 'imm16s");
       break;
     case SLTIU:
-      Format(instr, "sltiu  'rt, 'rs, 'imm16u");
+      Format(instr, "sltiu   'rt, 'rs, 'imm16u");
       break;
     case ANDI:
-      Format(instr, "andi   'rt, 'rs, 'imm16x");
+      Format(instr, "andi    'rt, 'rs, 'imm16x");
       break;
     case ORI:
-      Format(instr, "ori    'rt, 'rs, 'imm16x");
+      Format(instr, "ori     'rt, 'rs, 'imm16x");
       break;
     case XORI:
-      Format(instr, "xori   'rt, 'rs, 'imm16x");
+      Format(instr, "xori    'rt, 'rs, 'imm16x");
       break;
     case LUI:
-      Format(instr, "lui    'rt, 'imm16x");
+      Format(instr, "lui     'rt, 'imm16x");
       break;
     // ------------- Memory instructions.
     case LB:
-      Format(instr, "lb     'rt, 'imm16s('rs)");
+      Format(instr, "lb      'rt, 'imm16s('rs)");
       break;
     case LH:
-      Format(instr, "lh     'rt, 'imm16s('rs)");
+      Format(instr, "lh      'rt, 'imm16s('rs)");
       break;
     case LWL:
-      Format(instr, "lwl    'rt, 'imm16s('rs)");
+      Format(instr, "lwl     'rt, 'imm16s('rs)");
       break;
     case LW:
-      Format(instr, "lw     'rt, 'imm16s('rs)");
+      Format(instr, "lw      'rt, 'imm16s('rs)");
       break;
     case LBU:
-      Format(instr, "lbu    'rt, 'imm16s('rs)");
+      Format(instr, "lbu     'rt, 'imm16s('rs)");
       break;
     case LHU:
-      Format(instr, "lhu    'rt, 'imm16s('rs)");
+      Format(instr, "lhu     'rt, 'imm16s('rs)");
       break;
     case LWR:
-      Format(instr, "lwr    'rt, 'imm16s('rs)");
+      Format(instr, "lwr     'rt, 'imm16s('rs)");
       break;
     case SB:
-      Format(instr, "sb     'rt, 'imm16s('rs)");
+      Format(instr, "sb      'rt, 'imm16s('rs)");
       break;
     case SH:
-      Format(instr, "sh     'rt, 'imm16s('rs)");
+      Format(instr, "sh      'rt, 'imm16s('rs)");
       break;
     case SWL:
-      Format(instr, "swl    'rt, 'imm16s('rs)");
+      Format(instr, "swl     'rt, 'imm16s('rs)");
       break;
     case SW:
-      Format(instr, "sw     'rt, 'imm16s('rs)");
+      Format(instr, "sw      'rt, 'imm16s('rs)");
       break;
     case SWR:
-      Format(instr, "swr    'rt, 'imm16s('rs)");
+      Format(instr, "swr     'rt, 'imm16s('rs)");
       break;
     case LWC1:
-      Format(instr, "lwc1   'ft, 'imm16s('rs)");
+      Format(instr, "lwc1    'ft, 'imm16s('rs)");
       break;
     case LDC1:
-      Format(instr, "ldc1   'ft, 'imm16s('rs)");
+      Format(instr, "ldc1    'ft, 'imm16s('rs)");
       break;
     case SWC1:
-      Format(instr, "swc1   'ft, 'imm16s('rs)");
+      Format(instr, "swc1    'ft, 'imm16s('rs)");
       break;
     case SDC1:
-      Format(instr, "sdc1   'ft, 'imm16s('rs)");
+      Format(instr, "sdc1    'ft, 'imm16s('rs)");
       break;
     default:
       UNREACHABLE();
@@ -897,10 +926,10 @@
 void Decoder::DecodeTypeJump(Instruction* instr) {
   switch (instr->OpcodeFieldRaw()) {
     case J:
-      Format(instr, "j    'imm26");
+      Format(instr, "j       'imm26");
       break;
     case JAL:
-      Format(instr, "jal  'imm26");
+      Format(instr, "jal     'imm26");
       break;
     default:
       UNREACHABLE();
@@ -909,7 +938,7 @@
 
 
 // Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte_* instr_ptr) {
+int Decoder::InstructionDecode(byte* instr_ptr) {
   Instruction* instr = Instruction::At(instr_ptr);
   // Print raw instruction bytes.
   out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -944,15 +973,13 @@
 
 namespace disasm {
 
-using v8::internal::byte_;
-
-const char* NameConverter::NameOfAddress(byte_* addr) const {
+const char* NameConverter::NameOfAddress(byte* addr) const {
   v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
   return tmp_buffer_.start();
 }
 
 
-const char* NameConverter::NameOfConstant(byte_* addr) const {
+const char* NameConverter::NameOfConstant(byte* addr) const {
   return NameOfAddress(addr);
 }
 
@@ -968,12 +995,12 @@
 
 
 const char* NameConverter::NameOfByteCPURegister(int reg) const {
-  UNREACHABLE();  // MIPS does not have the concept of a byte register
+  UNREACHABLE();  // MIPS does not have the concept of a byte register.
   return "nobytereg";
 }
 
 
-const char* NameConverter::NameInCode(byte_* addr) const {
+const char* NameConverter::NameInCode(byte* addr) const {
   // The default name converter is called for unknown code. So we will not try
   // to access any memory.
   return "";
@@ -990,25 +1017,25 @@
 
 
 int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
-                                    byte_* instruction) {
+                                    byte* instruction) {
   v8::internal::Decoder d(converter_, buffer);
   return d.InstructionDecode(instruction);
 }
 
 
 // The MIPS assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
   return -1;
 }
 
 
-void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
   NameConverter converter;
   Disassembler d(converter);
-  for (byte_* pc = begin; pc < end;) {
+  for (byte* pc = begin; pc < end;) {
     v8::internal::EmbeddedVector<char, 128> buffer;
     buffer[0] = '\0';
-    byte_* prev_pc = pc;
+    byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
     fprintf(f, "%p    %08x      %s\n",
             prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index e2e0c91..faaa0e0 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -38,8 +38,7 @@
 
 
 Address ExitFrame::ComputeStackPointer(Address fp) {
-  UNIMPLEMENTED_MIPS();
-  return fp;
+  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
 }
 
 
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index f507590..2e720fb 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -59,7 +59,7 @@
   // Saved temporaries.
   1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
   1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
-  // gp, sp, fp
+  // gp, sp, fp.
   1 << 28 | 1 << 29 | 1 << 30;
 
 static const int kNumCalleeSaved = 11;
@@ -79,6 +79,43 @@
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
+static const int kUndefIndex = -1;
+// Map with indexes on stack that corresponds to codes of saved registers.
+static const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+  kUndefIndex,
+  kUndefIndex,
+  0,  // v0
+  kUndefIndex,
+  1,  // a0
+  2,  // a1
+  3,  // a2
+  4,  // a3
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  5,  // Saved temporaries.
+  6,
+  7,
+  8,
+  9,
+  10,
+  11,
+  12,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  kUndefIndex,
+  13,  // gp
+  14,  // sp
+  15,  // fp
+  kUndefIndex
+};
+
 
 // ----------------------------------------------------
 
@@ -101,22 +138,24 @@
 
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kDebugMarkOffset = -1 * kPointerSize;
-  // Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
-  static const int kCodeOffset = -1 * kPointerSize;
-  static const int kSPOffset = -1 * kPointerSize;
+  // See some explanation in MacroAssembler::EnterExitFrame.
+  // This marks the top of the extra allocated stack space.
+  static const int kStackSpaceOffset = -3 * kPointerSize;
 
-  // TODO(mips): Use a patched sp value on the stack instead.
-  // A marker of 0 indicates that double registers are saved.
-  static const int kMarkerOffset = -2 * kPointerSize;
+  static const int kCodeOffset = -2 * kPointerSize;
+
+  static const int kSPOffset = -1 * kPointerSize;
 
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = +0 * kPointerSize;
   // The calling JS function is between FP and PC.
   static const int kCallerPCOffset = +1 * kPointerSize;
 
+  // MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
+  static const int kCallerSPOffset = +2 * kPointerSize;
+
   // FP-relative displacement of the caller's SP.
-  static const int kCallerSPDisplacement = +3 * kPointerSize;
+  static const int kCallerSPDisplacement = +2 * kPointerSize;
 };
 
 
@@ -135,7 +174,8 @@
   static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
 
   // C/C++ argument slots size.
-  static const int kCArgsSlotsSize = 4 * kPointerSize;
+  static const int kCArgSlotCount = 4;
+  static const int kCArgsSlotsSize = kCArgSlotCount * kPointerSize;
   // JS argument slots size.
   static const int kJSArgsSlotsSize = 0 * kPointerSize;
   // Assembly builtins argument slots size.
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 87507ff..9c93c63 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -38,7 +38,7 @@
 // next call: mov(a0, v0). This is not needed on the other architectures.
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -53,6 +53,73 @@
 
 #define __ ACCESS_MASM(masm_)
 
+
+static unsigned GetPropertyId(Property* property) {
+  if (property->is_synthetic()) return AstNode::kNoNumber;
+  return property->id();
+}
+
+
+// A patch site is a location in the code that can be patched. This class has
+// a number of methods to emit the patchable code and the method EmitPatchInfo
+// to record a marker back to the patchable code. This marker is an andi at,
+// rx, #yyy instruction, where x * 0x0000ffff + yyy (using the raw 16-bit
+// immediate value) is the delta from the pc to the first instruction of the
+// patchable code.
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() {
+    ASSERT(patch_site_.is_bound() == info_emitted_);
+  }
+
+  // When initially emitting this code, ensure that a jump is always generated
+  // to skip the inlined smi code.
+  void EmitJumpIfNotSmi(Register reg, Label* target) {
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    __ bind(&patch_site_);
+    __ andi(at, reg, 0);
+    // Always taken before patched.
+    __ Branch(target, eq, at, Operand(zero_reg));
+  }
+
+  // When initially emitting this code, ensure that a jump is never generated
+  // to skip the inlined smi code.
+  void EmitJumpIfSmi(Register reg, Label* target) {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    __ bind(&patch_site_);
+    __ andi(at, reg, 0);
+    // Never taken before patched.
+    __ Branch(target, ne, at, Operand(zero_reg));
+  }
+
+  void EmitPatchInfo() {
+    int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+    Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
+    __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+#ifdef DEBUG
+    info_emitted_ = true;
+#endif
+  }
+
+  bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right.  The actual
 // argument count matches the formal parameter count expected by the
@@ -68,189 +135,510 @@
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-mips.h for its layout.
 void FullCodeGenerator::Generate(CompilationInfo* info) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(info_ == NULL);
+  info_ = info;
+  SetFunctionPosition(function());
+  Comment cmnt(masm_, "[ function compiled by full code generator");
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop-at");
+  }
+#endif
+
+  // Strict mode functions need to replace the receiver with undefined
+  // when called as functions (without an explicit receiver
+  // object). t1 is zero for method calls and non-zero for function
+  // calls.
+  if (info->is_strict_mode()) {
+    Label ok;
+    __ Branch(&ok, eq, t1, Operand(zero_reg));
+    int receiver_offset = scope()->num_parameters() * kPointerSize;
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ sw(a2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+
+  int locals_count = scope()->num_stack_slots();
+
+  __ Push(ra, fp, cp, a1);
+  if (locals_count > 0) {
+    // Load undefined value here, so the value is ready for the loop
+    // below.
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  }
+  // Adjust fp to point to caller's fp.
+  __ Addu(fp, sp, Operand(2 * kPointerSize));
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    for (int i = 0; i < locals_count; i++) {
+      __ push(at);
+    }
+  }
+
+  bool function_in_register = true;
+
+  // Possibly allocate a local context.
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment cmnt(masm_, "[ Allocate local context");
+    // Argument to NewContext is the function, which is in a1.
+    __ push(a1);
+    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(heap_slots);
+      __ CallStub(&stub);
+    } else {
+      __ CallRuntime(Runtime::kNewContext, 1);
+    }
+    function_in_register = false;
+    // Context is returned in both v0 and cp.  It replaces the context
+    // passed to us.  It's saved in the stack and kept live in cp.
+    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Slot* slot = scope()->parameter(i)->AsSlot();
+      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                                 (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ lw(a0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        __ li(a1, Operand(Context::SlotOffset(slot->index())));
+        __ addu(a2, cp, a1);
+        __ sw(a0, MemOperand(a2, 0));
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use two more registers to avoid
+        // clobbering cp.
+        __ mov(a2, cp);
+        __ RecordWrite(a2, a1, a3);
+      }
+    }
+  }
+
+  Variable* arguments = scope()->arguments();
+  if (arguments != NULL) {
+    // Function uses arguments object.
+    Comment cmnt(masm_, "[ Allocate arguments object");
+    if (!function_in_register) {
+      // Load this again, if it's used by the local context below.
+      __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    } else {
+      __ mov(a3, a1);
+    }
+    // Receiver is just before the parameters on the caller's stack.
+    int offset = scope()->num_parameters() * kPointerSize;
+    __ Addu(a2, fp,
+           Operand(StandardFrameConstants::kCallerSPOffset + offset));
+    __ li(a1, Operand(Smi::FromInt(scope()->num_parameters())));
+    __ Push(a3, a2, a1);
+
+    // Arguments to ArgumentsAccessStub:
+    //   function, receiver address, parameter count.
+    // The stub will rewrite receiver and parameter count if the previous
+    // stack frame was an arguments adapter frame.
+    ArgumentsAccessStub stub(
+        is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
+                         : ArgumentsAccessStub::NEW_NON_STRICT);
+    __ CallStub(&stub);
+
+    Variable* arguments_shadow = scope()->arguments_shadow();
+    if (arguments_shadow != NULL) {
+      // Duplicate the value; move-to-slot operation might clobber registers.
+      __ mov(a3, v0);
+      Move(arguments_shadow->AsSlot(), a3, a1, a2);
+    }
+    Move(arguments->AsSlot(), v0, a1, a2);
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
+  // Visit the declarations and body unless there is an illegal
+  // redeclaration.
+  if (scope()->HasIllegalRedeclaration()) {
+    Comment cmnt(masm_, "[ Declarations");
+    scope()->VisitIllegalRedeclaration(this);
+
+  } else {
+    { Comment cmnt(masm_, "[ Declarations");
+      // For named function expressions, declare the function name as a
+      // constant.
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+      }
+      VisitDeclarations(scope()->declarations());
+    }
+
+    { Comment cmnt(masm_, "[ Stack check");
+      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
+      Label ok;
+      __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+      __ Branch(&ok, hs, sp, Operand(t0));
+      StackCheckStub stub;
+      __ CallStub(&stub);
+      __ bind(&ok);
+    }
+
+    { Comment cmnt(masm_, "[ Body");
+      ASSERT(loop_depth() == 0);
+      VisitStatements(function()->body());
+      ASSERT(loop_depth() == 0);
+    }
+  }
+
+  // Always emit a 'return undefined' in case control fell off the end of
+  // the body.
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  }
+  EmitReturnSequence();
 }
 
 
 void FullCodeGenerator::ClearAccumulator() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(Smi::FromInt(0) == 0);
+  __ mov(v0, zero_reg);
 }
 
 
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Stack check");
+  Label ok;
+  __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(t0));
+  StackCheckStub stub;
+  // Record a mapping of this PC offset to the OSR id.  This is used to find
+  // the AST id from the unoptimized code in order to use it as a key into
+  // the deoptimization input data found in the optimized code.
+  RecordStackCheck(stmt->OsrEntryId());
+
+  __ CallStub(&stub);
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  // Record a mapping of the OSR id to this PC.  This is used if the OSR
+  // entry becomes the target of a bailout.  We don't expect it to be, but
+  // we want it to work if it is.
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
 
 void FullCodeGenerator::EmitReturnSequence() {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Return sequence");
+  if (return_label_.is_bound()) {
+    __ Branch(&return_label_);
+  } else {
+    __ bind(&return_label_);
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in v0.
+      __ push(v0);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
+
+#ifdef DEBUG
+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    masm_->bind(&check_exit_codesize);
+#endif
+    // Make sure that the constant pool is not emitted inside of the return
+    // sequence.
+    { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+      // Here we use masm_-> instead of the __ macro to prevent the code
+      // coverage tool from instrumenting, as we rely on the code size here.
+      int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+      CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+      __ RecordJSReturn();
+      masm_->mov(sp, fp);
+      masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
+      masm_->Addu(sp, sp, Operand(sp_delta));
+      masm_->Jump(ra);
+    }
+
+#ifdef DEBUG
+    // Check that the size of the code used for returning is large enough
+    // for the debugger's requirements.
+    ASSERT(Assembler::kJSReturnSequenceInstructions <=
+           masm_->InstructionsGeneratedSince(&check_exit_codesize));
+#endif
+  }
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->Move(result_register(), slot);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->Move(result_register(), slot);
+  __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
-  UNIMPLEMENTED_MIPS();
+  // For simplicity we always test the accumulator register.
+  codegen()->Move(result_register(), slot);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
+  __ LoadRoot(result_register(), index);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(
     Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
+  __ LoadRoot(result_register(), index);
+  __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  if (index == Heap::kUndefinedValueRootIndex ||
+      index == Heap::kNullValueRootIndex ||
+      index == Heap::kFalseValueRootIndex) {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  } else if (index == Heap::kTrueValueRootIndex) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else {
+    __ LoadRoot(result_register(), index);
+    codegen()->DoTest(true_label_, false_label_, fall_through_);
+  }
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
+  __ li(result_register(), Operand(lit));
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
+  // Immediates cannot be pushed directly.
+  __ li(result_register(), Operand(lit));
+  __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  } else if (lit->IsTrue() || lit->IsJSObject()) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else if (lit->IsString()) {
+    if (String::cast(*lit)->length() == 0) {
+      if (false_label_ != fall_through_) __ Branch(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ Branch(true_label_);
+    }
+  } else if (lit->IsSmi()) {
+    if (Smi::cast(*lit)->value() == 0) {
+      if (false_label_ != fall_through_) __ Branch(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ Branch(true_label_);
+    }
+  } else {
+    // For simplicity we always test the accumulator register.
+    __ li(result_register(), Operand(lit));
+    codegen()->DoTest(true_label_, false_label_, fall_through_);
+  }
 }
 
 
 void FullCodeGenerator::EffectContext::DropAndPlug(int count,
                                                    Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  __ Drop(count);
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
     int count,
     Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  __ Drop(count);
+  __ Move(result_register(), reg);
 }
 
 
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  if (count > 1) __ Drop(count - 1);
+  __ sw(reg, MemOperand(sp, 0));
 }
 
 
 void FullCodeGenerator::TestContext::DropAndPlug(int count,
                                                  Register reg) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count > 0);
+  // For simplicity we always test the accumulator register.
+  __ Drop(count);
+  __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+  codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(materialize_true == materialize_false);
+  __ bind(materialize_true);
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+  __ Branch(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+  __ bind(&done);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(at, Heap::kTrueValueRootIndex);
+  __ push(at);
+  __ Branch(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(at, Heap::kFalseValueRootIndex);
+  __ push(at);
+  __ bind(&done);
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(result_register(), value_root_index);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(at, value_root_index);
+  __ push(at);
 }
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  UNIMPLEMENTED_MIPS();
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  if (flag) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  }
 }
 
 
 void FullCodeGenerator::DoTest(Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  UNIMPLEMENTED_MIPS();
+  if (CpuFeatures::IsSupported(FPU)) {
+    ToBooleanStub stub(result_register());
+    __ CallStub(&stub);
+    __ mov(at, zero_reg);
+  } else {
+    // Call the runtime to find the boolean value of the source and then
+    // translate it into control flow to the pair of labels.
+    __ push(result_register());
+    __ CallRuntime(Runtime::kToBool, 1);
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+  }
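+  // Both paths leave the tested value in v0 and the 'false' representation in
+  // at, so the Split below branches to if_true when the two differ.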
+  Split(ne, v0, Operand(at), if_true, if_false, fall_through);
 }
 
 
-// Original prototype for mips, needs arch-indep change. Leave out for now.
-// void FullCodeGenerator::Split(Condition cc,
-//                               Register lhs,
-//                               const Operand&  rhs,
-//                               Label* if_true,
-//                               Label* if_false,
-//                               Label* fall_through) {
 void FullCodeGenerator::Split(Condition cc,
+                              Register lhs,
+                              const Operand& rhs,
                               Label* if_true,
                               Label* if_false,
                               Label* fall_through) {
-  UNIMPLEMENTED_MIPS();
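+  // Emit at most one branch: whichever target equals fall_through is reached
+  // by simply falling through in the generated code.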
+  if (if_false == fall_through) {
+    __ Branch(if_true, cc, lhs, rhs);
+  } else if (if_true == fall_through) {
+    __ Branch(if_false, NegateCondition(cc), lhs, rhs);
+  } else {
+    __ Branch(if_true, cc, lhs, rhs);
+    __ Branch(if_false);
+  }
 }
 
 
 MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+    case Slot::LOCAL:
+      return MemOperand(fp, SlotOffset(slot));
+    case Slot::CONTEXT: {
+      int context_chain_length =
+          scope()->ContextChainLength(slot->var()->scope());
+      __ LoadContext(scratch, context_chain_length);
+      return ContextOperand(scratch, slot->index());
+    }
+    case Slot::LOOKUP:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return MemOperand(v0, 0);
 }
 
 
 void FullCodeGenerator::Move(Register destination, Slot* source) {
-  UNIMPLEMENTED_MIPS();
+  // Use destination as scratch.
+  MemOperand slot_operand = EmitSlotSearch(source, destination);
+  __ lw(destination, slot_operand);
 }
 
 
@@ -258,7 +646,25 @@
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
-  UNIMPLEMENTED_MIPS();
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  Label skip;
+  if (should_normalize) __ Branch(&skip);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
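+  // When normalizing, the value (expected in a0) is compared against the true
+  // root so that the result is turned into pure control flow.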
+  if (should_normalize) {
+    __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+    Split(eq, a0, Operand(t0), if_true, if_false, NULL);
+    __ bind(&skip);
+  }
 }
 
 
@@ -266,53 +672,532 @@
                              Register src,
                              Register scratch1,
                              Register scratch2) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(dst->type() != Slot::LOOKUP);  // Not yet implemented.
+  ASSERT(!scratch1.is(src) && !scratch2.is(src));
+  MemOperand location = EmitSlotSearch(dst, scratch1);
+  __ sw(src, location);
+  // Emit the write barrier code if the location is in the heap.
+  if (dst->type() == Slot::CONTEXT) {
+    __ RecordWrite(scratch1,
+                   Operand(Context::SlotOffset(dst->index())),
+                   scratch2,
+                   src);
+  }
 }
 
 
 void FullCodeGenerator::EmitDeclaration(Variable* variable,
                                         Variable::Mode mode,
                                         FunctionLiteral* function) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Declaration");
+  ASSERT(variable != NULL);  // Must have been resolved.
+  Slot* slot = variable->AsSlot();
+  Property* prop = variable->AsProperty();
+
+  if (slot != NULL) {
+    switch (slot->type()) {
+      case Slot::PARAMETER:
+      case Slot::LOCAL:
+        if (mode == Variable::CONST) {
+          __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+          __ sw(t0, MemOperand(fp, SlotOffset(slot)));
+        } else if (function != NULL) {
+          VisitForAccumulatorValue(function);
+          __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+        }
+        break;
+
+      case Slot::CONTEXT:
+        // We bypass the general EmitSlotSearch because we know more about
+        // this specific context.
+
+        // The variable in the decl always resides in the current function
+        // context.
+        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+        if (FLAG_debug_code) {
+          // Check that we're not inside a 'with'.
+          __ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
+          __ Check(eq, "Unexpected declaration in current context.",
+                   a1, Operand(cp));
+        }
+        if (mode == Variable::CONST) {
+          __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+          __ sw(at, ContextOperand(cp, slot->index()));
+          // No write barrier since the_hole_value is in old space.
+        } else if (function != NULL) {
+          VisitForAccumulatorValue(function);
+          __ sw(result_register(), ContextOperand(cp, slot->index()));
+          int offset = Context::SlotOffset(slot->index());
+          // We know that we have written a function, which is not a smi.
+          __ mov(a1, cp);
+          __ RecordWrite(a1, Operand(offset), a2, result_register());
+        }
+        break;
+
+      case Slot::LOOKUP: {
+        __ li(a2, Operand(variable->name()));
+        // Declaration nodes are always introduced in one of two modes.
+        ASSERT(mode == Variable::VAR ||
+               mode == Variable::CONST);
+        PropertyAttributes attr =
+            (mode == Variable::VAR) ? NONE : READ_ONLY;
+        __ li(a1, Operand(Smi::FromInt(attr)));
+        // Push initial value, if any.
+        // Note: For variables we must not push an initial value (such as
+        // 'undefined') because we may have a (legal) redeclaration and we
+        // must not destroy the current value.
+        if (mode == Variable::CONST) {
+          __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
+          __ Push(cp, a2, a1, a0);
+        } else if (function != NULL) {
+          __ Push(cp, a2, a1);
+          // Push initial value for function declaration.
+          VisitForStackValue(function);
+        } else {
+          ASSERT(Smi::FromInt(0) == 0);
+          // No initial value!
+          __ mov(a0, zero_reg);  // Smi::FromInt(0) is zero, so use zero_reg.
+          __ Push(cp, a2, a1, a0);
+        }
+        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+        break;
+      }
+    }
+
+  } else if (prop != NULL) {
+    // A const declaration aliasing a parameter is an illegal redeclaration.
+    ASSERT(mode != Variable::CONST);
+    if (function != NULL) {
+      // We are declaring a function that rewrites to a property.
+      // Use (keyed) IC to set the initial value.  We cannot visit the
+      // rewrite because it's shared and we risk recording duplicate AST
+      // IDs for bailouts from optimized code.
+      ASSERT(prop->obj()->AsVariableProxy() != NULL);
+      { AccumulatorValueContext for_object(this);
+        EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+      }
+
+      __ push(result_register());
+      VisitForAccumulatorValue(function);
+      __ mov(a0, result_register());
+      __ pop(a2);
+
+      ASSERT(prop->key()->AsLiteral() != NULL &&
+             prop->key()->AsLiteral()->handle()->IsSmi());
+      __ li(a1, Operand(prop->key()->AsLiteral()->handle()));
+
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+      // Value in v0 is ignored (declarations are statements).
+    }
+  }
 }
 
 
 void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
-  UNIMPLEMENTED_MIPS();
+  EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
 }
 
 
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  UNIMPLEMENTED_MIPS();
+  // Call the runtime to declare the globals.
+  // The context is the first argument.
+  __ li(a2, Operand(pairs));
+  __ li(a1, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+  __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+  __ Push(cp, a2, a1, a0);
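+  // Pushed arguments: context (cp), pairs array (a2), is_eval flag (a1),
+  // strict mode flag (a0).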
+  __ CallRuntime(Runtime::kDeclareGlobals, 4);
+  // Return value is ignored.
 }
 
 
 void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+
+  // Keep the switch value on the stack until a case matches.
+  VisitForStackValue(stmt->tag());
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
+
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as final fall through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForAccumulatorValue(clause->label());
+    __ mov(a0, result_register());  // CompareStub requires args in a0, a1.
+
+    // Perform the comparison as if via '==='.
+    __ lw(a1, MemOperand(sp, 0));  // Switch value.
+    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    JumpPatchSite patch_site(masm_);
+    if (inline_smi_code) {
+      Label slow_case;
+      __ or_(a2, a1, a0);
+      patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+
+      __ Branch(&next_test, ne, a1, Operand(a0));
+      __ Drop(1);  // Switch value is no longer needed.
+      __ Branch(clause->body_target());
+
+      __ bind(&slow_case);
+    }
+
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+    Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+    EmitCallIC(ic, &patch_site, clause->CompareId());
+
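+    // The comparison code returns zero in v0 when the operands are strictly
+    // equal, so a non-zero result falls through to the next test.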
+    __ Branch(&next_test, ne, v0, Operand(zero_reg));
+    __ Drop(1);  // Switch value is no longer needed.
+    __ Branch(clause->body_target());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ Branch(nested_statement.break_target());
+  } else {
+    __ Branch(default_clause->body_target());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target());
+    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
 
 void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ ForInStatement");
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. Both SpiderMonkey and JSC
+  // ignore null and undefined in contrast to the specification; see
+  // ECMA-262 section 12.6.4.
+  VisitForAccumulatorValue(stmt->enumerable());
+  __ mov(a0, result_register());  // Result as param to InvokeBuiltin below.
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&exit, eq, a0, Operand(at));
+  Register null_value = t1;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ Branch(&exit, eq, a0, Operand(null_value));
+
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ JumpIfSmi(a0, &convert);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&done_convert, hs, a1, Operand(FIRST_JS_OBJECT_TYPE));
+  __ bind(&convert);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(a0, v0);
+  __ bind(&done_convert);
+  __ push(a0);
+
+  // Check cache validity in generated code. This is a fast case for
+  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+  // guarantee cache validity, call the runtime system to check cache
+  // validity or get the property names in a fixed array.
+  Label next, call_runtime;
+  // Preload a couple of values used in the loop.
+  Register empty_fixed_array_value = t2;
+  __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Register empty_descriptor_array_value = t3;
+  __ LoadRoot(empty_descriptor_array_value,
+              Heap::kEmptyDescriptorArrayRootIndex);
+  __ mov(a1, a0);
+  __ bind(&next);
+
+  // Check that there are no elements.  Register a1 contains the
+  // current JS object we've reached through the prototype chain.
+  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ Branch(&call_runtime, ne, a2, Operand(empty_fixed_array_value));
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in a2 for the subsequent
+  // prototype load.
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(a3, &call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (a3).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
+  __ JumpIfSmi(a3, &call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  __ Branch(&check_prototype, eq, a1, Operand(a0));
+  __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  __ Branch(&call_runtime, ne, a3, Operand(empty_fixed_array_value));
+
+  // Load the prototype from the map and loop if non-null.
+  __ bind(&check_prototype);
+  __ lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
+  __ Branch(&next, ne, a1, Operand(null_value));
+
+  // The enum cache is valid.  Load the map of the object being
+  // iterated over and use the cache for the iteration.
+  Label use_cache;
+  __ lw(v0, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ Branch(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(a0);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ mov(a2, v0);
+  __ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kMetaMapRootIndex);
+  __ Branch(&fixed_array, ne, a1, Operand(at));
+
+  // We got a map in register v0. Get the enumeration cache from it.
+  __ bind(&use_cache);
+  __ LoadInstanceDescriptors(v0, a1);
+  __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
+  __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ push(v0);  // Map.
+  __ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ li(a0, Operand(Smi::FromInt(0)));
+  // Push enumeration cache, enumeration cache length (as smi) and zero.
+  __ Push(a2, a1, a0);
+  __ jmp(&loop);
+
+  // We got a fixed array in register v0. Iterate through that.
+  __ bind(&fixed_array);
+  __ li(a1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
+  __ Push(a1, v0);
+  __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+  __ li(a0, Operand(Smi::FromInt(0)));
+  __ Push(a1, a0);  // Fixed array length (as smi) and initial index.
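+  // Loop state on the stack (from the top): index, length, enum cache or
+  // fixed array, map (or smi 0 for the slow check), and the enumerable object.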
+
+  // Generate code for doing the condition check.
+  __ bind(&loop);
+  // Load the current count to a0, load the length to a1.
+  __ lw(a0, MemOperand(sp, 0 * kPointerSize));
+  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(loop_statement.break_target(), hs, a0, Operand(a1));
+
+  // Get the current entry of the array into register a3.
+  __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+  __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(t0, a2, t0);  // Array base + scaled (smi) index.
+  __ lw(a3, MemOperand(t0));  // Current entry.
+
+  // Get the expected map from the stack or a zero map in the
+  // permanent slow case into register a2.
+  __ lw(a2, MemOperand(sp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  Label update_each;
+  __ lw(a1, MemOperand(sp, 4 * kPointerSize));
+  __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&update_each, eq, t0, Operand(a2));
+
+  // Convert the entry to a string or (smi) 0 if it isn't a property
+  // any more. If the property has been removed while iterating, we
+  // just skip it.
+  __ push(a1);  // Enumerable.
+  __ push(a3);  // Current entry.
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+  __ mov(a3, result_register());
+  __ Branch(loop_statement.continue_target(), eq, a3, Operand(zero_reg));
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register a3.
+  __ bind(&update_each);
+  __ mov(result_register(), a3);
+  // Perform the assignment as if via '='.
+  { EffectContext context(this);
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
+  }
+
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing
+  // the index (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_target());
+  __ pop(a0);
+  __ Addu(a0, a0, Operand(Smi::FromInt(1)));
+  __ push(a0);
+
+  EmitStackCheck(stmt);
+  __ Branch(&loop);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_target());
+  __ Drop(5);
+
+  // Exit and decrement the loop depth.
+  __ bind(&exit);
+  decrement_loop_depth();
 }
 
 
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
-  UNIMPLEMENTED_MIPS();
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning. If
+  // we're running with the --always-opt or the --prepare-always-opt
+  // flag, we need to use the runtime function so that the new function
+  // we are creating here gets a chance to have its code optimized and
+  // doesn't just get a copy of the existing unoptimized code.
+  if (!FLAG_always_opt &&
+      !FLAG_prepare_always_opt &&
+      !pretenure &&
+      scope()->is_function_scope() &&
+      info->num_literals() == 0) {
+    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
+    __ li(a0, Operand(info));
+    __ push(a0);
+    __ CallStub(&stub);
+  } else {
+    __ li(a0, Operand(info));
+    __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
+                              : Heap::kFalseValueRootIndex);
+    __ Push(cp, a0, a1);
+    __ CallRuntime(Runtime::kNewClosure, 3);
+  }
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ VariableProxy");
+  EmitVariableLoad(expr->var());
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
+    Slot* slot,
+    TypeofState typeof_state,
+    Label* slow) {
+  Register current = cp;
+  Register next = a1;
+  Register temp = a2;
+
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+        __ Branch(slow, ne, temp, Operand(zero_reg));
+      }
+      // Load next context in chain.
+      __ lw(next, ContextOperand(current, Context::CLOSURE_INDEX));
+      __ lw(next, FieldMemOperand(next, JSFunction::kContextOffset));
+      // Walk the rest of the chain without clobbering cp.
+      current = next;
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
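+  // If the walk ended at an eval scope, the rest of the context chain is not
+  // statically known; check extensions dynamically up to the global context.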
+  if (s->is_eval_scope()) {
+    Label loop, fast;
+    if (!current.is(next)) {
+      __ Move(next, current);
+    }
+    __ bind(&loop);
+    // Terminate at global context.
+    __ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+    __ LoadRoot(t0, Heap::kGlobalContextMapRootIndex);
+    __ Branch(&fast, eq, temp, Operand(t0));
+    // Check that extension is NULL.
+    __ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+    __ Branch(slow, ne, temp, Operand(zero_reg));
+    // Load next context in chain.
+    __ lw(next, ContextOperand(next, Context::CLOSURE_INDEX));
+    __ lw(next, FieldMemOperand(next, JSFunction::kContextOffset));
+    __ Branch(&loop);
+    __ bind(&fast);
+  }
+
+  __ lw(a0, GlobalObjectOperand());
+  __ li(a2, Operand(slot->var()->name()));
+  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+      ? RelocInfo::CODE_TARGET
+      : RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  EmitCallIC(ic, mode, AstNode::kNoNumber);
 }
 
 
 MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
     Slot* slot,
     Label* slow) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+  ASSERT(slot->type() == Slot::CONTEXT);
+  Register context = cp;
+  Register next = a3;
+  Register temp = t0;
+
+  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_eval()) {
+        // Check that extension is NULL.
+        __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+        __ Branch(slow, ne, temp, Operand(zero_reg));
+      }
+      __ lw(next, ContextOperand(context, Context::CLOSURE_INDEX));
+      __ lw(next, FieldMemOperand(next, JSFunction::kContextOffset));
+      // Walk the rest of the chain without clobbering cp.
+      context = next;
+    }
+  }
+  // Check that last extension is NULL.
+  __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+  __ Branch(slow, ne, temp, Operand(zero_reg));
+
+  // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+  // destroy the cp register).
+  return ContextOperand(context, slot->index());
 }
 
 
@@ -321,336 +1206,2841 @@
     TypeofState typeof_state,
     Label* slow,
     Label* done) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    Label* slow) {
-  UNIMPLEMENTED_MIPS();
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables.  Eval is used a lot without
+  // introducing variables.  In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
+  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+    EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
+    __ Branch(done);
+  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
+    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+    if (potential_slot != NULL) {
+      // Generate fast case for locals that rewrite to slots.
+      __ lw(v0, ContextSlotOperandCheckExtensions(potential_slot, slow));
+      if (potential_slot->var()->mode() == Variable::CONST) {
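+        // An uninitialized const slot still holds the hole; substitute
+        // undefined via a branchless compare (subu) and conditional move
+        // (movz moves a0 into v0 only when at is zero).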
+        __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+        __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
+        __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+        __ movz(v0, a0, at);  // Conditional move.
+      }
+      __ Branch(done);
+    } else if (rewrite != NULL) {
+      // Generate fast case for calls of an argument function.
+      Property* property = rewrite->AsProperty();
+      if (property != NULL) {
+        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+        Literal* key_literal = property->key()->AsLiteral();
+        if (obj_proxy != NULL &&
+            key_literal != NULL &&
+            obj_proxy->IsArguments() &&
+            key_literal->handle()->IsSmi()) {
+          // Load arguments object if there are no eval-introduced
+          // variables. Then load the argument from the arguments
+          // object using keyed load.
+          __ lw(a1,
+                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
+                                                   slow));
+          __ li(a0, Operand(key_literal->handle()));
+          Handle<Code> ic =
+              isolate()->builtins()->KeyedLoadIC_Initialize();
+          EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+          __ Branch(done);
+        }
+      }
+    }
+  }
 }
 
 
 void FullCodeGenerator::EmitVariableLoad(Variable* var) {
-  UNIMPLEMENTED_MIPS();
+  // Four cases: non-this global variables, lookup slots, all other
+  // types of slots, and parameters that rewrite to explicit property
+  // accesses on the arguments object.
+  Slot* slot = var->AsSlot();
+  Property* property = var->AsProperty();
+
+  if (var->is_global() && !var->is_this()) {
+    Comment cmnt(masm_, "Global variable");
+    // Use inline caching. Variable name is passed in a2 and the global
+    // object (receiver) in a0.
+    __ lw(a0, GlobalObjectOperand());
+    __ li(a2, Operand(var->name()));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+    context()->Plug(v0);
+
+  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    Label done, slow;
+
+    // Generate code for loading from variables potentially shadowed
+    // by eval-introduced variables.
+    EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);
+
+    __ bind(&slow);
+    Comment cmnt(masm_, "Lookup slot");
+    __ li(a1, Operand(var->name()));
+    __ Push(cp, a1);  // Context and name.
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ bind(&done);
+
+    context()->Plug(v0);
+
+  } else if (slot != NULL) {
+    Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+                            ? "Context slot"
+                            : "Stack slot");
+    if (var->mode() == Variable::CONST) {
+      // Constants may be the hole value if they have not been initialized.
+      // Unhole them.
+      MemOperand slot_operand = EmitSlotSearch(slot, a0);
+      __ lw(v0, slot_operand);
+      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+      __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
+      __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+      __ movz(v0, a0, at);  // Conditional move.
+      context()->Plug(v0);
+    } else {
+      context()->Plug(slot);
+    }
+  } else {
+    Comment cmnt(masm_, "Rewritten parameter");
+    ASSERT_NOT_NULL(property);
+    // Rewritten parameter accesses are of the form "slot[literal]".
+    // Assert that the object is in a slot.
+    Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+    ASSERT_NOT_NULL(object_var);
+    Slot* object_slot = object_var->AsSlot();
+    ASSERT_NOT_NULL(object_slot);
+
+    // Load the object.
+    Move(a1, object_slot);
+
+    // Assert that the key is a smi.
+    Literal* key_literal = property->key()->AsLiteral();
+    ASSERT_NOT_NULL(key_literal);
+    ASSERT(key_literal->handle()->IsSmi());
+
+    // Load the key.
+    __ li(a0, Operand(key_literal->handle()));
+
+    // Call keyed load IC. It has arguments key and receiver in a0 and a1.
+    Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ RegExpLiteral");
+  Label materialized;
+  // Registers will be used as follows:
+  // t1 = materialized value (RegExp literal)
+  // t0 = JS function, literals array
+  // a3 = literal index
+  // a2 = RegExp pattern
+  // a1 = RegExp flags
+  // a0 = RegExp literal clone
+  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t0, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ lw(t1, FieldMemOperand(t0, literal_offset));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&materialized, ne, t1, Operand(at));
+
+  // Create regexp literal using runtime function.
+  // Result will be in v0.
+  __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a2, Operand(expr->pattern()));
+  __ li(a1, Operand(expr->flags()));
+  __ Push(t0, a3, a2, a1);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  __ mov(t1, v0);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ push(t1);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ push(a0);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+  __ pop(t1);
+
+  __ bind(&allocated);
+
+  // After this, registers are used as follows:
+  // v0: Newly allocated regexp.
+  // t1: Materialized regexp.
+  // a2: temp.
+  __ CopyFields(v0, t1, a2.bit(), size / kPointerSize);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ ObjectLiteral");
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a1, Operand(expr->constant_properties()));
+  int flags = expr->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  flags |= expr->has_function()
+      ? ObjectLiteral::kHasFunction
+      : ObjectLiteral::kNoFlags;
+  __ li(a0, Operand(Smi::FromInt(flags)));
+  __ Push(a3, a2, a1, a0);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+  } else {
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+  }
+
+  // If result_saved is true the result is on top of the stack.  If
+  // result_saved is false the result is in v0.
+  bool result_saved = false;
+
+  // Mark all computed expressions that are bound to a key that
+  // is shadowed by a later occurrence of the same key. For the
+  // marked expressions, no store code is emitted.
+  expr->CalculateEmitStore();
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(v0);  // Save result on stack.
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+        // Fall through.
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->handle()->IsSymbol()) {
+          if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            __ mov(a0, result_register());
+            __ li(a2, Operand(key->handle()));
+            __ lw(a1, MemOperand(sp));
+            Handle<Code> ic = is_strict_mode()
+                ? isolate()->builtins()->StoreIC_Initialize_Strict()
+                : isolate()->builtins()->StoreIC_Initialize();
+            EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+          } else {
+            VisitForEffect(value);
+          }
+          break;
+        }
+        // Fall through.
+      case ObjectLiteral::Property::PROTOTYPE:
+        // Duplicate receiver on stack.
+        __ lw(a0, MemOperand(sp));
+        __ push(a0);
+        VisitForStackValue(key);
+        VisitForStackValue(value);
+        if (property->emit_store()) {
+          __ li(a0, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
+          __ push(a0);
+          __ CallRuntime(Runtime::kSetProperty, 4);
+        } else {
+          __ Drop(3);
+        }
+        break;
+      case ObjectLiteral::Property::GETTER:
+      case ObjectLiteral::Property::SETTER:
+        // Duplicate receiver on stack.
+        __ lw(a0, MemOperand(sp));
+        __ push(a0);
+        VisitForStackValue(key);
+        __ li(a1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+                           Smi::FromInt(1) :
+                           Smi::FromInt(0)));
+        __ push(a1);
+        VisitForStackValue(value);
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        break;
+    }
+  }
+
+  if (expr->has_function()) {
+    ASSERT(result_saved);
+    __ lw(a0, MemOperand(sp));
+    __ push(a0);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  if (result_saved) {
+    context()->PlugTOS();
+  } else {
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ ArrayLiteral");
+
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+  __ mov(a0, result_register());
+  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a1, Operand(expr->constant_elements()));
+  __ Push(a3, a2, a1);
+  if (expr->constant_elements()->map() ==
+      isolate()->heap()->fixed_cow_array_map()) {
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+    __ CallStub(&stub);
+    __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
+        1, a1, a2);
+  } else if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
+  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+    __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+  } else {
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
+    __ CallStub(&stub);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  for (int i = 0; i < length; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(v0);
+      result_saved = true;
+    }
+    VisitForAccumulatorValue(subexpr);
+
+    // Store the subexpression value in the array's elements.
+    __ lw(a1, MemOperand(sp));  // Copy of array literal.
+    __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ sw(result_register(), FieldMemOperand(a1, offset));
+
+    // Update the write barrier for the array store with v0 as the scratch
+    // register.
+    __ li(a2, Operand(offset));
+    // TODO(PJ): double check this RecordWrite call.
+    __ RecordWrite(a1, a2, result_register());
+
+    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+  }
+
+  if (result_saved) {
+    context()->PlugTOS();
+  } else {
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Assignment");
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // on the left-hand side.
+  if (!expr->target()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->target());
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* property = expr->target()->AsProperty();
+  if (property != NULL) {
+    assign_type = (property->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the accumulator.
+        VisitForAccumulatorValue(property->obj());
+        __ push(result_register());
+      } else {
+        VisitForStackValue(property->obj());
+      }
+      break;
+    case KEYED_PROPERTY:
+      // We need the key and receiver on both the stack and in v0 and a1.
+      if (expr->is_compound()) {
+        if (property->is_arguments_access()) {
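+          // Rewritten '.arguments[<index>]' accesses load the arguments
+          // object slot and the literal key directly instead of visiting
+          // the subexpressions.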
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ lw(v0, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
+          __ push(v0);
+          __ li(v0, Operand(property->key()->AsLiteral()->handle()));
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForAccumulatorValue(property->key());
+        }
+        __ lw(a1, MemOperand(sp, 0));
+        __ push(v0);
+      } else {
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ lw(a1, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
+          __ li(v0, Operand(property->key()->AsLiteral()->handle()));
+          __ Push(a1, v0);
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForStackValue(property->key());
+        }
+      }
+      break;
+  }
+
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
+  if (expr->is_compound()) {
+    { AccumulatorValueContext context(this);
+      switch (assign_type) {
+        case VARIABLE:
+          EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
+          break;
+        case NAMED_PROPERTY:
+          EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+          break;
+        case KEYED_PROPERTY:
+          EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+          break;
+      }
+    }
+
+    Token::Value op = expr->binary_op();
+    __ push(v0);  // Left operand goes on the stack.
+    VisitForAccumulatorValue(expr->value());
+
+    OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+        ? OVERWRITE_RIGHT
+        : NO_OVERWRITE;
+    SetSourcePosition(expr->position() + 1);
+    AccumulatorValueContext context(this);
+    if (ShouldInlineSmiCase(op)) {
+      EmitInlineSmiBinaryOp(expr->binary_operation(),
+                            op,
+                            mode,
+                            expr->target(),
+                            expr->value());
+    } else {
+      EmitBinaryOp(expr->binary_operation(), op, mode);
+    }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
+  } else {
+    VisitForAccumulatorValue(expr->value());
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             expr->op());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      context()->Plug(v0);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
 }
 
 
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
-  UNIMPLEMENTED_MIPS();
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  __ mov(a0, result_register());
+  __ li(a2, Operand(key->handle()));
+  // Call load IC. It has arguments receiver and property name in a0 and a2.
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  UNIMPLEMENTED_MIPS();
+  SetSourcePosition(prop->position());
+  __ mov(a0, result_register());
+  // Call keyed load IC. It has arguments key and receiver in a0 and a1.
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
-                                              Expression* left,
-                                              Expression* right) {
-  UNIMPLEMENTED_MIPS();
+                                              Expression* left_expr,
+                                              Expression* right_expr) {
+  Label done, smi_case, stub_call;
+
+  Register scratch1 = a2;
+  Register scratch2 = a3;
+
+  // Get the arguments.
+  Register left = a1;
+  Register right = a0;
+  __ pop(left);
+  __ mov(a0, result_register());
+
+  // Perform combined smi check on both operands.
+  __ Or(scratch1, left, Operand(right));
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+  __ bind(&stub_call);
+  BinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+  __ jmp(&done);
+
+  __ bind(&smi_case);
+  // Smi case. This code works the same way as the smi-smi case in the type
+  // recording binary operation stub, see
+  // BinaryOpStub::GenerateSmiSmiOperation for comments.
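+  // Note that the shift cases (SAR, SHL, SHR) below start with an
+  // unconditional branch to the stub, so their smi fast paths are not taken.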
+  switch (op) {
+    case Token::SAR:
+      __ Branch(&stub_call);
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ srav(right, left, scratch1);
+      __ And(v0, right, Operand(~kSmiTagMask));
+      break;
+    case Token::SHL: {
+      __ Branch(&stub_call);
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ sllv(scratch1, scratch1, scratch2);
+      __ Addu(scratch2, scratch1, Operand(0x40000000));
+      __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      break;
+    }
+    case Token::SHR: {
+      __ Branch(&stub_call);
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ srlv(scratch1, scratch1, scratch2);
+      __ And(scratch2, scratch1, 0xc0000000);
+      __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      break;
+    }
+    case Token::ADD:
+      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+      __ BranchOnOverflow(&stub_call, scratch1);
+      break;
+    case Token::SUB:
+      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+      __ BranchOnOverflow(&stub_call, scratch1);
+      break;
+    case Token::MUL: {
+      __ SmiUntag(scratch1, right);
+      __ Mult(left, scratch1);
+      __ mflo(scratch1);
+      __ mfhi(scratch2);
+      __ sra(scratch1, scratch1, 31);
+      __ Branch(&stub_call, ne, scratch1, Operand(scratch2));
+      __ mflo(v0);
+      __ Branch(&done, ne, v0, Operand(zero_reg));
+      __ Addu(scratch2, right, left);
+      __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+      ASSERT(Smi::FromInt(0) == 0);
+      __ mov(v0, zero_reg);
+      break;
+    }
+    case Token::BIT_OR:
+      __ Or(v0, left, Operand(right));
+      break;
+    case Token::BIT_AND:
+      __ And(v0, left, Operand(right));
+      break;
+    case Token::BIT_XOR:
+      __ Xor(v0, left, Operand(right));
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+                                     Token::Value op,
                                      OverwriteMode mode) {
-  UNIMPLEMENTED_MIPS();
+  __ mov(a0, result_register());
+  __ pop(a1);
+  BinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), NULL, expr->id());
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
-  UNIMPLEMENTED_MIPS();
+  // Invalid left-hand sides are rewritten to have a 'throw
+  // ReferenceError' on the left-hand side.
+  if (!expr->IsValidLeftHandSide()) {
+    VisitForEffect(expr);
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->AsProperty();
+  if (prop != NULL) {
+    assign_type = (prop->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EffectContext context(this);
+      EmitVariableAssignment(var, Token::ASSIGN);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      __ push(result_register());  // Preserve value.
+      VisitForAccumulatorValue(prop->obj());
+      __ mov(a1, result_register());
+      __ pop(a0);  // Restore value.
+      __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ push(result_register());  // Preserve value.
+      if (prop->is_synthetic()) {
+        ASSERT(prop->obj()->AsVariableProxy() != NULL);
+        ASSERT(prop->key()->AsLiteral() != NULL);
+        { AccumulatorValueContext for_object(this);
+          EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+        }
+        __ mov(a2, result_register());
+        __ li(a1, Operand(prop->key()->AsLiteral()->handle()));
+      } else {
+        VisitForStackValue(prop->obj());
+        VisitForAccumulatorValue(prop->key());
+        __ mov(a1, result_register());
+        __ pop(a2);
+      }
+      __ pop(a0);  // Restore value.
+      Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+        : isolate()->builtins()->KeyedStoreIC_Initialize();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+      break;
+    }
+  }
+  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op) {
-  UNIMPLEMENTED_MIPS();
+  // Left-hand sides that rewrite to explicit property accesses do not reach
+  // here.
+  ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->AsSlot() != NULL);
+
+  if (var->is_global()) {
+    ASSERT(!var->is_this());
+    // Assignment to a global variable.  Use inline caching for the
+    // assignment.  Right-hand-side value is passed in a0, variable name in
+    // a2, and the global object in a1.
+    __ mov(a0, result_register());
+    __ li(a2, Operand(var->name()));
+    __ lw(a1, GlobalObjectOperand());
+    Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+
+  } else if (op == Token::INIT_CONST) {
+    // Like var declarations, const declarations are hoisted to function
+    // scope.  However, unlike var initializers, const initializers are able
+    // to drill a hole to that function context, even from inside a 'with'
+    // context.  We thus bypass the normal static scope lookup.
+    Slot* slot = var->AsSlot();
+    Label skip;
+    switch (slot->type()) {
+      case Slot::PARAMETER:
+        // No const parameters.
+        UNREACHABLE();
+        break;
+      case Slot::LOCAL:
+        // Detect const reinitialization by checking for the hole value.
+        __ lw(a1, MemOperand(fp, SlotOffset(slot)));
+        __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+        __ Branch(&skip, ne, a1, Operand(t0));
+        __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+        break;
+      case Slot::CONTEXT: {
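+        // Detect const reinitialization by checking for the hole value in
+        // the function context slot.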
+        __ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
+        __ lw(a2, ContextOperand(a1, slot->index()));
+        __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+        __ Branch(&skip, ne, a2, Operand(t0));
+        __ sw(result_register(), ContextOperand(a1, slot->index()));
+        int offset = Context::SlotOffset(slot->index());
+        __ mov(a3, result_register());  // Preserve the stored value in v0.
+        __ RecordWrite(a1, Operand(offset), a3, a2);
+        break;
+      }
+      case Slot::LOOKUP:
+        __ push(result_register());
+        __ li(a0, Operand(slot->var()->name()));
+        __ Push(cp, a0);  // Context and name.
+        __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+        break;
+    }
+    __ bind(&skip);
+
+  } else if (var->mode() != Variable::CONST) {
+    // Perform the assignment for non-const variables.  Const assignments
+    // are simply skipped.
+    Slot* slot = var->AsSlot();
+    switch (slot->type()) {
+      case Slot::PARAMETER:
+      case Slot::LOCAL:
+        // Perform the assignment.
+        __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
+        break;
+
+      case Slot::CONTEXT: {
+        MemOperand target = EmitSlotSearch(slot, a1);
+        // Perform the assignment and issue the write barrier.
+        __ sw(result_register(), target);
+        // RecordWrite may destroy all its register arguments.
+        __ mov(a3, result_register());
+        int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+        __ RecordWrite(a1, Operand(offset), a2, a3);
+        break;
+      }
+
+      case Slot::LOOKUP:
+        // Call the runtime for the assignment.
+        __ push(v0);  // Value.
+        __ li(a1, Operand(slot->var()->name()));
+        __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
+        __ Push(cp, a1, a0);  // Context, name, strict mode.
+        __ CallRuntime(Runtime::kStoreContextSlot, 4);
+        break;
+    }
+  }
 }
 
 
 void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
-  UNIMPLEMENTED_MIPS();
+  // Assignment to a property, using a named store IC.
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(prop != NULL);
+  ASSERT(prop->key()->AsLiteral() != NULL);
+
+  // If the assignment starts a block of assignments to the same object,
+  // change to slow case to avoid the quadratic behavior of repeatedly
+  // adding fast properties.
+  if (expr->starts_initialization_block()) {
+    __ push(result_register());
+    __ lw(t0, MemOperand(sp, kPointerSize));  // Receiver is now under value.
+    __ push(t0);
+    __ CallRuntime(Runtime::kToSlowProperties, 1);
+    __ pop(result_register());
+  }
+
+  // Record source code position before IC call.
+  SetSourcePosition(expr->position());
+  __ mov(a0, result_register());  // Load the value.
+  __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
+  // Load receiver to a1. Leave a copy in the stack if needed for turning the
+  // receiver into fast case.
+  if (expr->ends_initialization_block()) {
+    __ lw(a1, MemOperand(sp));
+  } else {
+    __ pop(a1);
+  }
+
+  Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+
+  // If the assignment ends an initialization block, revert to fast case.
+  if (expr->ends_initialization_block()) {
+    __ push(v0);  // Result of assignment, saved even if not needed.
+    // Receiver is under the result value.
+    __ lw(t0, MemOperand(sp, kPointerSize));
+    __ push(t0);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+    __ pop(v0);
+    __ Drop(1);
+  }
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
-  UNIMPLEMENTED_MIPS();
+  // Assignment to a property, using a keyed store IC.
+
+  // If the assignment starts a block of assignments to the same object,
+  // change to slow case to avoid the quadratic behavior of repeatedly
+  // adding fast properties.
+  if (expr->starts_initialization_block()) {
+    __ push(result_register());
+    // Receiver is now under the key and value.
+    __ lw(t0, MemOperand(sp, 2 * kPointerSize));
+    __ push(t0);
+    __ CallRuntime(Runtime::kToSlowProperties, 1);
+    __ pop(result_register());
+  }
+
+  // Record source code position before IC call.
+  SetSourcePosition(expr->position());
+  // Call keyed store IC.
+  // The arguments are:
+  // - a0 is the value,
+  // - a1 is the key,
+  // - a2 is the receiver.
+  __ mov(a0, result_register());
+  __ pop(a1);  // Key.
+  // Load receiver to a2. Leave a copy in the stack if needed for turning the
+  // receiver into fast case.
+  if (expr->ends_initialization_block()) {
+    __ lw(a2, MemOperand(sp));
+  } else {
+    __ pop(a2);
+  }
+
+  Handle<Code> ic = is_strict_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+
+  // If the assignment ends an initialization block, revert to fast case.
+  if (expr->ends_initialization_block()) {
+    __ push(v0);  // Result of assignment, saved even if not needed.
+    // Receiver is under the result value.
+    __ lw(t0, MemOperand(sp, kPointerSize));
+    __ push(t0);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+    __ pop(v0);
+    __ Drop(1);
+  }
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitProperty(Property* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+
+  if (key->IsPropertyName()) {
+    VisitForAccumulatorValue(expr->obj());
+    EmitNamedPropertyLoad(expr);
+    context()->Plug(v0);
+  } else {
+    VisitForStackValue(expr->obj());
+    VisitForAccumulatorValue(expr->key());
+    __ pop(a1);
+    EmitKeyedPropertyLoad(expr);
+    context()->Plug(v0);
+  }
 }
 
 
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
+  // Code common for calls using the IC.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+    __ li(a2, Operand(name));
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+  EmitCallIC(ic, mode, expr->id());
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key,
-                                            RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
+                                            Expression* key) {
+  // Load the key.
+  VisitForAccumulatorValue(key);
+
+  // Swap the name of the function and the receiver on the stack to follow
+  // the calling convention for call ICs.
+  __ pop(a1);
+  __ push(v0);
+  __ push(a1);
+
+  // Code common for calls using the IC.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
+  __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, v0);  // Drop the key still on the stack.
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
+  // Code common for calls using the call stub.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub stub(arg_count, in_loop, flags);
+  __ CallStub(&stub);
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, v0);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+                                                      int arg_count) {
+  // Push copy of the first argument or undefined if it doesn't exist.
+  if (arg_count > 0) {
+    __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+  } else {
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+  }
+  __ push(a1);
+
+  // Push the receiver of the enclosing function and do runtime call.
+  __ lw(a1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+  __ push(a1);
+  // Push the strict mode flag.
+  __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+  __ push(a1);
+
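+  // The runtime sees four arguments: the function pushed by the caller, the
+  // copy of the first argument, the enclosing receiver, and the strict mode
+  // flag.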
+  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
+                 : Runtime::kResolvePossiblyDirectEval, 4);
 }
 
 
 void FullCodeGenerator::VisitCall(Call* expr) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function.  Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
+  Comment cmnt(masm_, "[ Call");
+  Expression* fun = expr->expression();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+
+  if (var != NULL && var->is_possibly_eval()) {
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+
+    { PreservePositionScope pos_scope(masm()->positions_recorder());
+      VisitForStackValue(fun);
+      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+      __ push(a2);  // Reserved receiver slot.
+
+      // Push the arguments.
+      for (int i = 0; i < arg_count; i++) {
+        VisitForStackValue(args->at(i));
+      }
+      // If we know that eval can only be shadowed by eval-introduced
+      // variables we attempt to load the global eval function directly
+      // in generated code. If we succeed, there is no need to perform a
+      // context lookup in the runtime system.
+      Label done;
+      if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+        Label slow;
+        EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
+                                          NOT_INSIDE_TYPEOF,
+                                          &slow);
+        // Push the function and resolve eval.
+        __ push(v0);
+        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+        __ jmp(&done);
+        __ bind(&slow);
+      }
+
+      // Push copy of the function (found below the arguments) and
+      // resolve eval.
+      __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+      __ push(a1);
+      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+      if (done.is_linked()) {
+        __ bind(&done);
+      }
+
+      // The runtime call returns a pair of values in v0 (function) and
+      // v1 (receiver). Touch up the stack with the right values.
+      __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+      __ sw(v1, MemOperand(sp, arg_count * kPointerSize));
+    }
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
+    __ CallStub(&stub);
+    RecordJSReturnSite(expr);
+    // Restore context register.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    context()->DropAndPlug(1, v0);
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // Push global object as receiver for the call IC.
+    __ lw(a0, GlobalObjectOperand());
+    __ push(a0);
+    EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
+  } else if (var != NULL && var->AsSlot() != NULL &&
+             var->AsSlot()->type() == Slot::LOOKUP) {
+    // Call to a lookup slot (dynamically introduced variable).
+    Label slow, done;
+
+    { PreservePositionScope scope(masm()->positions_recorder());
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
+                                      NOT_INSIDE_TYPEOF,
+                                      &slow,
+                                      &done);
+    }
+
+    __ bind(&slow);
+    // Call the runtime to find the function to call (returned in v0)
+    // and the object holding it (returned in v1).
+    __ push(context_register());
+    __ li(a2, Operand(var->name()));
+    __ push(a2);
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ Push(v0, v1);  // Function, receiver.
+
+    // If fast case code has been generated, emit code to push the
+    // function and receiver and have the slow path jump around this
+    // code.
+    if (done.is_linked()) {
+      Label call;
+      __ Branch(&call);
+      __ bind(&done);
+      // Push function.
+      __ push(v0);
+      // Push global receiver.
+      __ lw(a1, GlobalObjectOperand());
+      __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+      __ push(a1);
+      __ bind(&call);
+    }
+
+    // The receiver is either the global receiver or an object found
+    // by LoadContextSlot. That object could be the hole if the
+    // receiver is implicitly the global object.
+    EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
+  } else if (fun->AsProperty() != NULL) {
+    // Call to an object property.
+    Property* prop = fun->AsProperty();
+    Literal* key = prop->key()->AsLiteral();
+    if (key != NULL && key->handle()->IsSymbol()) {
+      // Call to a named property, use call IC.
+      { PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(prop->obj());
+      }
+      EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
+    } else {
+      // Call to a keyed property.
+      // For a synthetic property use keyed load IC followed by function call,
+      // for a regular property use EmitKeyedCallWithIC.
+      if (prop->is_synthetic()) {
+        // Do not visit the object and key subexpressions (they are shared
+        // by all occurrences of the same rewritten parameter).
+        ASSERT(prop->obj()->AsVariableProxy() != NULL);
+        ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
+        Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
+        MemOperand operand = EmitSlotSearch(slot, a1);
+        __ lw(a1, operand);
+
+        ASSERT(prop->key()->AsLiteral() != NULL);
+        ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
+        __ li(a0, Operand(prop->key()->AsLiteral()->handle()));
+
+        // Record source code position for IC call.
+        SetSourcePosition(prop->position());
+
+        Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+        EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+        __ lw(a1, GlobalObjectOperand());
+        __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+        __ Push(v0, a1);  // Function, receiver.
+        EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+      } else {
+        { PreservePositionScope scope(masm()->positions_recorder());
+          VisitForStackValue(prop->obj());
+        }
+        EmitKeyedCallWithIC(expr, prop->key());
+      }
+    }
+  } else {
+    { PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(fun);
+    }
+    // Load global receiver object.
+    __ lw(a1, GlobalObjectOperand());
+    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+    __ push(a1);
+    // Emit function call.
+    EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
+  }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  ASSERT(expr->return_is_recorded_);
+#endif
 }
 
 
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+
+  // Push constructor on the stack.  If it's not a function it's used as
+  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+  // ignored.
+  VisitForStackValue(expr->expression());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(expr->position());
+
+  // Load function and argument count into a1 and a0.
+  __ li(a0, Operand(arg_count));
+  __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
+
+  Handle<Code> construct_builtin =
+      isolate()->builtins()->JSConstructCall();
+  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
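+  // A smi has the low tag bit clear, so masking with kSmiTagMask yields zero.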
+  __ And(t0, v0, Operand(kSmiTagMask));
+  Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
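+  // A non-negative smi has both the smi tag bit and the sign bit clear.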
+  __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
+  Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ LoadRoot(at, Heap::kNullValueRootIndex);
+  __ Branch(if_true, eq, v0, Operand(at));
+  __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+  __ Branch(if_false, ne, at, Operand(zero_reg));
+  __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
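+  // Ordinary JS objects have instance types in
+  // [FIRST_JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE]; functions lie above it.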
+  __ Branch(if_false, lt, a1, Operand(FIRST_JS_OBJECT_TYPE));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(le, a1, Operand(LAST_JS_OBJECT_TYPE), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(ge, a1, Operand(FIRST_JS_OBJECT_TYPE),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
     ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  if (FLAG_debug_code) __ AbortIfSmi(v0);
+
+  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ Branch(if_true, ne, t0, Operand(zero_reg));
+
+  // Check for fast case object. Generate false result for slow case object.
+  __ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kHashTableMapRootIndex);
+  __ Branch(if_false, eq, a2, Operand(t0));
+
+  // Look for valueOf symbol in the descriptor array, and indicate false if
+  // found. The type is not checked, so if it is a transition it is a false
+  // negative.
+  __ LoadInstanceDescriptors(a1, t0);
+  __ lw(a3, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  // t0: descriptor array
+  // a3: length of descriptor array
+  // Calculate the end of the descriptor array.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kPointerSize == 4);
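+  // The length in a3 is a smi; shifting by (kPointerSizeLog2 - kSmiTagSize)
+  // converts it to the byte size of the array, length * kPointerSize.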
+  __ Addu(a2, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t1, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(a2, a2, t1);
+
+  // Calculate location of the first key name.
+  __ Addu(t0,
+          t0,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+                  DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // symbol valueOf the result is false.
+  Label entry, loop;
+  // The use of t2 to store the valueOf symbol assumes that it is not otherwise
+  // used in the loop below.
+  __ li(t2, Operand(FACTORY->value_of_symbol()));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ lw(a3, MemOperand(t0, 0));
+  __ Branch(if_false, eq, a3, Operand(t2));
+  __ Addu(t0, t0, Operand(kPointerSize));
+  __ bind(&entry);
+  __ Branch(&loop, ne, t0, Operand(a2));
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is false.
+  __ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+  __ JumpIfSmi(a2, if_false);
+  __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ lw(a3, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
+  __ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ Branch(if_false, ne, a2, Operand(a3));
+
+  // Set the bit in the map to indicate that it has been checked safe for
+  // default valueOf and set true result.
+  __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ jmp(if_true);
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a2);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+  __ Branch(if_false);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a1, Operand(JS_ARRAY_TYPE),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  // Get the frame pointer for the calling frame.
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&check_frame_marker, ne,
+            a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ pop(a1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in a1 and the formal
+  // parameter count in a0.
+  VisitForAccumulatorValue(args->at(0));
+  __ mov(a1, v0);
+  __ li(a0, Operand(Smi::FromInt(scope()->num_parameters())));
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 0);
+
+  Label exit;
+  // Get the number of formal parameters.
+  __ li(v0, Operand(Smi::FromInt(scope()->num_parameters())));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&exit, ne, a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ lw(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForAccumulatorValue(args->at(0));
+
+  // If the object is a smi, we return null.
+  __ JumpIfSmi(v0, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  __ GetObjectType(v0, v0, a1);  // Map is now in v0.
+  __ Branch(&null, lt, a1, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  __ Branch(&function, eq, a1, Operand(JS_FUNCTION_TYPE));
+
+  // Check if the constructor in the map is a function.
+  __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
+
+  // v0 now contains the constructor function. Grab the
+  // instance class name from there.
+  __ lw(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ Branch(&done);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ LoadRoot(v0, Heap::kfunction_class_symbolRootIndex);
+  __ jmp(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ LoadRoot(v0, Heap::kObject_symbolRootIndex);
+  __ jmp(&done);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ LoadRoot(v0, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ bind(&done);
+
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+    VisitForStackValue(args->at(1));
+    VisitForStackValue(args->at(2));
+    __ CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  // Finally, we're expected to leave a value on the top of the stack.
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 0);
+
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
+
+  // Save the new heap number in callee-saved register s0, since
+  // we call out to external C code below.
+  __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(s0, a1, a2, t6, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
+
+  __ bind(&slow_allocate_heapnumber);
+
+  // Allocate a heap number.
+  __ CallRuntime(Runtime::kNumberAlloc, 0);
+  __ mov(s0, v0);   // Save result in s0, so it survives the C function call.
+
+  __ bind(&heapnumber_allocated);
+
+  // Convert 32 random bits in v0 to 0.(32 random bits) in a double
+  // by computing:
+  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
+  if (CpuFeatures::IsSupported(FPU)) {
+    __ PrepareCallCFunction(1, a0);
+    __ li(a0, Operand(ExternalReference::isolate_address()));
+    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+
+    CpuFeatures::Scope scope(FPU);
+    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+    __ li(a1, Operand(0x41300000));
+    // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
+    __ Move(f12, v0, a1);
+    // Move 0x4130000000000000 to FPU.
+    __ Move(f14, zero_reg, a1);
+    // Subtract and store the result in the heap number.
+    __ sub_d(f0, f12, f14);
+    __ sdc1(f0, MemOperand(s0, HeapNumber::kValueOffset - kHeapObjectTag));
+    __ mov(v0, s0);
+  } else {
+    __ PrepareCallCFunction(2, a0);
+    __ mov(a0, s0);
+    __ li(a1, Operand(ExternalReference::isolate_address()));
+    __ CallCFunction(
+        ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
+  }
+
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub;
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub;
+  ASSERT(args->length() == 4);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  VisitForStackValue(args->at(3));
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label done;
+  // If the object is a smi return the object.
+  __ JumpIfSmi(v0, &done);
+  // If the object is not a value type, return the object.
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE));
+
+  __ lw(v0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the arguments on the stack and call the runtime function.
+  ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  MathPowStub stub;
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));  // Load the object.
+  VisitForAccumulatorValue(args->at(1));  // Load the value.
+  __ pop(a1);  // v0 = value. a1 = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ JumpIfSmi(a1, &done);
+
+  // If the object is not a value type, return the value.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
+
+  // Store the value.
+  __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and call the stub.
+  VisitForStackValue(args->at(0));
+
+  NumberToStringStub stub;
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label done;
+  StringCharFromCodeGenerator generator(v0, a1);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(a1);
 }
 
 
 void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+  __ mov(a0, result_register());
+
+  Register object = a1;
+  Register index = a0;
+  Register scratch = a2;
+  Register result = v0;
+
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharCodeAtGenerator generator(object,
+                                      index,
+                                      scratch,
+                                      result,
+                                      &need_conversion,
+                                      &need_conversion,
+                                      &index_out_of_range,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // NaN.
+  __ LoadRoot(result, Heap::kNanValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Load the undefined value into the result register, which will
+  // trigger conversion.
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 2);
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+  __ mov(a0, result_register());
+
+  Register object = a1;
+  Register index = a0;
+  Register scratch1 = a2;
+  Register scratch2 = a3;
+  Register result = v0;
+
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &need_conversion,
+                                  &need_conversion,
+                                  &index_out_of_range,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger
+  // conversion.
+  __ li(result, Operand(Smi::FromInt(0)));
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
 }
 
 
 void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+
+  StringCompareStub stub;
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime function.
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallRuntime(Runtime::kMath_sqrt, 1);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() >= 2);
+
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
+  }
+  VisitForAccumulatorValue(args->last());  // Function.
+
+  // InvokeFunction requires the function in a1. Move it in there.
+  __ mov(a1, result_register());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(a1, count, CALL_FUNCTION);
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  RegExpConstructResultStub stub;
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  __ CallStub(&stub);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  Label done;
+  Label slow_case;
+  Register object = a0;
+  Register index1 = a1;
+  Register index2 = a2;
+  Register elements = a3;
+  Register scratch1 = t0;
+  Register scratch2 = t1;
+
+  __ lw(object, MemOperand(sp, 2 * kPointerSize));
+  // Fetch the map and check if array is in fast case.
+  // Check that object doesn't require security checks and
+  // has no indexed interceptor.
+  __ GetObjectType(object, scratch1, scratch2);
+  __ Branch(&slow_case, ne, scratch2, Operand(JS_ARRAY_TYPE));
+  // Map is now in scratch1.
+
+  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+  __ And(scratch2, scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+  __ Branch(&slow_case, ne, scratch2, Operand(zero_reg));
+
+  // Check the object's elements are in fast case and writable.
+  __ lw(elements, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow_case, ne, scratch1, Operand(scratch2));
+
+  // Check that both indices are smis.
+  __ lw(index1, MemOperand(sp, 1 * kPointerSize));
+  __ lw(index2, MemOperand(sp, 0));
+  __ JumpIfNotBothSmi(index1, index2, &slow_case);
+
+  // Check that both indices are valid.
+  Label not_hi;
+  __ lw(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
+  __ Branch(&slow_case, ls, scratch1, Operand(index1));
+  __ Branch(&not_hi, NegateCondition(hi), scratch1, Operand(index1));
+  __ Branch(&slow_case, ls, scratch1, Operand(index2));
+  __ bind(&not_hi);
+
+  // Bring the address of the elements into index1 and index2.
+  __ Addu(scratch1, elements,
+      Operand(FixedArray::kHeaderSize - kHeapObjectTag));
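+  // The indices are smis; scaling by (kPointerSizeLog2 - kSmiTagSize)
+  // turns them into byte offsets from the start of the elements.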
+  __ sll(index1, index1, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(index1, scratch1, index1);
+  __ sll(index2, index2, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(index2, scratch1, index2);
+
+  // Swap elements.
+  __ lw(scratch1, MemOperand(index1, 0));
+  __ lw(scratch2, MemOperand(index2, 0));
+  __ sw(scratch1, MemOperand(index2, 0));
+  __ sw(scratch2, MemOperand(index1, 0));
+
+  Label new_space;
+  __ InNewSpace(elements, scratch1, eq, &new_space);
+  // Possible optimization: do a check that both values are Smis
+  // (or them and test against Smi mask).
+
+  __ mov(scratch1, elements);
+  __ RecordWriteHelper(elements, index1, scratch2);
+  __ RecordWriteHelper(scratch1, index2, scratch2);  // scratch1 holds elements.
+
+  __ bind(&new_space);
+  // We are done. Drop elements from the stack, and return undefined.
+  __ Drop(3);
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&slow_case);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      isolate()->global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+    context()->Plug(v0);
+    return;
+  }
+
+  VisitForAccumulatorValue(args->at(1));
+
+  Register key = v0;
+  Register cache = a1;
+  __ lw(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ lw(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+  __ lw(cache,
+         ContextOperand(
+             cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ lw(cache,
+         FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+
+  Label done, not_found;
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ lw(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+  // a2 now holds finger offset as a smi.
+  __ Addu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // a3 now points to the start of fixed array elements.
+  __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(a3, a3, at);
+  // a3 now points to key of indexed element of cache.
+  __ lw(a2, MemOperand(a3));
+  __ Branch(&not_found, ne, key, Operand(a2));
+
+  __ lw(v0, MemOperand(a3, kPointerSize));
+  __ Branch(&done);
+
+  __ bind(&not_found);
+  // Call runtime to perform the lookup.
+  __ Push(cache, key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ bind(&done);
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(2, args->length());
+
+  Register right = v0;
+  Register left = a1;
+  Register tmp = a2;
+  Register tmp2 = a3;
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));  // Result (right) in v0.
+  __ pop(left);
+
+  Label done, fail, ok;
+  __ Branch(&ok, eq, left, Operand(right));
+  // Fail if either is a non-HeapObject.
+  __ And(tmp, left, Operand(right));
+  __ And(at, tmp, Operand(kSmiTagMask));
+  __ Branch(&fail, eq, at, Operand(zero_reg));
+  __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+  __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
+  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ Branch(&fail, ne, tmp, Operand(tmp2));
+  __ lw(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
+  __ lw(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
+  __ Branch(&ok, eq, tmp, Operand(tmp2));
+  __ bind(&fail);
+  __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+  __ jmp(&done);
+  __ bind(&ok);
+  __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+  __ bind(&done);
+
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
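+  // The hash field caches an array index when the bits selected by
+  // kContainsCachedArrayIndexMask are all zero.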
+  __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
+  __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
+
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotString(v0);
+  }
+
+  __ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
+  __ IndexFromHash(v0, v0);
+
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+  Label bailout, done, one_char_separator, long_separator,
+      non_trivial_array, not_size_one_array, loop,
+      empty_separator_loop, one_char_separator_loop,
+      one_char_separator_loop_entry, long_separator_loop;
+
+  ASSERT(args->length() == 2);
+  VisitForStackValue(args->at(1));
+  VisitForAccumulatorValue(args->at(0));
+
+  // All aliases of the same register have disjoint lifetimes.
+  Register array = v0;
+  Register elements = no_reg;  // Will be v0.
+  Register result = no_reg;  // Will be v0.
+  Register separator = a1;
+  Register array_length = a2;
+  Register result_pos = no_reg;  // Will be a2.
+  Register string_length = a3;
+  Register string = t0;
+  Register element = t1;
+  Register elements_end = t2;
+  Register scratch1 = t3;
+  Register scratch2 = t5;
+  Register scratch3 = t4;
+  Register scratch4 = v1;
+
+  // Separator operand is on the stack.
+  __ pop(separator);
+
+  // Check that the array is a JSArray.
+  __ JumpIfSmi(array, &bailout);
+  __ GetObjectType(array, scratch1, scratch2);
+  __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
+
+  // Check that the array has fast elements.
+  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
+  __ And(scratch3, scratch2, Operand(1 << Map::kHasFastElements));
+  __ Branch(&bailout, eq, scratch3, Operand(zero_reg));
+
+  // If the array has length zero, return the empty string.
+  __ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+  __ SmiUntag(array_length);
+  __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
+  __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+  __ Branch(&done);
+
+  __ bind(&non_trivial_array);
+
+  // Get the FixedArray containing array's elements.
+  elements = array;
+  __ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+  array = no_reg;  // End of array's live range.
+
+  // Check that all array elements are sequential ASCII strings, and
+  // accumulate the sum of their lengths, as a smi-encoded value.
+  __ mov(string_length, zero_reg);
+  __ Addu(element,
+          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
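+  // array_length has already been untagged, so scale it by the pointer size.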
+  __ sll(elements_end, array_length, kPointerSizeLog2);
+  __ Addu(elements_end, element, elements_end);
+  // Loop condition: while (element < elements_end).
+  // Live values in registers:
+  //   elements: Fixed array of strings.
+  //   array_length: Length of the fixed array of strings (not smi)
+  //   separator: Separator string
+  //   string_length: Accumulated sum of string lengths (smi).
+  //   element: Current array element.
+  //   elements_end: Array end.
+  if (FLAG_debug_code) {
+    __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
+        array_length, Operand(zero_reg));
+  }
+  __ bind(&loop);
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ JumpIfSmi(string, &bailout);
+  __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+  __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+  __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
+  __ BranchOnOverflow(&bailout, scratch3);
+  __ Branch(&loop, lt, element, Operand(elements_end));
+
+  // If array_length is 1, return elements[0], a string.
+  __ Branch(&not_size_one_array, ne, array_length, Operand(1));
+  __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+  __ Branch(&done);
+
+  __ bind(&not_size_one_array);
+
+  // Live values in registers:
+  //   separator: Separator string
+  //   array_length: Length of the array.
+  //   string_length: Sum of string lengths (smi).
+  //   elements: FixedArray of strings.
+
+  // Check that the separator is a flat ASCII string.
+  __ JumpIfSmi(separator, &bailout);
+  __ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+
+  // Add (separator length times array_length) - separator length to the
+  // string_length to get the length of the result string. array_length is
+  // not a smi but the other values are, so the result is a smi.
+  __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+  __ Subu(string_length, string_length, Operand(scratch1));
+  __ Mult(array_length, scratch1);
+  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+  // zero.
+  __ mfhi(scratch2);
+  __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
+  __ mflo(scratch2);
+  __ And(scratch3, scratch2, Operand(0x80000000));
+  __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
+  __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
+  __ BranchOnOverflow(&bailout, scratch3);
+  __ SmiUntag(string_length);
+
+  // Get first element in the array to free up the elements register to be used
+  // for the result.
+  __ Addu(element,
+          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  result = elements;  // End of live range for elements.
+  elements = no_reg;
+  // Live values in registers:
+  //   element: First array element
+  //   separator: Separator string
+  //   string_length: Length of result string (not smi)
+  //   array_length: Length of the array.
+  __ AllocateAsciiString(result,
+                         string_length,
+                         scratch1,
+                         scratch2,
+                         elements_end,
+                         &bailout);
+  // Prepare for looping. Set up elements_end to end of the array. Set
+  // result_pos to the position of the result where to write the first
+  // character.
+  __ sll(elements_end, array_length, kPointerSizeLog2);
+  __ Addu(elements_end, element, elements_end);
+  result_pos = array_length;  // End of live range for array_length.
+  array_length = no_reg;
+  __ Addu(result_pos,
+          result,
+          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+  // Check the length of the separator.
+  __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+  __ li(at, Operand(Smi::FromInt(1)));
+  __ Branch(&one_char_separator, eq, scratch1, Operand(at));
+  __ Branch(&long_separator, gt, scratch1, Operand(at));
+
+  // Empty separator case.
+  __ bind(&empty_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+
+  // Copy next array element to the result.
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
+  ASSERT(result.is(v0));
+  __ Branch(&done);
+
+  // One-character separator case.
+  __ bind(&one_char_separator);
+  // Replace separator with its ascii character value.
+  __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator.
+  __ jmp(&one_char_separator_loop_entry);
+
+  __ bind(&one_char_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Single separator ascii char (in lower byte).
+
+  // Copy the separator character to the result.
+  __ sb(separator, MemOperand(result_pos));
+  __ Addu(result_pos, result_pos, 1);
+
+  // Copy next array element to the result.
+  __ bind(&one_char_separator_loop_entry);
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
+  ASSERT(result.is(v0));
+  __ Branch(&done);
+
+  // Long separator case (separator is more than one character). Entry is at the
+  // label long_separator below.
+  __ bind(&long_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Separator string.
+
+  // Copy the separator to the result.
+  __ lw(string_length, FieldMemOperand(separator, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string,
+          separator,
+          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+
+  __ bind(&long_separator);
+  __ lw(string, MemOperand(element));
+  __ Addu(element, element, kPointerSize);
+  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
+  ASSERT(result.is(v0));
+  __ Branch(&done);
+
+  __ bind(&bailout);
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ bind(&done);
+  context()->Plug(v0);
 }
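
A note on the join fast path above: the mult/mfhi/mflo sequence verifies that
the 32x32 multiplication of the untagged array length by the smi-tagged
separator length leaves the upper 33 bits of the 64-bit product clear, i.e.
that the product is a non-negative value below 2^31 and therefore still a
valid smi. A rough C++ sketch of that invariant (the helper name and signature
are illustrative, not part of V8):

    #include <cstdint>

    // True when the smi-tagged product fits the positive smi range, matching
    // the mfhi == 0 and mflo sign-bit checks in the generated code.
    static bool ProductFitsPositiveSmi(int32_t array_length,
                                       int32_t separator_length_smi) {
      int64_t product = static_cast<int64_t>(array_length) *
                        static_cast<int64_t>(separator_length_smi);
      return (product >> 31) == 0;
    }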
 
 
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  UNIMPLEMENTED_MIPS();
+  Handle<String> name = expr->name();
+  if (name->length() > 0 && name->Get(0) == '_') {
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+
+  if (expr->is_jsruntime()) {
+    // Prepare for calling JS runtime function.
+    __ lw(a0, GlobalObjectOperand());
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset));
+    __ push(a0);
+  }
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  if (expr->is_jsruntime()) {
+    // Call the JS runtime function.
+    __ li(a2, Operand(expr->name()));
+    RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+    Handle<Code> ic =
+        isolate()->stub_cache()->ComputeCallInitialize(arg_count,
+                                                       NOT_IN_LOOP,
+                                                       mode);
+    EmitCallIC(ic, mode, expr->id());
+    // Restore context register.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // Call the C runtime function.
+    __ CallRuntime(expr->function(), arg_count);
+  }
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
-  UNIMPLEMENTED_MIPS();
+  switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* prop = expr->expression()->AsProperty();
+      Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+
+      if (prop != NULL) {
+        if (prop->is_synthetic()) {
+          // Result of deleting parameters is false, even when they rewrite
+          // to accesses on the arguments object.
+          context()->Plug(false);
+        } else {
+          VisitForStackValue(prop->obj());
+          VisitForStackValue(prop->key());
+          __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+          __ push(a1);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+          context()->Plug(v0);
+        }
+      } else if (var != NULL) {
+        // Delete of an unqualified identifier is disallowed in strict mode,
+        // but "delete this" is allowed.
+        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+        if (var->is_global()) {
+          __ lw(a2, GlobalObjectOperand());
+          __ li(a1, Operand(var->name()));
+          __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
+          __ Push(a2, a1, a0);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+          context()->Plug(v0);
+        } else if (var->AsSlot() != NULL &&
+                   var->AsSlot()->type() != Slot::LOOKUP) {
+          // Result of deleting non-global, non-dynamic variables is false.
+          // The subexpression does not have side effects.
+          context()->Plug(false);
+        } else {
+          // Non-global variable.  Call the runtime to try to delete from the
+          // context where the variable was introduced.
+          __ push(context_register());
+          __ li(a2, Operand(var->name()));
+          __ push(a2);
+          __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+          context()->Plug(v0);
+        }
+      } else {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        context()->Plug(true);
+      }
+      break;
+    }
+
+    case Token::VOID: {
+      Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+      VisitForEffect(expr->expression());
+      context()->Plug(Heap::kUndefinedValueRootIndex);
+      break;
+    }
+
+    case Token::NOT: {
+      Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+      if (context()->IsEffect()) {
+        // Unary NOT has no side effects so it's only necessary to visit the
+        // subexpression.  Match the optimizing compiler by not branching.
+        VisitForEffect(expr->expression());
+      } else {
+        Label materialize_true, materialize_false;
+        Label* if_true = NULL;
+        Label* if_false = NULL;
+        Label* fall_through = NULL;
+
+        // Notice that the labels are swapped.
+        context()->PrepareTest(&materialize_true, &materialize_false,
+                               &if_false, &if_true, &fall_through);
+        if (context()->IsTest()) ForwardBailoutToChild(expr);
+        VisitForControl(expr->expression(), if_true, if_false, fall_through);
+        context()->Plug(if_false, if_true);  // Labels swapped.
+      }
+      break;
+    }
+
+    case Token::TYPEOF: {
+      Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+      { StackValueContext context(this);
+        VisitForTypeofValue(expr->expression());
+      }
+      __ CallRuntime(Runtime::kTypeof, 1);
+      context()->Plug(v0);
+      break;
+    }
+
+    case Token::ADD: {
+      Comment cmt(masm_, "[ UnaryOperation (ADD)");
+      VisitForAccumulatorValue(expr->expression());
+      Label no_conversion;
+      __ JumpIfSmi(result_register(), &no_conversion);
+      __ mov(a0, result_register());
+      ToNumberStub convert_stub;
+      __ CallStub(&convert_stub);
+      __ bind(&no_conversion);
+      context()->Plug(result_register());
+      break;
+    }
+
+    case Token::SUB:
+      EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
+      break;
+
+    case Token::BIT_NOT:
+      EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+                                           const char* comment) {
+  // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+  Comment cmt(masm_, comment);
+  bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+  UnaryOverwriteMode overwrite =
+      can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+  UnaryOpStub stub(expr->op(), overwrite);
+  // GenericUnaryOpStub expects the argument to be in a0.
+  VisitForAccumulatorValue(expr->expression());
+  SetSourcePosition(expr->position());
+  __ mov(a0, result_register());
+  EmitCallIC(stub.GetCode(), NULL, expr->id());
+  context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CountOperation");
+  SetSourcePosition(expr->position());
+
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // as the left-hand side.
+  if (!expr->expression()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
+
+  // Expression can only be a property, a global or a (parameter or local)
+  // slot. Variables rewritten to '.arguments' are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->expression()->AsProperty();
+  // In case of a property we use the uninitialized expression context
+  // of the key to detect a named property.
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
+
+  // Evaluate expression and get value.
+  if (assign_type == VARIABLE) {
+    ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+    AccumulatorValueContext context(this);
+    EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+  } else {
+    // Reserve space for result of postfix operation.
+    if (expr->is_postfix() && !context()->IsEffect()) {
+      __ li(at, Operand(Smi::FromInt(0)));
+      __ push(at);
+    }
+    if (assign_type == NAMED_PROPERTY) {
+      // Put the object both on the stack and in the accumulator.
+      VisitForAccumulatorValue(prop->obj());
+      __ push(v0);
+      EmitNamedPropertyLoad(prop);
+    } else {
+      if (prop->is_arguments_access()) {
+        VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
+        __ lw(v0, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
+        __ push(v0);
+        __ li(v0, Operand(prop->key()->AsLiteral()->handle()));
+      } else {
+        VisitForStackValue(prop->obj());
+        VisitForAccumulatorValue(prop->key());
+      }
+      __ lw(a1, MemOperand(sp, 0));
+      __ push(v0);
+      EmitKeyedPropertyLoad(prop);
+    }
+  }
+
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(expr->CountId(), TOS_REG);
+  }
+
+  // Call ToNumber only if operand is not a smi.
+  Label no_conversion;
+  __ JumpIfSmi(v0, &no_conversion);
+  __ mov(a0, v0);
+  ToNumberStub convert_stub;
+  __ CallStub(&convert_stub);
+  __ bind(&no_conversion);
+
+  // Save result for postfix expressions.
+  if (expr->is_postfix()) {
+    if (!context()->IsEffect()) {
+      // Save the result on the stack. If we have a named or keyed property
+      // we store the result under the receiver that is currently on top
+      // of the stack.
+      switch (assign_type) {
+        case VARIABLE:
+          __ push(v0);
+          break;
+        case NAMED_PROPERTY:
+          __ sw(v0, MemOperand(sp, kPointerSize));
+          break;
+        case KEYED_PROPERTY:
+          __ sw(v0, MemOperand(sp, 2 * kPointerSize));
+          break;
+      }
+    }
+  }
+  __ mov(a0, result_register());
+
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
+  int count_value = expr->op() == Token::INC ? 1 : -1;
+  __ li(a1, Operand(Smi::FromInt(count_value)));
+
+  if (ShouldInlineSmiCase(expr->op())) {
+    __ AdduAndCheckForOverflow(v0, a0, a1, t0);
+    __ BranchOnOverflow(&stub_call, t0);  // Do stub on overflow.
+
+    // We could eliminate this smi check if we split the code at
+    // the first smi check before calling ToNumber.
+    patch_site.EmitJumpIfSmi(v0, &done);
+    __ bind(&stub_call);
+  }
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
+  BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+  EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+  __ bind(&done);
+
+  // Store the value returned in v0.
+  switch (assign_type) {
+    case VARIABLE:
+      if (expr->is_postfix()) {
+        { EffectContext context(this);
+          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                                 Token::ASSIGN);
+          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          context.Plug(v0);
+        }
+        // For all contexts except the effect context we have the result on
+        // top of the stack.
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN);
+        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        context()->Plug(v0);
+      }
+      break;
+    case NAMED_PROPERTY: {
+      __ mov(a0, result_register());  // Value.
+      __ li(a2, Operand(prop->key()->AsLiteral()->handle()));  // Name.
+      __ pop(a1);  // Receiver.
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(v0);
+      }
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ mov(a0, result_register());  // Value.
+      __ pop(a1);  // Key.
+      __ pop(a2);  // Receiver.
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(v0);
+      }
+      break;
+    }
+  }
 }
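
The inline smi case in VisitCountOperation above only keeps the result of the
tagged addition when the signed add did not overflow and the sum still carries
a smi tag; otherwise it falls through to the BinaryOpStub call. A small C++
model of that fast-path test, assuming the 32-bit smi layout (the helper name
is hypothetical):

    #include <cstdint>

    // Returns true and writes the new tagged value when the inline path may be
    // used; false corresponds to the overflow branch or the failed smi check.
    static bool TrySmiIncrement(int32_t tagged_value, int32_t tagged_count,
                                int32_t* out) {
      int64_t sum = static_cast<int64_t>(tagged_value) + tagged_count;
      if (sum != static_cast<int32_t>(sum)) return false;  // signed overflow
      if ((sum & 1) != 0) return false;                     // not a smi result
      *out = static_cast<int32_t>(sum);
      return true;
    }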
 
 
 void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
-  UNIMPLEMENTED_MIPS();
+  VariableProxy* proxy = expr->AsVariableProxy();
+  if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) {
+    Comment cmnt(masm_, "Global variable");
+    __ lw(a0, GlobalObjectOperand());
+    __ li(a2, Operand(proxy->name()));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    // Use a regular load, not a contextual load, to avoid a reference
+    // error.
+    EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+    PrepareForBailout(expr, TOS_REG);
+    context()->Plug(v0);
+  } else if (proxy != NULL &&
+             proxy->var()->AsSlot() != NULL &&
+             proxy->var()->AsSlot()->type() == Slot::LOOKUP) {
+    Label done, slow;
+
+    // Generate code for loading from variables potentially shadowed
+    // by eval-introduced variables.
+    Slot* slot = proxy->var()->AsSlot();
+    EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);
+
+    __ bind(&slow);
+    __ li(a0, Operand(proxy->name()));
+    __ Push(cp, a0);
+    __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
+    __ bind(&done);
+
+    context()->Plug(v0);
+  } else {
+    // This expression cannot throw a reference error at the top level.
+    context()->HandleExpression(expr);
+  }
 }
 
 
@@ -660,50 +4050,305 @@
                                           Label* if_true,
                                           Label* if_false,
                                           Label* fall_through) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  if (op != Token::EQ && op != Token::EQ_STRICT) return false;
+
+  // Check for the pattern: typeof <expression> == <string literal>.
+  Literal* right_literal = right->AsLiteral();
+  if (right_literal == NULL) return false;
+  Handle<Object> right_literal_value = right_literal->handle();
+  if (!right_literal_value->IsString()) return false;
+  UnaryOperation* left_unary = left->AsUnaryOperation();
+  if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
+  Handle<String> check = Handle<String>::cast(right_literal_value);
+
+  { AccumulatorValueContext context(this);
+    VisitForTypeofValue(left_unary->expression());
+  }
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  if (check->Equals(isolate()->heap()->number_symbol())) {
+    __ JumpIfSmi(v0, if_true);
+    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->string_symbol())) {
+    __ JumpIfSmi(v0, if_false);
+    // Check for undetectable objects => false.
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE));
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(eq, a1, Operand(zero_reg),
+          if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    __ JumpIfSmi(v0, if_false);
+    // Check for undetectable objects => true.
+    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->function_symbol())) {
+    __ JumpIfSmi(v0, if_false);
+    __ GetObjectType(v0, a1, v0);  // Leave map in a1.
+    Split(ge, v0, Operand(FIRST_FUNCTION_CLASS_TYPE),
+          if_true, if_false, fall_through);
+  } else if (check->Equals(isolate()->heap()->object_symbol())) {
+    __ JumpIfSmi(v0, if_false);
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    // Check for JS objects => true.
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_false, lo, a1, Operand(FIRST_JS_OBJECT_TYPE));
+    __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+    __ Branch(if_false, hs, a1, Operand(FIRST_FUNCTION_CLASS_TYPE));
+    // Check for undetectable objects => false.
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  } else {
+    if (if_false != fall_through) __ jmp(if_false);
+  }
+
+  return true;
 }
 
 
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CompareOperation");
+  SetSourcePosition(expr->position());
+
+  // Always perform the comparison for its control flow.  Pack the result
+  // into the expression's context after the comparison is performed.
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  Token::Value op = expr->op();
+  Expression* left = expr->left();
+  Expression* right = expr->right();
+  if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+    context()->Plug(if_true, if_false);
+    return;
+  }
+
+  VisitForStackValue(expr->left());
+  switch (op) {
+    case Token::IN:
+      VisitForStackValue(expr->right());
+      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+      __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+      Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
+      break;
+
+    case Token::INSTANCEOF: {
+      VisitForStackValue(expr->right());
+      InstanceofStub stub(InstanceofStub::kNoFlags);
+      __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      // The stub returns 0 for true.
+      Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
+      break;
+    }
+
+    default: {
+      VisitForAccumulatorValue(expr->right());
+      Condition cc = eq;
+      bool strict = false;
+      switch (op) {
+        case Token::EQ_STRICT:
+          strict = true;
+          // Fall through.
+        case Token::EQ:
+          cc = eq;
+          __ mov(a0, result_register());
+          __ pop(a1);
+          break;
+        case Token::LT:
+          cc = lt;
+          __ mov(a0, result_register());
+          __ pop(a1);
+          break;
+        case Token::GT:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = lt;
+          __ mov(a1, result_register());
+          __ pop(a0);
+          break;
+        case Token::LTE:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = ge;
+          __ mov(a1, result_register());
+          __ pop(a0);
+          break;
+        case Token::GTE:
+          cc = ge;
+          __ mov(a0, result_register());
+          __ pop(a1);
+          break;
+        case Token::IN:
+        case Token::INSTANCEOF:
+        default:
+          UNREACHABLE();
+      }
+
+      bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
+      if (inline_smi_code) {
+        Label slow_case;
+        __ Or(a2, a0, Operand(a1));
+        patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+        Split(cc, a1, Operand(a0), if_true, if_false, NULL);
+        __ bind(&slow_case);
+      }
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CompareIC::GetUninitialized(op);
+      EmitCallIC(ic, &patch_site, expr->id());
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
+    }
+  }
+
+  // Convert the result of the comparison into one expected for this
+  // expression's context.
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  UNIMPLEMENTED_MIPS();
+  Comment cmnt(masm_, "[ CompareToNull");
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ mov(a0, result_register());
+  __ LoadRoot(a1, Heap::kNullValueRootIndex);
+  if (expr->is_strict()) {
+    Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
+  } else {
+    __ Branch(if_true, eq, a0, Operand(a1));
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ Branch(if_true, eq, a0, Operand(a1));
+    __ And(at, a0, Operand(kSmiTagMask));
+    __ Branch(if_false, eq, at, Operand(zero_reg));
+    // It can be an undetectable object.
+    __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
+    __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  }
+  context()->Plug(if_true, if_false);
 }
 
 
 void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  context()->Plug(v0);
 }
 
 
 Register FullCodeGenerator::result_register() {
-  UNIMPLEMENTED_MIPS();
   return v0;
 }
 
 
 Register FullCodeGenerator::context_register() {
-  UNIMPLEMENTED_MIPS();
   return cp;
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+                                   RelocInfo::Mode mode,
+                                   unsigned ast_id) {
+  ASSERT(mode == RelocInfo::CODE_TARGET ||
+         mode == RelocInfo::CODE_TARGET_CONTEXT);
+  Counters* counters = isolate()->counters();
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+      __ IncrementCounter(counters->named_load_full(), 1, a1, a2);
+      break;
+    case Code::KEYED_LOAD_IC:
+      __ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
+      break;
+    case Code::STORE_IC:
+      __ IncrementCounter(counters->named_store_full(), 1, a1, a2);
+      break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
+    default:
+      break;
+  }
+  if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
+    __ Call(ic, mode);
+  } else {
+    ASSERT(mode == RelocInfo::CODE_TARGET);
+    mode = RelocInfo::CODE_TARGET_WITH_ID;
+    __ CallWithAstId(ic, mode, ast_id);
+  }
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+                                   JumpPatchSite* patch_site,
+                                   unsigned ast_id) {
+  Counters* counters = isolate()->counters();
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+      __ IncrementCounter(counters->named_load_full(), 1, a1, a2);
+      break;
+    case Code::KEYED_LOAD_IC:
+      __ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
+      break;
+    case Code::STORE_IC:
+      __ IncrementCounter(counters->named_store_full(), 1, a1, a2);
+      break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
+    default:
+      break;
+  }
+
+  if (ast_id == kNoASTId) {
+    __ Call(ic, RelocInfo::CODE_TARGET);
+  } else {
+    __ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+  }
+  if (patch_site != NULL && patch_site->is_bound()) {
+    patch_site->EmitPatchInfo();
+  } else {
+    __ nop();  // Signals no inlined code.
+  }
 }
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  __ sw(value, MemOperand(fp, frame_offset));
 }
 
 
 void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(dst, ContextOperand(cp, context_index));
 }
 
 
@@ -711,12 +4356,28 @@
 // Non-local control flow support.
 
 void FullCodeGenerator::EnterFinallyBlock() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!result_register().is(a1));
+  // Store result register while executing finally block.
+  __ push(result_register());
+  // Cook the return address in the link register and push it on the stack
+  // as a smi-encoded delta from the code object.
+  __ Subu(a1, ra, Operand(masm_->CodeObject()));
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  ASSERT_EQ(0, kSmiTag);
+  __ Addu(a1, a1, Operand(a1));  // Convert to smi.
+  __ push(a1);
 }
 
 
 void FullCodeGenerator::ExitFinallyBlock() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!result_register().is(a1));
+  // Restore result register from stack.
+  __ pop(a1);
+  // Uncook return address and return.
+  __ pop(result_register());
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  __ sra(a1, a1, 1);  // Un-smi-tag value.
+  __ Addu(at, a1, Operand(masm_->CodeObject()));
+  __ Jump(at);
 }
 
 
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index fa8a7bb..12c81c2 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "code-stubs.h"
 #include "ic-inl.h"
 #include "runtime.h"
@@ -48,52 +48,783 @@
 #define __ ACCESS_MASM(masm)
 
 
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+                                            Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+                                                  Register receiver,
+                                                  Register elements,
+                                                  Register scratch0,
+                                                  Register scratch1,
+                                                  Label* miss) {
+  // Register usage:
+  //   receiver: holds the receiver on entry and is unchanged.
+  //   elements: holds the property dictionary on fall through.
+  // Scratch registers:
+  //   scratch0: used to hold the receiver map.
+  //   scratch1: used to hold the receiver instance type, receiver bit mask
+  //     and elements map.
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // Check that the receiver is a valid JS object.
+  __ GetObjectType(receiver, scratch0, scratch1);
+  __ Branch(miss, lt, scratch1, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
+
+  // Check that the global object does not require access checks.
+  __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
+  __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+                                     (1 << Map::kHasNamedInterceptor)));
+  __ Branch(miss, ne, scratch1, Operand(zero_reg));
+
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
+  __ Branch(miss, ne, scratch1, Operand(scratch0));
+}
+
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// result:   Register for the result. It is only updated if a jump to the miss
+//           label is not done. It can be the same as 'elements' or 'name', in
+//           which case that register is clobbered when the miss label is not
+//           taken.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+                                   Label* miss,
+                                   Register elements,
+                                   Register name,
+                                   Register result,
+                                   Register scratch1,
+                                   Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch1,
+                                                     scratch2);
+
+  // If probing finds an entry check that the value is a normal
+  // property.
+  __ bind(&done);  // scratch2 == elements + 4 * index.
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ And(at,
+         scratch1,
+         Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Get the value at the masked, scaled index and return.
+  __ lw(result,
+        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// value:    The value to store.
+// The two scratch registers need to be different from elements, name and
+// value.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned from GenerateStringDictionaryProbes() in scratch2
+// is used.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+                                    Label* miss,
+                                    Register elements,
+                                    Register name,
+                                    Register value,
+                                    Register scratch1,
+                                    Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch1,
+                                                     scratch2);
+
+  // If probing finds an entry in the dictionary check that the value
+  // is a normal property that is not read only.
+  __ bind(&done);  // scratch2 == elements + 4 * index.
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask
+      = (PropertyDetails::TypeField::mask() |
+         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Store the value at the masked, scaled index and return.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+  __ sw(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ mov(scratch1, value);
+  __ RecordWrite(elements, scratch2, scratch1);
+}
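
Both dictionary helpers above test the property-details word, which is stored
as a smi: a load succeeds only for NORMAL properties (zero type field), and a
store additionally rejects read-only properties. A compact C++ sketch of those
two tests (the mask parameters stand in for the PropertyDetails bit fields and
are assumptions for illustration):

    #include <cstdint>

    static const int kSmiTagSize = 1;

    static bool IsNormalProperty(uint32_t details_smi, uint32_t type_mask) {
      return (details_smi & (type_mask << kSmiTagSize)) == 0;
    }

    static bool IsWritableNormalProperty(uint32_t details_smi,
                                         uint32_t type_mask,
                                         uint32_t read_only_bits) {
      return (details_smi & ((type_mask | read_only_bits) << kSmiTagSize)) == 0;
    }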
+
+
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+                                         Label* miss,
+                                         Register elements,
+                                         Register key,
+                                         Register result,
+                                         Register reg0,
+                                         Register reg1,
+                                         Register reg2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'elements' or 'key'.
+  //            Unchanged on bailout so 'elements' or 'key' can still be used
+  //            in further computation.
+  //
+  // Scratch registers:
+  //
+  // reg0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // reg1 - Used to hold the capacity mask of the dictionary.
+  //
+  // reg2 - Used for the index into the dictionary.
+  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
+  Label done;
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  __ nor(reg1, reg0, zero_reg);
+  __ sll(at, reg0, 15);
+  __ addu(reg0, reg1, at);
+
+  // hash = hash ^ (hash >> 12);
+  __ srl(at, reg0, 12);
+  __ xor_(reg0, reg0, at);
+
+  // hash = hash + (hash << 2);
+  __ sll(at, reg0, 2);
+  __ addu(reg0, reg0, at);
+
+  // hash = hash ^ (hash >> 4);
+  __ srl(at, reg0, 4);
+  __ xor_(reg0, reg0, at);
+
+  // hash = hash * 2057;
+  __ li(reg1, Operand(2057));
+  __ mul(reg0, reg0, reg1);
+
+  // hash = hash ^ (hash >> 16);
+  __ srl(at, reg0, 16);
+  __ xor_(reg0, reg0, at);
+
+  // Compute the capacity mask.
+  __ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+  __ sra(reg1, reg1, kSmiTagSize);
+  __ Subu(reg1, reg1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  static const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use reg2 for index calculations and keep the hash intact in reg0.
+    __ mov(reg2, reg0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      __ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
+    }
+    __ and_(reg2, reg2, reg1);
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(NumberDictionary::kEntrySize == 3);
+    __ sll(at, reg2, 1);  // 2x.
+    __ addu(reg2, reg2, at);  // reg2 = reg2 * 3.
+
+    // Check if the key is identical to the name.
+    __ sll(at, reg2, kPointerSizeLog2);
+    __ addu(reg2, elements, at);
+
+    __ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
+    if (i != kProbes - 1) {
+      __ Branch(&done, eq, key, Operand(at));
+    } else {
+      __ Branch(miss, ne, key, Operand(at));
+    }
+  }
+
+  __ bind(&done);
+  // Check that the value is a normal property.
+  // reg2: elements + (index * kPointerSize).
+  const int kDetailsOffset =
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  __ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
+  __ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Get the value at the masked, scaled index and return.
+  const int kValueOffset =
+      NumberDictionary::kElementsStartOffset + kPointerSize;
+  __ lw(result, FieldMemOperand(reg2, kValueOffset));
+}
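
The unrolled probing loop above depends on the integer hash spelled out in its
comments, which must stay in sync with ComputeIntegerHash in utils.h. The same
computation in plain C++, kept here as a readable cross-reference for the
hand-scheduled assembly:

    #include <cstdint>

    static uint32_t ComputeIntegerHash(uint32_t hash) {
      hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }

The probe index is then (hash + GetProbeOffset(i)) & (capacity - 1), scaled by
the three-word entry size before being used to index the dictionary.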
+
+
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  StubCompiler::GenerateLoadArrayLength(masm, a0, a3, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
 
 
 void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  StubCompiler::GenerateLoadStringLength(masm, a0, a1, a3, &miss,
+                                         support_wrappers);
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
 
 
 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  StubCompiler::GenerateLoadFunctionPrototype(masm, a0, a1, a3, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register map,
+                                           Register scratch,
+                                           int interceptor_bit,
+                                           Label* slow) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+  // Get the map of the receiver.
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check bit field.
+  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+  __ Branch(slow, ne, at, Operand(zero_reg));
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing into string
+  // objects work as intended.
+  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+                                  Register receiver,
+                                  Register key,
+                                  Register elements,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  Register result,
+                                  Label* not_fast_array,
+                                  Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // elements - holds the elements of the receiver on exit.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+  //            Unchanged on bailout so 'receiver' and 'key' can be safely
+  //            used by further computation.
+  //
+  // Scratch registers:
+  //
+  // scratch1 - used to hold elements map and elements length.
+  //            Holds the elements map if not_fast_array branch is taken.
+  //
+  // scratch2 - used to hold the loaded value.
+
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode (not dictionary).
+    __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    __ Branch(not_fast_array, ne, scratch1, Operand(at));
+  } else {
+    __ AssertFastElements(elements);
+  }
+
+  // Check that the key (index) is within bounds.
+  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(out_of_range, hs, key, Operand(scratch1));
+
+  // Fast case: Do the load.
+  __ Addu(scratch1, elements,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // The key is a smi.
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(at, at, scratch1);
+  __ lw(scratch2, MemOperand(at));
+
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ Branch(out_of_range, eq, scratch2, Operand(at));
+  __ mov(result, scratch2);
+}
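
The fast-case load above exploits the smi representation when scaling the key:
a smi already holds value << kSmiTagSize, so shifting it left by
kPointerSizeLog2 - kSmiTagSize turns it directly into a byte offset without
untagging. A small C++ sketch of the address computation, assuming a 32-bit
target (the helper is hypothetical):

    #include <cstdint>

    static uint32_t FastElementAddress(uint32_t elements,      // tagged pointer
                                       int32_t smi_key,        // value << 1
                                       uint32_t header_size,   // FixedArray header
                                       uint32_t heap_object_tag) {
      const int kPointerSizeLog2 = 2;  // 32-bit pointers
      const int kSmiTagSize = 1;
      uint32_t base = elements + header_size - heap_object_tag;
      return base + (static_cast<uint32_t>(smi_key)
                     << (kPointerSizeLog2 - kSmiTagSize));
    }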
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if a key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+                                   Register key,
+                                   Register map,
+                                   Register hash,
+                                   Label* index_string,
+                                   Label* not_symbol) {
+  // The key is not a smi.
+  // Is it a string?
+  __ GetObjectType(key, map, hash);
+  __ Branch(not_symbol, ge, hash, Operand(FIRST_NONSTRING_TYPE));
+
+  // Is the string an array index, with cached numeric value?
+  __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
+  __ And(at, hash, Operand(String::kContainsCachedArrayIndexMask));
+  __ Branch(index_string, eq, at, Operand(zero_reg));
+
+  // Is the string a symbol?
+  // map: key map
+  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  ASSERT(kSymbolTag != 0);
+  __ And(at, hash, Operand(kIsSymbolMask));
+  __ Branch(not_symbol, eq, at, Operand(zero_reg));
 }
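
GenerateKeyStringCheck above sorts non-smi keys into three cases: strings
whose hash field caches a numeric array index (redirected to index_string,
where IndexFromHash extracts the index), symbols (fall through), and
everything else (not_symbol). The cached-index test only checks that the
String::kContainsCachedArrayIndexMask bits are clear; a trivial C++ rendering
(the mask is passed as a parameter for illustration):

    #include <cstdint>

    static bool HashFieldCachesArrayIndex(uint32_t hash_field, uint32_t mask) {
      return (hash_field & mask) == 0;
    }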
 
 
 // Defined in ic.cc.
 Object* CallIC_Miss(Arguments args);
 
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                          int argc,
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a1    : receiver
+  //  -- a2    : name
+  // -----------------------------------
+  Label number, non_number, non_string, boolean, probe, miss;
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC,
+                                         extra_ic_state,
+                                         NORMAL,
+                                         argc);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a1, a2, a3, t0, t1);
+
+  // If the stub cache probing failed, the receiver might be a value.
+  // For value objects, we use the map of the prototype objects for
+  // the corresponding JSValue for the cache and that is what we need
+  // to probe.
+  //
+  // Check for number.
+  __ JumpIfSmi(a1, &number, t1);
+  __ GetObjectType(a1, a3, a3);
+  __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
+  __ bind(&number);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::NUMBER_FUNCTION_INDEX, a1);
+  __ Branch(&probe);
+
+  // Check for string.
+  __ bind(&non_number);
+  __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::STRING_FUNCTION_INDEX, a1);
+  __ Branch(&probe);
+
+  // Check for boolean.
+  __ bind(&non_string);
+  __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+  __ Branch(&boolean, eq, a1, Operand(t0));
+  __ LoadRoot(t1, Heap::kFalseValueRootIndex);
+  __ Branch(&miss, ne, a1, Operand(t1));
+  __ bind(&boolean);
+  StubCompiler::GenerateLoadGlobalFunctionPrototype(
+      masm, Context::BOOLEAN_FUNCTION_INDEX, a1);
+
+  // Probe the stub cache for the value object.
+  __ bind(&probe);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a1, a2, a3, t0, t1);
+
+  __ bind(&miss);
 }
 
 
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+                                     int argc,
+                                     Label* miss,
+                                     Register scratch) {
+  // a1: function
+
+  // Check that the value isn't a smi.
+  __ JumpIfSmi(a1, miss);
+
+  // Check that the value is a JSFunction.
+  __ GetObjectType(a1, scratch, scratch);
+  __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+}
+
+
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Get the receiver of the function from the stack into a1.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  GenerateStringDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
+
+  // a0: elements
+  // Search the dictionary - put result in register a1.
+  GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);
+
+  GenerateFunctionTailCall(masm, argc, &miss, t0);
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+}
+
+
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
+
+  if (id == IC::kCallIC_Miss) {
+    __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
+  } else {
+    __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
+  }
+
+  // Get the receiver of the function from the stack.
+  __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+  __ EnterInternalFrame();
+
+  // Push the receiver and the name of the function.
+  __ Push(a3, a2);
+
+  // Call the entry.
+  __ li(a0, Operand(2));
+  __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+
+  // Move result to a1 and leave the internal frame.
+  __ mov(a1, v0);
+  __ LeaveInternalFrame();
+
+  // Check if the receiver is a global object of some sort.
+  // This can happen only for regular CallIC but not KeyedCallIC.
+  if (id == IC::kCallIC_Miss) {
+    Label invoke, global;
+    __ lw(a2, MemOperand(sp, argc * kPointerSize));
+    __ andi(t0, a2, kSmiTagMask);
+    __ Branch(&invoke, eq, t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, a3);
+    __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
+    __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
+
+    // Patch the receiver on the stack.
+    __ bind(&global);
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a2, MemOperand(sp, argc * kPointerSize));
+    __ bind(&invoke);
+  }
+  // Invoke the function.
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  ParameterCount actual(argc);
+  __ InvokeFunction(a1,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    call_kind);
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
+}
+
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack into a1.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+  GenerateMiss(masm, argc, extra_ic_state);
 }
 
 
 void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallNormal(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
 }
 
 
 void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
 }
 
 
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack into a1.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  Label do_call, slow_call, slow_load, slow_reload_receiver;
+  Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+  Label index_smi, index_string;
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(a2, &check_string);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);
+
+  GenerateFastArrayLoad(
+      masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);
+
+  __ bind(&do_call);
+  // receiver in a1 is not used after this point.
+  // a2: key
+  // a1: function
+
+  GenerateFunctionTailCall(masm, argc, &slow_call, a0);
+
+  __ bind(&check_number_dictionary);
+  // a2: key
+  // a3: elements map
+  // t0: elements pointer
+  // Check whether the elements is a number dictionary.
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&slow_load, ne, a3, Operand(at));
+  __ sra(a0, a2, kSmiTagSize);
+  // a0: untagged index
+  GenerateNumberDictionaryLoad(masm, &slow_load, t0, a2, a1, a0, a3, t1);
+  __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
+  __ jmp(&do_call);
+
+  __ bind(&slow_load);
+  // This branch is taken when calling KeyedCallIC_Miss is neither required
+  // nor beneficial.
+  __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
+  __ EnterInternalFrame();
+  __ push(a2);  // Save the key.
+  __ Push(a1, a2);  // Pass the receiver and the key.
+  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+  __ pop(a2);  // Restore the key.
+  __ LeaveInternalFrame();
+  __ mov(a1, v0);
+  __ jmp(&do_call);
+
+  __ bind(&check_string);
+  GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call);
+
+  // The key is known to be a symbol.
+  // If the receiver is a regular JS object with slow properties then do
+  // a quick inline probe of the receiver's dictionary.
+  // Otherwise do the monomorphic cache probe.
+  GenerateKeyedLoadReceiverCheck(
+      masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
+
+  __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+  __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));
+
+  GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
+  __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
+  __ jmp(&do_call);
+
+  __ bind(&lookup_monomorphic_cache);
+  __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
+  GenerateMonomorphicCacheProbe(masm,
+                                argc,
+                                Code::KEYED_CALL_IC,
+                                Code::kNoExtraICState);
+  // Fall through on miss.
+
+  __ bind(&slow_call);
+  // This branch is taken if:
+  // - the receiver requires boxing or access check,
+  // - the key is neither smi nor symbol,
+  // - the value loaded is not a function,
+  // - there is hope that the runtime will create a monomorphic call stub,
+  //   that will get fetched next time.
+  __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
+  GenerateMiss(masm, argc);
+
+  __ bind(&index_string);
+  __ IndexFromHash(a3, a2);
+  // Now jump to the place where smi keys are handled.
+  __ jmp(&index_smi);
 }
 
 
 void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Check if the name is a string.
+  Label miss;
+  __ JumpIfSmi(a2, &miss);
+  __ IsObjectJSStringType(a2, a0, &miss);
+
+  GenerateCallNormal(masm, argc);
+  __ bind(&miss);
+  GenerateMiss(masm, argc);
 }
 
 
@@ -101,122 +832,626 @@
 Object* LoadIC_Miss(Arguments args);
 
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a0, a2, a3, t0, t1);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
 }
 
 
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Label miss;
+
+  GenerateStringDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
+
+  // a1: elements
+  GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
+  __ Ret();
+
+  // Cache miss: Jump to runtime.
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- a0    : receiver
+  //  -- sp[0] : receiver
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);
+
+  __ mov(a3, a0);
+  __ Push(a3, a2);
+
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Isolate* isolate = masm->isolate();
 
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
 
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
+  __ Push(a1, a0);
 
+  // Perform tail call to the entry.
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
+      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
 
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  UNIMPLEMENTED_MIPS();
-  return false;
-}
-
-
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+
+  __ Push(a1, a0);
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 }
 
 
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label slow, check_string, index_smi, index_string, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register key = a0;
+  Register receiver = a1;
+
+  Isolate* isolate = masm->isolate();
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_string);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
+
+  // Check the "has fast elements" bit in the receiver's map which is
+  // now in a2.
+  __ lbu(a3, FieldMemOperand(a2, Map::kBitField2Offset));
+  __ And(at, a3, Operand(1 << Map::kHasFastElements));
+  __ Branch(&check_number_dictionary, eq, at, Operand(zero_reg));
+
+  GenerateFastArrayLoad(
+      masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
+
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
+  __ Ret();
+
+  __ bind(&check_number_dictionary);
+  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
+
+  // Check whether the elements is a number dictionary.
+  // a0: key
+  // a3: elements map
+  // t0: elements
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&slow, ne, a3, Operand(at));
+  __ sra(a2, a0, kSmiTagSize);
+  GenerateNumberDictionaryLoad(masm, &slow, t0, a0, v0, a2, a3, t1);
+  __ Ret();
+
+  // Slow case, key and receiver still in a0 and a1.
+  __ bind(&slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
+                      1,
+                      a2,
+                      a3);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_string);
+  GenerateKeyStringCheck(masm, key, a2, a3, &index_string, &slow);
+
+  GenerateKeyedLoadReceiverCheck(
+      masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary.
+  __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&probe_dictionary, eq, t0, Operand(at));
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the string hash.
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
+  __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
+  __ sra(at, t0, String::kHashShift);
+  __ xor_(a3, a3, at);
+  __ And(a3, a3, Operand(KeyedLookupCache::kCapacityMask));
+
+  // Load the key (consisting of map and symbol) from the cache and
+  // check for match.
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(isolate);
+  __ li(t0, Operand(cache_keys));
+  __ sll(at, a3, kPointerSizeLog2 + 1);
+  __ addu(t0, t0, at);
+  __ lw(t1, MemOperand(t0));  // Load the cached map.
+  __ Addu(t0, t0, Operand(kPointerSize));  // Advance to the cached symbol.
+  __ Branch(&slow, ne, a2, Operand(t1));
+  __ lw(t1, MemOperand(t0));
+  __ Branch(&slow, ne, a0, Operand(t1));
+
+  // Get field offset.
+  // a0     : key
+  // a1     : receiver
+  // a2     : receiver's map
+  // a3     : lookup cache index
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+  __ li(t0, Operand(cache_field_offsets));
+  __ sll(at, a3, kPointerSizeLog2);
+  __ addu(at, t0, at);
+  __ lw(t1, MemOperand(at));
+  __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
+  __ Subu(t1, t1, t2);
+  __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+
+  // Load in-object property.
+  __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+  __ addu(t2, t2, t1);  // Index from start of object.
+  __ Subu(a1, a1, Operand(kHeapObjectTag));  // Remove the heap tag.
+  __ sll(at, t2, kPointerSizeLog2);
+  __ addu(at, a1, at);
+  __ lw(v0, MemOperand(at));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+                      1,
+                      a2,
+                      a3);
+  __ Ret();
+
+  // Load property array property.
+  __ bind(&property_array_property);
+  __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
+  __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ sll(t0, t1, kPointerSizeLog2);
+  __ Addu(t0, t0, a1);
+  __ lw(v0, MemOperand(t0));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
+                      1,
+                      a2,
+                      a3);
+  __ Ret();
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  // a1: receiver
+  // a0: key
+  // a3: elements
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
+  // Load the property to v0.
+  GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
+                      1,
+                      a2,
+                      a3);
+  __ Ret();
+
+  __ bind(&index_string);
+  __ IndexFromHash(a3, key);
+  // Now jump to the place where smi keys are handled.
+  __ Branch(&index_smi);
 }
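
The cache index computed above is the receiver's map word XORed with the string's hash field, masked to the cache capacity. A plain C++ sketch, with stand-in values for KeyedLookupCache::kMapHashShift, String::kHashShift and KeyedLookupCache::kCapacityMask:

#include <cstdint>

// Stand-in values; the real constants are KeyedLookupCache::kMapHashShift,
// String::kHashShift and KeyedLookupCache::kCapacityMask.
constexpr int kMapHashShift = 2;
constexpr int kStringHashShift = 2;
constexpr uint32_t kCapacityMask = 64 - 1;  // assumes a 64-entry cache

inline uint32_t KeyedLookupCacheIndex(uint32_t map_word, uint32_t hash_field) {
  return ((map_word >> kMapHashShift) ^ (hash_field >> kStringHashShift)) &
         kCapacityMask;
}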
 
 
 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key (index)
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label miss;
+
+  Register receiver = a1;
+  Register index = a0;
+  Register scratch1 = a2;
+  Register scratch2 = a3;
+  Register result = v0;
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&miss);
+  GenerateMiss(masm, false);
 }
 
 
 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                               StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(a2, a1, a0);
+  __ li(a1, Operand(Smi::FromInt(NONE)));          // PropertyAttributes.
+  __ li(a0, Operand(Smi::FromInt(strict_mode)));   // Strict mode.
+  __ Push(a1, a0);
+
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
 
 
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                    StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Label slow, fast, array, extra, exit;
+
+  // Register usage.
+  Register value = a0;
+  Register key = a1;
+  Register receiver = a2;
+  Register elements = a3;  // Elements array of the receiver.
+  // t0 is used as ip in the arm version.
+  // t3-t4 are used as temporaries.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &slow);
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+
+  // Get the map of the object.
+  __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ Branch(&slow, ne, t0, Operand(zero_reg));
+  // Check if the object is a JS array or not.
+  __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
+
+  __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+  // Check that the object is some kind of JS object.
+  __ Branch(&slow, lt, t3, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Object case: Check key against length in the elements array.
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check that the object is in fast mode and writable.
+  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow, ne, t3, Operand(t0));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(&fast, lo, key, Operand(t0));
+  // Fall through to slow if untagged index >= length.
+
+  // Slow case, handle jump to runtime.
+  __ bind(&slow);
+
+  // Entry registers are intact.
+  // a0: value.
+  // a1: key.
+  // a2: receiver.
+
+  GenerateRuntimeSetProperty(masm, strict_mode);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+
+  __ bind(&extra);
+  // Only support writing to array[array.length].
+  __ Branch(&slow, ne, key, Operand(t0));
+  // Check for room in the elements backing store.
+  // Both the key and the length of FixedArray are smis.
+  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(&slow, hs, key, Operand(t0));
+  // Calculate key + 1 as smi.
+  ASSERT_EQ(0, kSmiTag);
+  __ Addu(t3, key, Operand(Smi::FromInt(1)));
+  __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&fast);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is the length is always a smi.
+
+  __ bind(&array);
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow, ne, t3, Operand(t0));
+
+  // Check the key against the length in the array.
+  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&extra, hs, key, Operand(t0));
+  // Fall through to fast case.
+
+  __ bind(&fast);
+  // Fast case, store the value to the elements backing store.
+  __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t4, t4, Operand(t1));
+  __ sw(value, MemOperand(t4));
+  // Skip write barrier if the written value is a smi.
+  __ JumpIfSmi(value, &exit);
+
+  // Update write barrier for the elements array address.
+  __ Subu(t3, t4, Operand(elements));
+
+  __ RecordWrite(elements, Operand(t3), t4, t5);
+  __ bind(&exit);
+
+  __ mov(v0, a0);  // Return the value written.
+  __ Ret();
 }
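
The fast path above stores at elements + header - tag + scaled key; shifting the smi-tagged key by kPointerSizeLog2 - kSmiTagSize untags and scales it in one step. A sketch of that arithmetic with stand-in constants for 32-bit MIPS:

#include <cstdint>

constexpr int kPointerSizeLog2 = 2;       // 4-byte pointers on 32-bit MIPS
constexpr int kSmiTagSize = 1;            // smis are value << 1
constexpr int kHeapObjectTag = 1;
constexpr int kFixedArrayHeaderSize = 8;  // assumes map word + length word

inline uintptr_t FastElementAddress(uintptr_t elements, uint32_t smi_key) {
  uintptr_t base = elements + kFixedArrayHeaderSize - kHeapObjectTag;
  // Shifting the tagged key by (kPointerSizeLog2 - kSmiTagSize) both untags
  // it and scales it to a byte offset, exactly like the sll above.
  return base +
         (static_cast<uintptr_t>(smi_key) << (kPointerSizeLog2 - kSmiTagSize));
}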
 
 
 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label slow;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(a1, &slow);
+
+  // Check that the key is an array index, that is Uint32.
+  __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
+  __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+  // Get the map of the receiver.
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+
+  // Check that it has indexed interceptor and access checks
+  // are not enabled for this object.
+  __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
+  __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
+  // Everything is fine, call runtime.
+  __ Push(a1, a0);  // Receiver, key.
+
+  // Perform tail call to the entry.
+  __ TailCallExternalReference(ExternalReference(
+       IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
+
+  __ bind(&slow);
+  GenerateMiss(masm, false);
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  __ Push(a2, a1, a0);
+
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  // Push receiver, key and value for runtime call.
+  // We can't use MultiPush as the order of the registers is important.
+  __ Push(a2, a1, a0);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+
+  __ TailCallExternalReference(ref, 3, 1);
 }
 
 
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Get the receiver from the stack and probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC,
+                                         strict_mode);
+  Isolate::Current()->stub_cache()->GenerateProbe(
+      masm, flags, a1, a2, a3, t0, t1);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
 }
 
 
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  __ Push(a1, a2, a0);
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
+                                            masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
 }
 
 
 void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except for external and pixel arrays, which means
+  // anything with elements of FixedArray type), but is currently restricted
+  // to JSArray.
+  // The value must be a number; only smis are accepted, as they are the most
+  // common case.
+
+  Label miss;
+
+  Register receiver = a1;
+  Register value = a0;
+  Register scratch = a3;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the object is a JS array.
+  __ GetObjectType(receiver, scratch, scratch);
+  __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+  // Check that elements are FixedArray.
+  // We rely on StoreIC_ArrayLength below to deal with all types of
+  // fast elements (including COW).
+  __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+  __ GetObjectType(scratch, scratch, scratch);
+  __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+
+  // Check that value is a smi.
+  __ JumpIfNotSmi(value, &miss);
+
+  // Prepare tail call to StoreIC_ArrayLength.
+  __ Push(receiver, value);
+
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength),
+                                            masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+
+  __ bind(&miss);
+
+  GenerateMiss(masm);
 }
 
 
 void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  GenerateStringDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
+
+  GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+  GenerateMiss(masm);
 }
 
 
 void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
-  UNIMPLEMENTED_MIPS();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  __ Push(a1, a2, a0);
+
+  __ li(a1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
+  __ li(a0, Operand(Smi::FromInt(strict_mode)));
+  __ Push(a1, a0);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
 
 
@@ -224,18 +1459,119 @@
 
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
-  UNIMPLEMENTED_MIPS();
-  return kNoCondition;
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return lt;
+    case Token::LTE:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return ge;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
 }
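
A small check of the operand-reversal trick used for Token::GT (and, symmetrically, LTE): evaluating b < a gives the same truth table as a > b while keeping the left operand's conversion first, as the comments above note:

#include <cassert>

// With the operands swapped, the lt condition computes the same result as
// GT on the original order, while the left operand is still converted first.
inline bool GreaterViaReversedLess(int a, int b) { return b < a; }

int main() {
  assert(GreaterViaReversedLess(3, 2) == (3 > 2));
  assert(GreaterViaReversedLess(2, 3) == (2 > 3));
  assert(GreaterViaReversedLess(2, 2) == (2 > 2));
}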
 
 
 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
-  UNIMPLEMENTED_MIPS();
+  HandleScope scope;
+  Handle<Code> rewritten;
+  State previous_state = GetState();
+  State state = TargetState(previous_state, false, x, y);
+  if (state == GENERIC) {
+    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+    rewritten = stub.GetCode();
+  } else {
+    ICCompareStub stub(op_, state);
+    rewritten = stub.GetCode();
+  }
+  set_target(*rewritten);
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC (%s->%s)#%s]\n",
+           GetStateName(previous_state),
+           GetStateName(state),
+           Token::Name(op_));
+  }
+#endif
+
+  // Activate inlined smi code.
+  if (previous_state == UNINITIALIZED) {
+    PatchInlinedSmiCode(address());
+  }
 }
 
 
 void PatchInlinedSmiCode(Address address) {
-  // Currently there is no smi inlining in the MIPS full code generator.
+  Address andi_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not an andi at, rx, #yyy, nothing
+  // was inlined.
+  Instr instr = Assembler::instr_at(andi_instruction_address);
+  if (!Assembler::IsAndImmediate(instr)) {
+    return;
+  }
+
+  // The delta to the start of the map check instruction; the patched branch
+  // tests the at register that the andi at the patch site sets up.
+  int delta = Assembler::GetImmediate16(instr);
+  delta += Assembler::GetRs(instr) * kImm16Mask;
+  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
+  // signals that nothing was inlined.
+  if (delta == 0) {
+    return;
+  }
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
+           address, andi_instruction_address, delta);
+  }
+#endif
+
+  Address patch_address =
+      andi_instruction_address - delta * Instruction::kInstrSize;
+  Instr instr_at_patch = Assembler::instr_at(patch_address);
+  Instr branch_instr =
+      Assembler::instr_at(patch_address + Instruction::kInstrSize);
+  ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+  ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+  ASSERT(Assembler::IsBranch(branch_instr));
+  if (Assembler::IsBeq(branch_instr)) {
+    // This is patching a "jump if not smi" site to be active.
+    // Changing:
+    //   andi at, rx, 0
+    //   Branch <target>, eq, at, Operand(zero_reg)
+    // to:
+    //   andi at, rx, #kSmiTagMask
+    //   Branch <target>, ne, at, Operand(zero_reg)
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+    patcher.ChangeBranchCondition(ne);
+  } else {
+    ASSERT(Assembler::IsBne(branch_instr));
+    // This is patching a "jump if smi" site to be active.
+    // Changing:
+    //   andi at, rx, 0
+    //   Branch <target>, ne, at, Operand(zero_reg)
+    // to:
+    //   andi at, rx, #kSmiTagMask
+    //   Branch <target>, eq, at, Operand(zero_reg)
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+    patcher.ChangeBranchCondition(eq);
+  }
 }
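
The decode above reads the patch-site delta out of the recorded andi: the 16-bit immediate plus the rs field times kImm16Mask. Assuming the emitting side splits the delta the same way (delta % 0xffff into the immediate, delta / 0xffff into rs), a round-trip sketch looks like:

#include <cassert>
#include <cstdint>

constexpr uint32_t kImm16Mask = 0xffff;

struct AndiFields {
  uint32_t rs;     // register field of the recorded andi
  uint32_t imm16;  // 16-bit immediate of the recorded andi
};

inline AndiFields EncodeDelta(uint32_t delta) {
  return AndiFields{delta / kImm16Mask, delta % kImm16Mask};
}

inline uint32_t DecodeDelta(const AndiFields& f) {
  return f.imm16 + f.rs * kImm16Mask;  // same formula as the patcher above
}

int main() {
  const uint32_t deltas[] = {1, 70000, 123456};
  for (uint32_t d : deltas) {
    assert(DecodeDelta(EncodeDelta(d)) == d);
  }
}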
 
 
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
deleted file mode 100644
index bd6d60b..0000000
--- a/src/mips/jump-target-mips.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
-#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
-    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
-    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-
-
-void JumpTarget::DoJump() {
-  UNIMPLEMENTED_MIPS();
-}
-
-// Original prototype for mips, needs arch-indep change. Leave out for now.
-// void JumpTarget::DoBranch(Condition cc, Hint ignored,
-//     Register src1, const Operand& src2) {
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::Call() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::DoBind() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-#undef BRANCH_ARGS_CHECK
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 345d912..2aec684 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index e11dfab..ebc1e43 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -78,7 +78,7 @@
 
   bool HasEnvironment() const {
     UNIMPLEMENTED();
-    return NULL;
+    return false;
   }
 
   virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
@@ -213,15 +213,13 @@
 
 class LChunk: public ZoneObject {
  public:
-  explicit LChunk(CompilationInfo* info, HGraph* graph) { }
+  explicit LChunk(HGraph* graph) { }
 
   HGraph* graph() const {
     UNIMPLEMENTED();
     return NULL;
   }
 
-  CompilationInfo* info() const { return NULL; }
-
   const ZoneList<LPointerMap*>* pointer_maps() const {
     UNIMPLEMENTED();
     return NULL;
@@ -271,6 +269,11 @@
 
   void MarkEmptyBlocks() { UNIMPLEMENTED(); }
 
+  CompilationInfo* info() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
 #ifdef DEBUG
   void Verify() { UNIMPLEMENTED(); }
 #endif
@@ -279,7 +282,7 @@
 
 class LChunkBuilder BASE_EMBEDDED {
  public:
-  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) { }
+  LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
 
   // Build the sequence for the graph.
   LChunk* Build() {
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index bd4ab48..8b342a2 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -25,29 +25,32 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <limits.h>  // For LONG_MIN, LONG_MAX
+#include <limits.h>  // For LONG_MIN, LONG_MAX.
 
 #include "v8.h"
 
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "runtime.h"
 
 namespace v8 {
 namespace internal {
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-    : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true),
-      code_object_(HEAP->undefined_value()) {
+      allow_stub_calls_(true) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
 }
 
 
-// Arguments macros
+// Arguments macros.
 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
 #define COND_ARGS cond, r1, r2
 
@@ -161,7 +164,7 @@
 void MacroAssembler::RecordWriteHelper(Register object,
                                        Register address,
                                        Register scratch) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // Check that the object is not in new space.
     Label not_in_new_space;
     InNewSpace(object, scratch, ne, &not_in_new_space);
@@ -190,6 +193,77 @@
   sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
 }
 
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+  // Safepoints expect a block of kNumSafepointRegisters values on the
+  // stack, so adjust the stack for unsaved registers.
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  ASSERT(num_unsaved >= 0);
+  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  MultiPush(kSafepointSavedRegisters);
+}
+
+void MacroAssembler::PopSafepointRegisters() {
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  MultiPop(kSafepointSavedRegisters);
+  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+}
+
+void MacroAssembler::PushSafepointRegistersAndDoubles() {
+  PushSafepointRegisters();
+  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
+    FPURegister reg = FPURegister::FromAllocationIndex(i);
+    sdc1(reg, MemOperand(sp, i * kDoubleSize));
+  }
+}
+
+void MacroAssembler::PopSafepointRegistersAndDoubles() {
+  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i += 2) {
+    FPURegister reg = FPURegister::FromAllocationIndex(i);
+    ldc1(reg, MemOperand(sp, i * kDoubleSize));
+  }
+  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+  PopSafepointRegisters();
+}
+
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
+                                                             Register dst) {
+  sw(src, SafepointRegistersAndDoublesSlot(dst));
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+  sw(src, SafepointRegisterSlot(dst));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+  lw(dst, SafepointRegisterSlot(src));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the highest encoding,
+  // which means that lowest encodings are closest to the stack pointer.
+  return kSafepointRegisterStackIndexMap[reg_code];
+}
+
+
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  // General purpose registers are pushed last on the stack.
+  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
+  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+  return MemOperand(sp, doubles_size + register_offset);
+}
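
A sketch of the stack layout these helpers assume: the doubles block sits closest to sp, so a general purpose register's slot is the doubles size plus its stack index scaled by the pointer size (the double count below is a stand-in for FPURegister::kNumAllocatableRegisters):

constexpr int kPointerSize = 4;
constexpr int kDoubleSize = 8;
constexpr int kNumAllocatableDoubles = 15;  // stand-in value

// Doubles are stored below the general purpose registers (closest to sp),
// so a register's slot sits above the whole block of doubles.
constexpr int SafepointRegisterAndDoubleOffset(int register_stack_index) {
  return kNumAllocatableDoubles * kDoubleSize +
         register_stack_index * kPointerSize;
}

static_assert(SafepointRegisterAndDoubleOffset(0) ==
                  kNumAllocatableDoubles * kDoubleSize,
              "first register slot is just past the doubles block");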
+
 
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
@@ -230,7 +304,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     li(object, Operand(BitCast<int32_t>(kZapValue)));
     li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
     li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
@@ -262,7 +336,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     li(object, Operand(BitCast<int32_t>(kZapValue)));
     li(address, Operand(BitCast<int32_t>(kZapValue)));
     li(scratch, Operand(BitCast<int32_t>(kZapValue)));
@@ -271,7 +345,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Allocation support
+// Allocation support.
 
 
 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
@@ -297,15 +371,15 @@
   lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
-    Push(holder_reg);  // Temporarily save holder on the stack.
+    push(holder_reg);  // Temporarily save holder on the stack.
     // Read the first word and compare to the global_context_map.
     lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
     LoadRoot(at, Heap::kGlobalContextMapRootIndex);
     Check(eq, "JSGlobalObject::global_context should be a global context.",
           holder_reg, Operand(at));
-    Pop(holder_reg);  // Restore holder.
+    pop(holder_reg);  // Restore holder.
   }
 
   // Check if both contexts are the same.
@@ -313,9 +387,9 @@
   Branch(&same_contexts, eq, scratch, Operand(at));
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
-    Push(holder_reg);  // Temporarily save holder on the stack.
+    push(holder_reg);  // Temporarily save holder on the stack.
     mov(holder_reg, at);  // Move at to its holding place.
     LoadRoot(at, Heap::kNullValueRootIndex);
     Check(ne, "JSGlobalProxy::context() should not be null.",
@@ -326,7 +400,7 @@
     Check(eq, "JSGlobalObject::global_context should be a global context.",
           holder_reg, Operand(at));
     // Restore at is not needed. at is reloaded below.
-    Pop(holder_reg);  // Restore holder.
+    pop(holder_reg);  // Restore holder.
     // Restore at to holder's context.
     lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
   }
@@ -346,7 +420,7 @@
 
 
 // ---------------------------------------------------------------------------
-// Instruction macros
+// Instruction macros.
 
 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
@@ -500,6 +574,15 @@
 }
 
 
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
+  ASSERT(rt.is_reg());
+  ASSERT(!at.is(rs));
+  ASSERT(!at.is(rt.rm()));
+  li(at, -1);
+  xor_(rs, rt.rm(), at);
+}
+
+
 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     slt(rd, rs, rt.rm());
@@ -581,24 +664,13 @@
     }
     // We need always the same number of instructions as we may need to patch
     // this code to load another value which may need 2 instructions to load.
-    if (is_int16(j.imm32_)) {
-      nop();
-      addiu(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ & kHiMask)) {
-      nop();
-      ori(rd, zero_reg, j.imm32_);
-    } else if (!(j.imm32_ & kImm16Mask)) {
-      nop();
-      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
-    } else {
-      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
-      ori(rd, rd, (j.imm32_ & kImm16Mask));
-    }
+    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+    ori(rd, rd, (j.imm32_ & kImm16Mask));
   }
 }
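
For patchable immediates li() now always emits the fixed lui/ori pair; in integer terms the pair reassembles the 32-bit value from its two halfwords, as this small sketch checks:

#include <cassert>
#include <cstdint>

inline uint32_t Lui(uint32_t hi16) { return hi16 << 16; }
inline uint32_t Ori(uint32_t reg, uint32_t lo16) { return reg | lo16; }

int main() {
  uint32_t imm = 0x12345678;
  uint32_t rd = Lui((imm >> 16) & 0xffff);  // lui rd, (imm & kHiMask) >> kLuiShift
  rd = Ori(rd, imm & 0xffff);               // ori rd, rd, imm & kImm16Mask
  assert(rd == imm);
}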
 
 
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
 void MacroAssembler::stop(const char* msg) {
   // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
   // We use the 0x54321 value to be able to find it easily when reading memory.
@@ -727,11 +799,11 @@
   ASSERT(!rs.is(t9));
   ASSERT(!rs.is(t8));
 
-  // Save rs's MSB to t8
+  // Save rs's MSB to t8.
   And(t8, rs, 0x80000000);
   // Remove rs's MSB.
   And(t9, rs, 0x7FFFFFFF);
-  // Move t9 to fd
+  // Move t9 to fd.
   mtc1(t9, fd);
 
   // Convert fd to a real FP value.
@@ -839,7 +911,7 @@
   Subu(scratch2, scratch2, Operand(zero_exponent));
   // Dest already has a Smi zero.
   Branch(&done, lt, scratch2, Operand(zero_reg));
-  if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (!CpuFeatures::IsSupported(FPU)) {
     // We have a shifted exponent between 0 and 30 in scratch2.
     srl(dest, scratch2, HeapNumber::kExponentShift);
     // We now have the exponent in dest.  Subtract from 30 to get
@@ -848,7 +920,7 @@
     subu(dest, at, dest);
   }
   bind(&right_exponent);
-  if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+  if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
     // MIPS FPU instructions implementing double precision to integer
     // conversion using round to zero. Since the FP value was qualified
@@ -898,6 +970,102 @@
 }
 
 
+void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
+                                                 Register input_high,
+                                                 Register input_low,
+                                                 Register scratch) {
+  Label done, normal_exponent, restore_sign;
+  // Extract the biased exponent in result.
+  Ext(result,
+      input_high,
+      HeapNumber::kExponentShift,
+      HeapNumber::kExponentBits);
+
+  // Check for Infinity and NaNs, which should return 0.
+  Subu(scratch, result, HeapNumber::kExponentMask);
+  movz(result, zero_reg, scratch);
+  Branch(&done, eq, scratch, Operand(zero_reg));
+
+  // Express exponent as delta to (number of mantissa bits + 31).
+  Subu(result,
+       result,
+       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+  // If the delta is strictly positive, all bits would be shifted away,
+  // which means that we can return 0.
+  Branch(&normal_exponent, le, result, Operand(zero_reg));
+  mov(result, zero_reg);
+  Branch(&done);
+
+  bind(&normal_exponent);
+  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+  // Calculate shift.
+  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+  // Save the sign.
+  Register sign = result;
+  result = no_reg;
+  And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+  // to check for this specific case.
+  Label high_shift_needed, high_shift_done;
+  Branch(&high_shift_needed, lt, scratch, Operand(32));
+  mov(input_high, zero_reg);
+  Branch(&high_shift_done);
+  bind(&high_shift_needed);
+
+  // Set the implicit 1 before the mantissa part in input_high.
+  Or(input_high,
+     input_high,
+     Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+  // Shift the mantissa bits to the correct position.
+  // We don't need to clear non-mantissa bits as they will be shifted away.
+  // If they weren't, it would mean that the answer is in the 32bit range.
+  sllv(input_high, input_high, scratch);
+
+  bind(&high_shift_done);
+
+  // Replace the shifted bits with bits from the lower mantissa word.
+  Label pos_shift, shift_done;
+  li(at, 32);
+  subu(scratch, at, scratch);
+  Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+  // Negate scratch.
+  Subu(scratch, zero_reg, scratch);
+  sllv(input_low, input_low, scratch);
+  Branch(&shift_done);
+
+  bind(&pos_shift);
+  srlv(input_low, input_low, scratch);
+
+  bind(&shift_done);
+  Or(input_high, input_high, Operand(input_low));
+  // Restore sign if necessary.
+  mov(scratch, sign);
+  result = sign;
+  sign = no_reg;
+  Subu(result, zero_reg, input_high);
+  movz(result, input_high, scratch);
+  bind(&done);
+}
+
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+                                         Register src,
+                                         int num_least_bits) {
+  Ext(dst, src, kSmiTagSize, num_least_bits);
+}
+
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst,
+                                           Register src,
+                                           int num_least_bits) {
+  And(dst, src, Operand((1 << num_least_bits) - 1));
+}
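
Both helpers mask off the low bits of a value; the smi variant first skips the tag bit, since a 32-bit smi is the value shifted left by kSmiTagSize. A plain sketch:

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;  // smis are the value shifted left by one

inline uint32_t LeastBitsFromSmi(uint32_t tagged_smi, int num_least_bits) {
  // Equivalent to Ext(dst, src, kSmiTagSize, num_least_bits).
  return (tagged_smi >> kSmiTagSize) & ((1u << num_least_bits) - 1);
}

inline uint32_t LeastBitsFromInt32(uint32_t value, int num_least_bits) {
  return value & ((1u << num_least_bits) - 1);
}

int main() {
  uint32_t tagged = 13u << kSmiTagSize;
  assert(LeastBitsFromSmi(tagged, 3) == (13u & 7u));
  assert(LeastBitsFromInt32(13u, 3) == 5u);
}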
+
+
 // Emulated conditional branches do not emit a nop in the branch delay slot.
 //
 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
@@ -937,7 +1105,7 @@
       case ne:
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (r2.is(zero_reg)) {
           bgtz(rs, offset);
@@ -1028,7 +1196,7 @@
         li(r2, rt);
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (rt.imm32_ == 0) {
           bgtz(rs, offset);
@@ -1170,7 +1338,7 @@
         offset = shifted_branch_offset(L, false);
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (r2.is(zero_reg)) {
           offset = shifted_branch_offset(L, false);
@@ -1276,7 +1444,7 @@
         offset = shifted_branch_offset(L, false);
         bne(rs, r2, offset);
         break;
-      // Signed comparison
+      // Signed comparison.
       case greater:
         if (rt.imm32_ == 0) {
           offset = shifted_branch_offset(L, false);
@@ -1444,7 +1612,7 @@
       bal(offset);
       break;
 
-    // Signed comparison
+    // Signed comparison.
     case greater:
       slt(scratch, r2, rs);
       addiu(scratch, scratch, -1);
@@ -1539,7 +1707,7 @@
       bal(offset);
       break;
 
-    // Signed comparison
+    // Signed comparison.
     case greater:
       slt(scratch, r2, rs);
       addiu(scratch, scratch, -1);
@@ -1642,7 +1810,7 @@
         Branch(2, NegateCondition(cond), rs, rt);
         j(target.imm32_);  // Will generate only one instruction.
       }
-    } else {  // MustUseReg(target)
+    } else {  // MustUseReg(target).
       li(t9, target);
       if (cond == cc_always) {
         jr(t9);
@@ -1658,15 +1826,28 @@
 }
 
 
+int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
+  return 4 * kInstrSize;
+}
+
+
+int MacroAssembler::CallSize(Register reg) {
+  return 2 * kInstrSize;
+}
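
A plausible accounting for the two sizes (assuming the delay slot is filled with a nop): a call through a Handle<Code> needs li (lui + ori) plus jalr plus the nop, while a call through a register needs only jalr plus the nop:

constexpr int kInstrSize = 4;

// li expands to lui + ori for a patchable target, then jalr, then the
// branch delay slot nop; a register call needs only jalr plus the nop.
constexpr int kCallViaCodeObject = (2 + 1 + 1) * kInstrSize;
constexpr int kCallViaRegister = (1 + 1) * kInstrSize;

static_assert(kCallViaCodeObject == 4 * kInstrSize,
              "matches CallSize(Handle<Code>)");
static_assert(kCallViaRegister == 2 * kInstrSize,
              "matches CallSize(Register)");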
+
+
 // Note: To call gcc-compiled C code on mips, you must call thru t9.
 void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   if (target.is_reg()) {
       jalr(target.rm());
-  } else {    // !target.is_reg()
+  } else {  // !target.is_reg().
     if (!MustUseReg(target.rmode_)) {
       jal(target.imm32_);
-    } else {  // MustUseReg(target)
+    } else {  // MustUseReg(target).
+      // Must record previous source positions before the
+      // li() generates a new code target.
+      positions_recorder()->WriteRecordedPositions();
       li(t9, target);
       jalr(t9);
     }
@@ -1690,7 +1871,7 @@
       Branch(2, NegateCondition(cond), rs, rt);
       jalr(target.rm());
     }
-  } else {    // !target.is_reg()
+  } else {  // !target.is_reg().
     if (!MustUseReg(target.rmode_)) {
       if (cond == cc_always) {
         jal(target.imm32_);
@@ -1714,6 +1895,20 @@
 }
 
 
+void MacroAssembler::CallWithAstId(Handle<Code> code,
+                                   RelocInfo::Mode rmode,
+                                   unsigned ast_id,
+                                   Condition cond,
+                                   Register r1,
+                                   const Operand& r2) {
+  ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
+  ASSERT(ast_id != kNoASTId);
+  ASSERT(ast_id_for_reloc_info_ == kNoASTId);
+  ast_id_for_reloc_info_ = ast_id;
+  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
+}
+
+
 void MacroAssembler::Drop(int count,
                           Condition cond,
                           Register reg,
@@ -1779,13 +1974,6 @@
 }
 
 
-void MacroAssembler::Move(Register dst, Register src) {
-  if (!dst.is(src)) {
-    mov(dst, src);
-  }
-}
-
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 void MacroAssembler::DebugBreak() {
@@ -1800,7 +1988,7 @@
 
 
 // ---------------------------------------------------------------------------
-// Exception handling
+// Exception handling.
 
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
@@ -1868,6 +2056,159 @@
 }
 
 
+void MacroAssembler::Throw(Register value) {
+  // v0 is expected to hold the exception.
+  Move(v0, value);
+
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop the sp to the top of the handler.
+  li(a3, Operand(ExternalReference(Isolate::k_handler_address,
+                                      isolate())));
+  lw(sp, MemOperand(a3));
+
+  // Restore the next handler and frame pointer, discard handler state.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  pop(a2);
+  sw(a2, MemOperand(a3));
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  MultiPop(a3.bit() | fp.bit());
+
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of a
+  // JS entry frame.
+  // Set cp to NULL if fp is NULL.
+  Label done;
+  Branch(USE_DELAY_SLOT, &done, eq, fp, Operand(zero_reg));
+  mov(cp, zero_reg);   // In branch delay slot.
+  lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  bind(&done);
+
+#ifdef DEBUG
+  // When emitting debug_code, set ra as return address for the jump.
+  // 5 instructions: add: 1, pop: 2, jump: 2.
+  const int kOffsetRaInstructions = 5;
+  Label find_ra;
+
+  if (emit_debug_code()) {
+    // Compute ra for the Jump(t9).
+    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+    // This branch-and-link sequence is needed to get the current PC on mips,
+    // saved to the ra register. Then adjusted for instruction count.
+    bal(&find_ra);  // bal exposes branch-delay.
+    nop();  // Branch delay slot nop.
+    bind(&find_ra);
+    addiu(ra, ra, kOffsetRaBytes);
+  }
+#endif
+
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  pop(t9);  // 2 instructions: lw, add sp.
+  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
+
+  if (emit_debug_code()) {
+    // Make sure that the expected number of instructions were generated.
+    ASSERT_EQ(kOffsetRaInstructions,
+              InstructionsGeneratedSince(&find_ra));
+  }
+}
+
+
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+                                      Register value) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // v0 is expected to hold the exception.
+  Move(v0, value);
+
+  // Drop sp to the top stack handler.
+  li(a3, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+  lw(sp, MemOperand(a3));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  lw(a2, MemOperand(sp, kStateOffset));
+  Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  lw(sp, MemOperand(sp, kNextOffset));
+  jmp(&loop);
+  bind(&done);
+
+  // Set the top handler address to next handler past the current ENTRY handler.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  pop(a2);
+  sw(a2, MemOperand(a3));
+
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(
+           Isolate::k_external_caught_exception_address, isolate());
+    li(a0, Operand(false, RelocInfo::NONE));
+    li(a2, Operand(external_caught));
+    sw(a0, MemOperand(a2));
+
+    // Set pending exception and v0 to out of memory exception.
+    Failure* out_of_memory = Failure::OutOfMemoryException();
+    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+    li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
+                                        isolate())));
+    sw(v0, MemOperand(a2));
+  }
+
+  // Stack layout at this point. See also StackHandlerConstants.
+  // sp ->   state (ENTRY)
+  //         fp
+  //         ra
+
+  // Discard handler state (a2 is not used) and restore frame pointer.
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  MultiPop(a2.bit() | fp.bit());  // a2: discarded state.
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of a
+  // JS entry frame.
+  Label cp_null;
+  Branch(USE_DELAY_SLOT, &cp_null, eq, fp, Operand(zero_reg));
+  mov(cp, zero_reg);   // In the branch delay slot.
+  lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  bind(&cp_null);
+
+#ifdef DEBUG
+  // When emitting debug_code, set ra as return address for the jump.
+  // 5 instructions: add: 1, pop: 2, jump: 2.
+  const int kOffsetRaInstructions = 5;
+  Label find_ra;
+
+  if (emit_debug_code()) {
+    // Compute ra for the Jump(t9).
+    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+    // This branch-and-link sequence is needed to get the current PC on mips,
+    // saved to the ra register. Then adjusted for instruction count.
+    bal(&find_ra);  // bal exposes branch-delay slot.
+    nop();  // Branch delay slot nop.
+    bind(&find_ra);
+    addiu(ra, ra, kOffsetRaBytes);
+  }
+#endif
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  pop(t9);  // 2 instructions: lw, add sp.
+  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
+
+  if (emit_debug_code()) {
+    // Make sure that the expected number of instructions were generated.
+    ASSERT_EQ(kOffsetRaInstructions,
+              InstructionsGeneratedSince(&find_ra));
+  }
+}
+
+
 void MacroAssembler::AllocateInNewSpace(int object_size,
                                         Register result,
                                         Register scratch1,
@@ -1875,7 +2216,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       li(result, 0x7091);
       li(scratch1, 0x7191);
@@ -1923,7 +2264,7 @@
     lw(result, MemOperand(topaddr));
     lw(t9, MemOperand(topaddr, kPointerSize));
   } else {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Assert that result actually contains top on entry. t9 is used
       // immediately below so this use of t9 does not cause difference with
       // respect to register content between debug and release mode.
@@ -1954,7 +2295,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       li(result, 0x7091);
       li(scratch1, 0x7191);
@@ -1992,7 +2333,7 @@
     lw(result, MemOperand(topaddr));
     lw(t9, MemOperand(topaddr, kPointerSize));
   } else {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Assert that result actually contains top on entry. t9 is used
       // immediately below so this use of t9 does not cause difference with
       // respect to register content between debug and release mode.
@@ -2015,7 +2356,7 @@
   Branch(gc_required, Ugreater, scratch2, Operand(t9));
 
   // Update allocation top. result temporarily holds the new top.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     And(t9, scratch2, Operand(kObjectAlignmentMask));
     Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
   }
@@ -2206,12 +2547,70 @@
 }
 
 
+void MacroAssembler::CopyBytes(Register src,
+                               Register dst,
+                               Register length,
+                               Register scratch) {
+  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+
+  // Align src before copying in word size chunks.
+  bind(&align_loop);
+  Branch(&done, eq, length, Operand(zero_reg));
+  bind(&align_loop_1);
+  And(scratch, src, kPointerSize - 1);
+  Branch(&word_loop, eq, scratch, Operand(zero_reg));
+  lbu(scratch, MemOperand(src));
+  Addu(src, src, 1);
+  sb(scratch, MemOperand(dst));
+  Addu(dst, dst, 1);
+  Subu(length, length, Operand(1));
+  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+
+  // Copy bytes in word size chunks.
+  bind(&word_loop);
+  if (emit_debug_code()) {
+    And(scratch, src, kPointerSize - 1);
+    Assert(eq, "Expecting alignment for CopyBytes",
+        scratch, Operand(zero_reg));
+  }
+  Branch(&byte_loop, lt, length, Operand(kPointerSize));
+  lw(scratch, MemOperand(src));
+  Addu(src, src, kPointerSize);
+
+  // TODO(kalmard): Check if this can be optimized to use sw in most cases.
+  // Can't use unaligned access - copy byte by byte.
+  sb(scratch, MemOperand(dst, 0));
+  srl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 1));
+  srl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 2));
+  srl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 3));
+  Addu(dst, dst, 4);
+
+  Subu(length, length, Operand(kPointerSize));
+  Branch(&word_loop);
+
+  // Copy the last bytes if any left.
+  bind(&byte_loop);
+  Branch(&done, eq, length, Operand(zero_reg));
+  bind(&byte_loop_1);
+  lbu(scratch, MemOperand(src));
+  Addu(src, src, 1);
+  sb(scratch, MemOperand(dst));
+  Addu(dst, dst, 1);
+  Subu(length, length, Operand(1));
+  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+  bind(&done);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -2220,12 +2619,27 @@
 }
 
 
+void MacroAssembler::DispatchMap(Register obj,
+                                 Register scratch,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
+  bind(&fail);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Heap::RootListIndex index,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -2234,8 +2648,74 @@
 }
 
 
+void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+  CpuFeatures::Scope scope(FPU);
+  if (IsMipsSoftFloatABI) {
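+    // With the soft-float ABI the result is returned in the v0/v1 pair.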
+    Move(dst, v0, v1);
+  } else {
+    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+  CpuFeatures::Scope scope(FPU);
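+  // Under the hard-float o32 ABI the first double argument goes in f12;
+  // with soft-float it is passed in the a0/a1 register pair.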
+  if (!IsMipsSoftFloatABI) {
+    Move(f12, dreg);
+  } else {
+    Move(a0, a1, dreg);
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
+                                             DoubleRegister dreg2) {
+  CpuFeatures::Scope scope(FPU);
+  if (!IsMipsSoftFloatABI) {
+    if (dreg2.is(f12)) {
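+      // dreg2 already occupies f12, so move it to f14 before f12 is
+      // overwritten with dreg1.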
+      ASSERT(!dreg1.is(f14));
+      Move(f14, dreg2);
+      Move(f12, dreg1);
+    } else {
+      Move(f12, dreg1);
+      Move(f14, dreg2);
+    }
+  } else {
+    Move(a0, a1, dreg1);
+    Move(a2, a3, dreg2);
+  }
+}
+
+
+void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+                                             Register reg) {
+  CpuFeatures::Scope scope(FPU);
+  if (!IsMipsSoftFloatABI) {
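+    // A leading double argument goes in f12 under the hard-float o32 ABI;
+    // the following integer argument then lands in a2.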
+    Move(f12, dreg);
+    Move(a2, reg);
+  } else {
+    Move(a2, reg);
+    Move(a0, a1, dreg);
+  }
+}
+
+
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+  // This macro takes the dst register to make the code more readable
+  // at the call sites. However, the dst register has to be t1 to
+  // follow the calling convention which requires the call type to be
+  // in t1.
+  ASSERT(dst.is(t1));
+  if (call_kind == CALL_AS_FUNCTION) {
+    li(dst, Operand(Smi::FromInt(1)));
+  } else {
+    li(dst, Operand(Smi::FromInt(0)));
+  }
+}
+
+
 // -----------------------------------------------------------------------------
-// JavaScript invokes
+// JavaScript invokes.
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
@@ -2243,7 +2723,8 @@
                                     Register code_reg,
                                     Label* done,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   bool definitely_matches = false;
   Label regular_invoke;
 
@@ -2278,13 +2759,11 @@
         li(a2, Operand(expected.immediate()));
       }
     }
+  } else if (actual.is_immediate()) {
+    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
+    li(a0, Operand(actual.immediate()));
   } else {
-    if (actual.is_immediate()) {
-      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
-      li(a0, Operand(actual.immediate()));
-    } else {
-      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
-    }
+    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
   }
 
   if (!definitely_matches) {
@@ -2296,10 +2775,13 @@
     Handle<Code> adaptor =
         isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+      SetCallKind(t1, call_kind);
       Call(adaptor, RelocInfo::CODE_TARGET);
-      if (post_call_generator != NULL) post_call_generator->Generate();
+      call_wrapper.AfterCall();
       jmp(done);
     } else {
+      SetCallKind(t1, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
     }
     bind(&regular_invoke);
@@ -2311,15 +2793,18 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
-                                PostCallGenerator* post_call_generator) {
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
   Label done;
 
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
-                 post_call_generator);
+                 call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
+    SetCallKind(t1, call_kind);
     Call(code);
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(t1, call_kind);
     Jump(code);
   }
   // Continue here if InvokePrologue does handle the invocation due to
@@ -2332,13 +2817,17 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                CallKind call_kind) {
   Label done;
 
-  InvokePrologue(expected, actual, code, no_reg, &done, flag);
+  InvokePrologue(expected, actual, code, no_reg, &done, flag,
+                 NullCallWrapper(), call_kind);
   if (flag == CALL_FUNCTION) {
+    SetCallKind(t1, call_kind);
     Call(code, rmode);
   } else {
+    SetCallKind(t1, call_kind);
     Jump(code, rmode);
   }
   // Continue here if InvokePrologue does handle the invocation due to
@@ -2350,7 +2839,8 @@
 void MacroAssembler::InvokeFunction(Register function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    PostCallGenerator* post_call_generator) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   // Contract with called JS functions requires that function is passed in a1.
   ASSERT(function.is(a1));
   Register expected_reg = a2;
@@ -2365,7 +2855,7 @@
   lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(expected_reg);
-  InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
@@ -2478,7 +2968,7 @@
 
 
 // -----------------------------------------------------------------------------
-// Runtime calls
+// Runtime calls.
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                               Register r1, const Operand& r2) {
@@ -2487,11 +2977,136 @@
 }
 
 
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
+                                         Register r1, const Operand& r2) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+  return result;
+}
+
+
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
+                                             Condition cond,
+                                             Register r1,
+                                             const Operand& r2) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+  return result;
+}
+
+
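+// Byte offset of ref0 relative to ref1, used below to address the handle
+// scope fields relative to a single base register.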
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+  return ref0.address() - ref1.address();
+}
+
+
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+    ExternalReference function, int stack_space) {
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address();
+  const int kNextOffset = 0;
+  const int kLimitOffset = AddressOffset(
+      ExternalReference::handle_scope_limit_address(),
+      next_address);
+  const int kLevelOffset = AddressOffset(
+      ExternalReference::handle_scope_level_address(),
+      next_address);
+
+  // Allocate HandleScope in callee-save registers.
+  li(s3, Operand(next_address));
+  lw(s0, MemOperand(s3, kNextOffset));
+  lw(s1, MemOperand(s3, kLimitOffset));
+  lw(s2, MemOperand(s3, kLevelOffset));
+  Addu(s2, s2, Operand(1));
+  sw(s2, MemOperand(s3, kLevelOffset));
+
+  // The O32 ABI requires us to pass a pointer in a0 where the returned struct
+  // (4 bytes) will be placed. This is also built into the Simulator.
+  // Set up the pointer to the returned value (a0). It was allocated in
+  // EnterExitFrame.
+  addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
+
+  // Native call returns to the DirectCEntry stub which redirects to the
+  // return address pushed on stack (could have moved after GC).
+  // DirectCEntry stub itself is generated early and never moves.
+  DirectCEntryStub stub;
+  stub.GenerateCall(this, function);
+
+  // As mentioned above, on MIPS a pointer is returned - we need to dereference
+  // it to get the actual return value (which is also a pointer).
+  lw(v0, MemOperand(v0));
+
+  Label promote_scheduled_exception;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+
+  // If the result is non-zero, dereference it to get the result value;
+  // otherwise set it to undefined.
+  Label skip;
+  LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+  Branch(&skip, eq, v0, Operand(zero_reg));
+  lw(a0, MemOperand(v0));
+  bind(&skip);
+  mov(v0, a0);
+
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  sw(s0, MemOperand(s3, kNextOffset));
+  if (emit_debug_code()) {
+    lw(a1, MemOperand(s3, kLevelOffset));
+    Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
+  }
+  Subu(s2, s2, Operand(1));
+  sw(s2, MemOperand(s3, kLevelOffset));
+  lw(at, MemOperand(s3, kLimitOffset));
+  Branch(&delete_allocated_handles, ne, s1, Operand(at));
+
+  // Check if the function scheduled an exception.
+  bind(&leave_exit_frame);
+  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
+  lw(t1, MemOperand(at));
+  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+  li(s0, Operand(stack_space));
+  LeaveExitFrame(false, s0);
+  Ret();
+
+  bind(&promote_scheduled_exception);
+  MaybeObject* result = TryTailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
+  if (result->IsFailure()) {
+    return result;
+  }
+
+  // HandleScope limit has changed. Delete allocated extensions.
+  bind(&delete_allocated_handles);
+  sw(s1, MemOperand(s3, kLimitOffset));
+  mov(s0, v0);
+  mov(a0, v0);
+  PrepareCallCFunction(1, s1);
+  li(a0, Operand(ExternalReference::isolate_address()));
+  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
+      1);
+  mov(v0, s0);
+  jmp(&leave_exit_frame);
+
+  return result;
+}
+
 
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
@@ -2554,7 +3169,6 @@
 }
 
 
-
 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
                                             FPURegister value,
                                             Register scratch1) {
@@ -2564,6 +3178,84 @@
 }
 
 
+void MacroAssembler::AdduAndCheckForOverflow(Register dst,
+                                             Register left,
+                                             Register right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  ASSERT(!dst.is(overflow_dst));
+  ASSERT(!dst.is(scratch));
+  ASSERT(!overflow_dst.is(scratch));
+  ASSERT(!overflow_dst.is(left));
+  ASSERT(!overflow_dst.is(right));
+  ASSERT(!left.is(right));
+
+  // TODO(kalmard): There must be a way to optimize the dst == left and
+  // dst == right cases.
+
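+  // Signed addition overflows iff both operands have the same sign and the
+  // result has the opposite sign; the xor/and sequence below leaves the sign
+  // bit of overflow_dst set exactly in that case (see BranchOnOverflow).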
+  if (dst.is(left)) {
+    addu(overflow_dst, left, right);
+    xor_(dst, overflow_dst, left);
+    xor_(scratch, overflow_dst, right);
+    and_(scratch, scratch, dst);
+    mov(dst, overflow_dst);
+    mov(overflow_dst, scratch);
+  } else if (dst.is(right)) {
+    addu(overflow_dst, left, right);
+    xor_(dst, overflow_dst, right);
+    xor_(scratch, overflow_dst, left);
+    and_(scratch, scratch, dst);
+    mov(dst, overflow_dst);
+    mov(overflow_dst, scratch);
+  } else {
+    addu(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, dst, right);
+    and_(overflow_dst, scratch, overflow_dst);
+  }
+}
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst,
+                                             Register left,
+                                             Register right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  ASSERT(!dst.is(overflow_dst));
+  ASSERT(!dst.is(scratch));
+  ASSERT(!overflow_dst.is(scratch));
+  ASSERT(!overflow_dst.is(left));
+  ASSERT(!overflow_dst.is(right));
+  ASSERT(!left.is(right));
+  ASSERT(!scratch.is(left));
+  ASSERT(!scratch.is(right));
+
+  // TODO(kalmard): There must be a way to optimize the dst == left and
+  // dst == right cases.
+
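+  // Signed subtraction overflows iff the operands have different signs and
+  // the result's sign differs from the left operand's; the sign bit of
+  // (dst ^ left) & (left ^ right) is set exactly in that case.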
+  if (dst.is(left)) {
+    subu(overflow_dst, left, right);
+    xor_(scratch, overflow_dst, left);
+    xor_(dst, left, right);
+    and_(scratch, scratch, dst);
+    mov(dst, overflow_dst);
+    mov(overflow_dst, scratch);
+  } else if (dst.is(right)) {
+    subu(overflow_dst, left, right);
+    xor_(dst, left, right);
+    xor_(scratch, overflow_dst, left);
+    and_(scratch, scratch, dst);
+    mov(dst, overflow_dst);
+    mov(overflow_dst, scratch);
+  } else {
+    subu(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, left, right);
+    and_(overflow_dst, scratch, overflow_dst);
+  }
+}
+
+
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // All parameters are on the stack. v0 has the return value after call.
@@ -2623,6 +3315,16 @@
   JumpToExternalReference(ext);
 }
 
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+    const ExternalReference& ext, int num_arguments, int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  li(a0, num_arguments);
+  return TryJumpToExternalReference(ext);
+}
+
 
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
@@ -2640,15 +3342,24 @@
 }
 
 
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+    const ExternalReference& builtin) {
+  li(a1, Operand(builtin));
+  CEntryStub stub(1);
+  return TryTailCallStub(&stub);
+}
+
+
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeJSFlags flags,
-                                   PostCallGenerator* post_call_generator) {
+                                   InvokeFlag flag,
+                                   const CallWrapper& call_wrapper) {
   GetBuiltinEntry(t9, id);
-  if (flags == CALL_JS) {
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(t9));
     Call(t9);
-    if (post_call_generator != NULL) post_call_generator->Generate();
+    call_wrapper.AfterCall();
   } else {
-    ASSERT(flags == JUMP_JS);
+    ASSERT(flag == JUMP_FUNCTION);
     Jump(t9);
   }
 }
@@ -2708,18 +3419,18 @@
 
 
 // -----------------------------------------------------------------------------
-// Debugging
+// Debugging.
 
 void MacroAssembler::Assert(Condition cc, const char* msg,
                             Register rs, Operand rt) {
-  if (FLAG_debug_code)
+  if (emit_debug_code())
     Check(cc, msg, rs, rt);
 }
 
 
 void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                           Heap::RootListIndex index) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     LoadRoot(at, index);
     Check(eq, "Register did not match expected root", reg, Operand(at));
   }
@@ -2727,10 +3438,10 @@
 
 
 void MacroAssembler::AssertFastElements(Register elements) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     ASSERT(!elements.is(at));
     Label ok;
-    Push(elements);
+    push(elements);
     lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
     LoadRoot(at, Heap::kFixedArrayMapRootIndex);
     Branch(&ok, eq, elements, Operand(at));
@@ -2738,7 +3449,7 @@
     Branch(&ok, eq, elements, Operand(at));
     Abort("JSObject with fast elements map has slow elements");
     bind(&ok);
-    Pop(elements);
+    pop(elements);
   }
 }
 
@@ -2748,7 +3459,7 @@
   Label L;
   Branch(&L, cc, rs, rt);
   Abort(msg);
-  // will not return here
+  // Will not return here.
   bind(&L);
 }
 
@@ -2774,11 +3485,11 @@
   AllowStubCallsScope allow_scope(this, true);
 
   li(a0, Operand(p0));
-  Push(a0);
+  push(a0);
   li(a0, Operand(Smi::FromInt(p1 - p0)));
-  Push(a0);
+  push(a0);
   CallRuntime(Runtime::kAbort, 2);
-  // will not return here
+  // Will not return here.
   if (is_trampoline_pool_blocked()) {
     // If the calling code cares about the exact number of
     // instructions generated, we insert padding here to keep the size
@@ -2805,11 +3516,22 @@
       lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
       lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
     }
-    // The context may be an intermediate context, not a function context.
-    lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-  } else {  // Slot is in the current function context.
-    // The context may be an intermediate context, not a function context.
-    lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {
+    // Slot is in the current function context.  Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in cp).
+    Move(dst, cp);
+  }
+
+  // We should not have found a 'with' context by walking the context chain
+  // (i.e., the static scope chain and runtime context chain do not agree).
+  // A variable occurring in such a scope should have slot type LOOKUP and
+  // not CONTEXT.
+  if (emit_debug_code()) {
+    lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+    Check(eq, "Yo dawg, I heard you liked function contexts "
+              "so I put function contexts in all your contexts",
+               dst, Operand(t9));
   }
 }
 
@@ -2830,9 +3552,9 @@
                                                   Register scratch) {
   // Load the initial map. The global functions all have initial maps.
   lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
     Branch(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
@@ -2862,38 +3584,34 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(Register hold_argc,
-                                    Register hold_argv,
-                                    Register hold_function,
-                                    bool save_doubles) {
-  // a0 is argc.
-  sll(t8, a0, kPointerSizeLog2);
-  addu(hold_argv, sp, t8);
-  addiu(hold_argv, hold_argv, -kPointerSize);
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+                                    int stack_space) {
+  // Setup the frame structure on the stack.
+  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
 
-  // Compute callee's stack pointer before making changes and save it as
-  // t9 register so that it is restored as sp register on exit, thereby
-  // popping the args.
-  // t9 = sp + kPointerSize * #args
-  addu(t9, sp, t8);
-
-  // Compute the argv pointer and keep it in a callee-saved register.
-  // This only seems to be needed for crankshaft and may cause problems
-  // so it's disabled for now.
-  // Subu(s6, t9, Operand(kPointerSize));
-
-  // Align the stack at this point.
-  AlignStack(0);
+  // This is how the stack will look:
+  // fp + 2 (==kCallerSPDisplacement) - old stack's end
+  // [fp + 1 (==kCallerPCOffset)] - saved old ra
+  // [fp + 0 (==kCallerFPOffset)] - saved old fp
+  // [fp - 1 (==kSPOffset)] - sp of the called function
+  // [fp - 2 (==kCodeOffset)] - CodeObject
+  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+  //   new stack (will contain saved ra)
 
   // Save registers.
-  addiu(sp, sp, -12);
-  sw(t9, MemOperand(sp, 8));
-  sw(ra, MemOperand(sp, 4));
-  sw(fp, MemOperand(sp, 0));
-  mov(fp, sp);  // Setup new frame pointer.
+  addiu(sp, sp, -4 * kPointerSize);
+  sw(ra, MemOperand(sp, 3 * kPointerSize));
+  sw(fp, MemOperand(sp, 2 * kPointerSize));
+  addiu(fp, sp, 2 * kPointerSize);  // Setup new frame pointer.
 
-  li(t8, Operand(CodeObject()));
-  Push(t8);  // Accessed from ExitFrame::code_slot.
+  if (emit_debug_code()) {
+    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+  }
+
+  li(t8, Operand(CodeObject()));  // Accessed from ExitFrame::code_slot.
+  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
 
   // Save the frame pointer and the context in top.
   li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
@@ -2901,47 +3619,49 @@
   li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
   sw(cp, MemOperand(t8));
 
-  // Setup argc and the builtin function in callee-saved registers.
-  mov(hold_argc, a0);
-  mov(hold_function, a1);
-
-  // Optionally save all double registers.
+  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   if (save_doubles) {
-#ifdef DEBUG
-    int frame_alignment = ActivationFrameAlignment();
-#endif
-    // The stack alignment code above made sp unaligned, so add space for one
-    // more double register and use aligned addresses.
+    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
     ASSERT(kDoubleSize == frame_alignment);
-    // Mark the frame as containing doubles by pushing a non-valid return
-    // address, i.e. 0.
-    ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
-    push(zero_reg);  // Marker and alignment word.
-    int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize;
+    if (frame_alignment > 0) {
+      ASSERT(IsPowerOf2(frame_alignment));
+      And(sp, sp, Operand(-frame_alignment));  // Align stack.
+    }
+    int space = FPURegister::kNumRegisters * kDoubleSize;
     Subu(sp, sp, Operand(space));
     // Remember: we only need to save every 2nd double FPU value.
     for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
       FPURegister reg = FPURegister::from_code(i);
-      sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize));
+      sdc1(reg, MemOperand(sp, i * kDoubleSize));
     }
-    // Note that f0 will be accessible at fp - 2*kPointerSize -
-    // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the
-    // alignment word were pushed after the fp.
   }
+
+  // Reserve space for the return address, stack space and an optional slot
+  // (used by the DirectCEntryStub to hold the return value if a struct is
+  // returned) and align the frame preparing for calling the runtime function.
+  ASSERT(stack_space >= 0);
+  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
+  if (frame_alignment > 0) {
+    ASSERT(IsPowerOf2(frame_alignment));
+    And(sp, sp, Operand(-frame_alignment));  // Align stack.
+  }
+
+  // Set the exit frame sp value to point just before the return address
+  // location.
+  addiu(at, sp, kPointerSize);
+  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
 }
 
 
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles,
+                                    Register argument_count) {
   // Optionally restore all double registers.
   if (save_doubles) {
-    // TODO(regis): Use vldrm instruction.
     // Remember: we only need to restore every 2nd double FPU value.
+    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
     for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
       FPURegister reg = FPURegister::from_code(i);
-      // Register f30-f31 is just below the marker.
-      const int offset = ExitFrameConstants::kMarkerOffset;
-      ldc1(reg, MemOperand(fp,
-          (i - FPURegister::kNumRegisters) * kDoubleSize + offset));
+      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
     }
   }
 
@@ -2958,11 +3678,13 @@
 
   // Pop the arguments, restore registers, and return.
   mov(sp, fp);  // Respect ABI stack constraint.
-  lw(fp, MemOperand(sp, 0));
-  lw(ra, MemOperand(sp, 4));
-  lw(sp, MemOperand(sp, 8));
-  jr(ra);
-  nop();  // Branch delay slot nop.
+  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+  addiu(sp, sp, 8);
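+  // If an argument count register was supplied, also drop the arguments from
+  // the stack.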
+  if (argument_count.is_valid()) {
+    sll(t8, argument_count, kPointerSizeLog2);
+    addu(sp, sp, t8);
+  }
 }
 
 
@@ -2996,39 +3718,24 @@
 #endif  // defined(V8_HOST_ARCH_MIPS)
 }
 
+void MacroAssembler::AssertStackIsAligned() {
+  if (emit_debug_code()) {
+      const int frame_alignment = ActivationFrameAlignment();
+      const int frame_alignment_mask = frame_alignment - 1;
 
-void MacroAssembler::AlignStack(int offset) {
-  // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
-  //     and an offset of 1 aligns to 4 modulo 8 bytes.
-#if defined(V8_HOST_ARCH_MIPS)
-  // Running on the real platform. Use the alignment as mandated by the local
-  // environment.
-  // Note: This will break if we ever start generating snapshots on one MIPS
-  // platform for another MIPS platform with a different alignment.
-  int activation_frame_alignment = OS::ActivationFrameAlignment();
-#else  // defined(V8_HOST_ARCH_MIPS)
-  // If we are using the simulator then we should always align to the expected
-  // alignment. As the simulator is used to generate snapshots we do not know
-  // if the target platform will need alignment, so we will always align at
-  // this point here.
-  int activation_frame_alignment = 2 * kPointerSize;
-#endif  // defined(V8_HOST_ARCH_MIPS)
-  if (activation_frame_alignment != kPointerSize) {
-    // This code needs to be made more general if this assert doesn't hold.
-    ASSERT(activation_frame_alignment == 2 * kPointerSize);
-    if (offset == 0) {
-      andi(t8, sp, activation_frame_alignment - 1);
-      Push(zero_reg, eq, t8, zero_reg);
-    } else {
-      andi(t8, sp, activation_frame_alignment - 1);
-      addiu(t8, t8, -4);
-      Push(zero_reg, eq, t8, zero_reg);
+      if (frame_alignment > kPointerSize) {
+        Label alignment_as_expected;
+        ASSERT(IsPowerOf2(frame_alignment));
+        andi(at, sp, frame_alignment_mask);
+        Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+        // Don't use Check here; it would call Runtime_Abort and re-enter here.
+        stop("Unexpected stack alignment");
+        bind(&alignment_as_expected);
+      }
     }
-  }
 }
 
 
-
 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
     Register reg,
     Register scratch,
@@ -3078,6 +3785,18 @@
 }
 
 
+void MacroAssembler::AbortIfNotString(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  And(t0, object, Operand(kSmiTagMask));
+  Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
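+  // The instance type check below clobbers 'object', so preserve it around
+  // the check.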
+  push(object);
+  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+  Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
+  pop(object);
+}
+
+
 void MacroAssembler::AbortIfNotRootValue(Register src,
                                          Heap::RootListIndex root_value_index,
                                          const char* message) {
@@ -3169,9 +3888,6 @@
 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   int frame_alignment = ActivationFrameAlignment();
 
-  // Reserve space for Isolate address which is always passed as last parameter
-  num_arguments += 1;
-
   // Up to four simple arguments are passed in registers a0..a3.
   // Those four arguments must have reserved argument slots on the stack for
   // mips, even though those argument slots are not normally used.
@@ -3198,7 +3914,7 @@
 
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
-  CallCFunctionHelper(no_reg, function, at, num_arguments);
+  CallCFunctionHelper(no_reg, function, t8, num_arguments);
 }
 
 
@@ -3216,21 +3932,6 @@
                                          ExternalReference function_reference,
                                          Register scratch,
                                          int num_arguments) {
-  // Push Isolate address as the last argument.
-  if (num_arguments < kRegisterPassedArguments) {
-    Register arg_to_reg[] = {a0, a1, a2, a3};
-    Register r = arg_to_reg[num_arguments];
-    li(r, Operand(ExternalReference::isolate_address()));
-  } else {
-    int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
-                                 (StandardFrameConstants::kCArgsSlotsSize /
-                                  kPointerSize);
-    // Push Isolate address on the stack after the arguments.
-    li(scratch, Operand(ExternalReference::isolate_address()));
-    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
-  }
-  num_arguments += 1;
-
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -3257,13 +3958,12 @@
   // Just call directly. The function called cannot cause a GC, or
   // allow preemption, so the return address in the link register
   // stays correct.
-  if (!function.is(t9)) {
-    mov(t9, function);
-    function = t9;
-  }
 
   if (function.is(no_reg)) {
-    li(t9, Operand(function_reference));
+    function = t9;
+    li(function, Operand(function_reference));
+  } else if (!function.is(t9)) {
+    mov(t9, function);
     function = t9;
   }
 
@@ -3286,12 +3986,22 @@
 #undef BRANCH_ARGS_CHECK
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  lw(descriptors,
+     FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
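+  // A Smi here means the map stores bit field 3 in this slot and has no
+  // instance descriptors; use the canonical empty descriptor array instead.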
+  Label not_smi;
+  JumpIfNotSmi(descriptors, &not_smi);
+  li(descriptors, Operand(FACTORY->empty_descriptor_array()));
+  bind(&not_smi);
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(address, size_ + Assembler::kGap) {
+      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap on order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
@@ -3309,8 +4019,8 @@
 }
 
 
-void CodePatcher::Emit(Instr x) {
-  masm()->emit(x);
+void CodePatcher::Emit(Instr instr) {
+  masm()->emit(instr);
 }
 
 
@@ -3319,7 +4029,26 @@
 }
 
 
-#endif  // ENABLE_DEBUGGER_SUPPORT
+void CodePatcher::ChangeBranchCondition(Condition cond) {
+  Instr instr = Assembler::instr_at(masm_.pc_);
+  ASSERT(Assembler::IsBranch(instr));
+  uint32_t opcode = Assembler::GetOpcodeField(instr);
+  // Currently only the 'eq' and 'ne' cond values are supported and the simple
+  // branch instructions (with opcode being the branch type).
+  // There are some special cases (see Assembler::IsBranch()) so extending this
+  // would be tricky.
+  ASSERT(opcode == BEQ ||
+         opcode == BNE ||
+         opcode == BLEZ ||
+         opcode == BGTZ ||
+         opcode == BEQL ||
+         opcode == BNEL ||
+         opcode == BLEZL ||
+         opcode == BGTZL);
+  opcode = (cond == eq) ? BEQ : BNE;
+  instr = (instr & ~kOpcodeMask) | opcode;
+  masm_.emit(instr);
+}
 
 
 } }  // namespace v8::internal
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 7ff9e17..bcb459e 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,13 +30,13 @@
 
 #include "assembler.h"
 #include "mips/assembler-mips.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declaration.
 class JumpTarget;
-class PostCallGenerator;
 
 // Reserved Register Usage Summary.
 //
@@ -53,17 +53,12 @@
 // Registers aliases
 // cp is assumed to be a callee saved register.
 const Register roots = s6;  // Roots array pointer.
-const Register cp = s7;     // JavaScript context pointer
-const Register fp = s8_fp;  // Alias fp
-// Register used for condition evaluation.
+const Register cp = s7;     // JavaScript context pointer.
+const Register fp = s8_fp;  // Alias for fp.
+// Registers used for condition evaluation.
 const Register condReg1 = s4;
 const Register condReg2 = s5;
 
-enum InvokeJSFlags {
-  CALL_JS,
-  JUMP_JS
-};
-
 
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
@@ -98,15 +93,19 @@
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  MacroAssembler(void* buffer, int size);
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller to never invoke such function on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
 
-// Arguments macros
+// Arguments macros.
 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
 #define COND_ARGS cond, r1, r2
 
-// ** Prototypes
+// Prototypes.
 
-// * Prototypes for functions with no target (eg Ret()).
+// Prototypes for functions with no target (e.g. Ret()).
 #define DECLARE_NOTARGET_PROTOTYPE(Name) \
   void Name(BranchDelaySlot bd = PROTECT); \
   void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
@@ -114,7 +113,7 @@
     Name(COND_ARGS, bd); \
   }
 
-// * Prototypes for functions with a target.
+// Prototypes for functions with a target.
 
 // Cases when relocation may be needed.
 #define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
@@ -152,7 +151,7 @@
     Name(target, COND_ARGS, bd); \
   }
 
-// ** Target prototypes.
+// Target prototypes.
 
 #define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
   DECLARE_NORELOC_PROTOTYPE(Name, Register) \
@@ -181,6 +180,16 @@
 #undef DECLARE_JUMP_CALL_PROTOTYPES
 #undef DECLARE_BRANCH_PROTOTYPES
 
+  void CallWithAstId(Handle<Code> code,
+                     RelocInfo::Mode rmode,
+                     unsigned ast_id,
+                     Condition cond = al,
+                     Register r1 = zero_reg,
+                     const Operand& r2 = Operand(zero_reg));
+
+  int CallSize(Register reg);
+  int CallSize(Handle<Code> code, RelocInfo::Mode rmode);
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count,
@@ -198,9 +207,28 @@
   void Swap(Register reg1, Register reg2, Register scratch = no_reg);
 
   void Call(Label* target);
-  // May do nothing if the registers are identical.
-  void Move(Register dst, Register src);
 
+  inline void Move(Register dst, Register src) {
+    if (!dst.is(src)) {
+      mov(dst, src);
+    }
+  }
+
+  inline void Move(FPURegister dst, FPURegister src) {
+    if (!dst.is(src)) {
+      mov_d(dst, src);
+    }
+  }
+
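+  // The next two overloads move the 32-bit halves of a double between an FPU
+  // register pair and a GPR pair (the low word travels via dst_low/src_low).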
+  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
+    mfc1(dst_low, src);
+    mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+  }
+
+  inline void Move(FPURegister dst, Register src_low, Register src_high) {
+    mtc1(src_low, dst);
+    mtc1(src_high, FPURegister::from_code(dst.code() + 1));
+  }
 
   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it used by v8, for example in
@@ -262,7 +290,7 @@
 
 
   // ---------------------------------------------------------------------------
-  // Inline caching support
+  // Inline caching support.
 
   // Generate code for checking access rights - used for security checks
   // on access to global objects across environments. The holder register
@@ -306,7 +334,7 @@
 
 
   // ---------------------------------------------------------------------------
-  // Allocation support
+  // Allocation support.
 
   // Allocate an object in new space. The object_size is specified
   // either in bytes or in words if the allocation flag SIZE_IN_WORDS
@@ -373,7 +401,7 @@
                                    Label* gc_required);
 
   // ---------------------------------------------------------------------------
-  // Instruction macros
+  // Instruction macros.
 
 #define DEFINE_INSTRUCTION(instr)                                              \
   void instr(Register rd, Register rs, const Operand& rt);                     \
@@ -405,6 +433,7 @@
   DEFINE_INSTRUCTION(Or);
   DEFINE_INSTRUCTION(Xor);
   DEFINE_INSTRUCTION(Nor);
+  DEFINE_INSTRUCTION2(Neg);
 
   DEFINE_INSTRUCTION(Slt);
   DEFINE_INSTRUCTION(Sltu);
@@ -416,12 +445,12 @@
 #undef DEFINE_INSTRUCTION2
 
 
-  //------------Pseudo-instructions-------------
+  // ---------------------------------------------------------------------------
+  // Pseudo-instructions.
 
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
 
-
-  // load int32 in the rd register
+  // Load int32 in the rd register.
   void li(Register rd, Operand j, bool gen2instr = false);
   inline void li(Register rd, int32_t j, bool gen2instr = false) {
     li(rd, Operand(j), gen2instr);
@@ -430,100 +459,88 @@
     li(dst, Operand(value), gen2instr);
   }
 
-  // Exception-generating instructions and debugging support
+  // Exception-generating instructions and debugging support.
   void stop(const char* msg);
 
-
   // Push multiple registers on the stack.
   // Registers are saved in numerical order, with higher numbered registers
-  // saved in higher memory addresses
+  // saved in higher memory addresses.
   void MultiPush(RegList regs);
   void MultiPushReversed(RegList regs);
 
-  void Push(Register src) {
+  // Lower case push() for compatibility with arch-independent code.
+  void push(Register src) {
     Addu(sp, sp, Operand(-kPointerSize));
     sw(src, MemOperand(sp, 0));
   }
 
-  // Push two registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2, Condition cond = al) {
-    ASSERT(cond == al);  // Do not support conditional versions yet.
+  // Push two registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2) {
     Subu(sp, sp, Operand(2 * kPointerSize));
     sw(src1, MemOperand(sp, 1 * kPointerSize));
     sw(src2, MemOperand(sp, 0 * kPointerSize));
   }
 
-  // Push three registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
-    ASSERT(cond == al);  // Do not support conditional versions yet.
-    Addu(sp, sp, Operand(3 * -kPointerSize));
+  // Push three registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3) {
+    Subu(sp, sp, Operand(3 * kPointerSize));
     sw(src1, MemOperand(sp, 2 * kPointerSize));
     sw(src2, MemOperand(sp, 1 * kPointerSize));
     sw(src3, MemOperand(sp, 0 * kPointerSize));
   }
 
-  // Push four registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1, Register src2,
-            Register src3, Register src4, Condition cond = al) {
-    ASSERT(cond == al);  // Do not support conditional versions yet.
-    Addu(sp, sp, Operand(4 * -kPointerSize));
+  // Push four registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Register src4) {
+    Subu(sp, sp, Operand(4 * kPointerSize));
     sw(src1, MemOperand(sp, 3 * kPointerSize));
     sw(src2, MemOperand(sp, 2 * kPointerSize));
     sw(src3, MemOperand(sp, 1 * kPointerSize));
     sw(src4, MemOperand(sp, 0 * kPointerSize));
   }
 
-  inline void push(Register src) { Push(src); }
-  inline void pop(Register src) { Pop(src); }
-
   void Push(Register src, Condition cond, Register tst1, Register tst2) {
-    // Since we don't have conditionnal execution we use a Branch.
+    // Since we don't have conditional execution we use a Branch.
     Branch(3, cond, tst1, Operand(tst2));
-    Addu(sp, sp, Operand(-kPointerSize));
+    Subu(sp, sp, Operand(kPointerSize));
     sw(src, MemOperand(sp, 0));
   }
 
-
   // Pops multiple values from the stack and load them in the
   // registers specified in regs. Pop order is the opposite as in MultiPush.
   void MultiPop(RegList regs);
   void MultiPopReversed(RegList regs);
-  void Pop(Register dst) {
+
+  // Lower case pop() for compatibility with arch-independent code.
+  void pop(Register dst) {
     lw(dst, MemOperand(sp, 0));
     Addu(sp, sp, Operand(kPointerSize));
   }
+
+  // Pop two registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2) {
+    ASSERT(!src1.is(src2));
+    lw(src2, MemOperand(sp, 0 * kPointerSize));
+    lw(src1, MemOperand(sp, 1 * kPointerSize));
+    Addu(sp, sp, 2 * kPointerSize);
+  }
+
   void Pop(uint32_t count = 1) {
     Addu(sp, sp, Operand(count * kPointerSize));
   }
 
-  // ---------------------------------------------------------------------------
-  // These functions are only used by crankshaft, so they are currently
-  // unimplemented.
-
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
-  void PushSafepointRegisters() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  void PopSafepointRegisters() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  void PushSafepointRegistersAndDoubles() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  void PopSafepointRegistersAndDoubles() {
-    UNIMPLEMENTED_MIPS();
-  }
-
-  static int SafepointRegisterStackIndex(int reg_code) {
-    UNIMPLEMENTED_MIPS();
-    return 0;
-  }
-
-  // ---------------------------------------------------------------------------
+  void PushSafepointRegisters();
+  void PopSafepointRegisters();
+  void PushSafepointRegistersAndDoubles();
+  void PopSafepointRegistersAndDoubles();
+  // Store value in register src in the safepoint stack slot for
+  // register dst.
+  void StoreToSafepointRegisterSlot(Register src, Register dst);
+  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
+  // Load the value of the src register from its safepoint stack slot
+  // into register dst.
+  void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
   // MIPS32 R2 instruction macro.
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -548,8 +565,19 @@
                       FPURegister double_scratch,
                       Label *not_int32);
 
+  // Helper for EmitECMATruncate.
+  // This will truncate a floating-point value outside of the signed 32-bit
+  // integer range to a 32-bit signed integer.
+  // Expects the double value loaded in input_high and input_low.
+  // Exits with the answer in 'result'.
+  // Note that this code does not work for values in the 32-bit range!
+  void EmitOutOfInt32RangeTruncate(Register result,
+                                   Register input_high,
+                                   Register input_low,
+                                   Register scratch);
+
   // -------------------------------------------------------------------------
-  // Activation frames
+  // Activation frames.
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
   void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
@@ -558,23 +586,21 @@
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
   // Enter exit frame.
-  // Expects the number of arguments in register a0 and
-  // the builtin function to call in register a1.
-  // On output hold_argc, hold_function, and hold_argv are setup.
-  void EnterExitFrame(Register hold_argc,
-                      Register hold_argv,
-                      Register hold_function,
-                      bool save_doubles);
+  // argc - argument count to be dropped by LeaveExitFrame.
+  // save_doubles - saves FPU registers on stack, currently disabled.
+  // stack_space - extra stack space.
+  void EnterExitFrame(bool save_doubles,
+                      int stack_space = 0);
 
-  // Leave the current exit frame. Expects the return value in v0.
-  void LeaveExitFrame(bool save_doubles);
-
-  // Align the stack by optionally pushing a Smi zero.
-  void AlignStack(int offset);    // TODO(mips) : remove this function.
+  // Leave the current exit frame.
+  void LeaveExitFrame(bool save_doubles, Register arg_count);
 
   // Get the actual activation frame alignment for target environment.
   static int ActivationFrameAlignment();
 
+  // Make sure the stack is aligned. Only emits code in debug mode.
+  void AssertStackIsAligned();
+
   void LoadContext(Register dst, int context_chain_length);
 
   void LoadGlobalFunction(int index, Register function);
@@ -586,27 +612,35 @@
                                     Register scratch);
 
   // -------------------------------------------------------------------------
-  // JavaScript invokes
+  // JavaScript invokes.
+
+  // Setup call kind marking in t1. The method takes t1 as an
+  // explicit first parameter to make the code more readable at the
+  // call sites.
+  void SetCallKind(Register dst, CallKind kind);
 
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag,
-                  PostCallGenerator* post_call_generator = NULL);
+                  const CallWrapper& call_wrapper = NullCallWrapper(),
+                  CallKind call_kind = CALL_AS_METHOD);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  CallKind call_kind = CALL_AS_METHOD);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      const CallWrapper& call_wrapper = NullCallWrapper(),
+                      CallKind call_kind = CALL_AS_METHOD);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
@@ -628,14 +662,14 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // -------------------------------------------------------------------------
-  // Debugger Support
+  // Debugger Support.
 
   void DebugBreak();
 #endif
 
 
   // -------------------------------------------------------------------------
-  // Exception handling
+  // Exception handling.
 
   // Push a new try handler and link into try handler chain.
   // The return address must be passed in register ra.
@@ -646,9 +680,24 @@
   // Must preserve the result register.
   void PopTryHandler();
 
+  // Passes the thrown value (in v0) to the handler at the top of the try
+  // handler chain.
+  void Throw(Register value);
+
+  // Propagates an uncatchable exception to the top of the current JS stack's
+  // handler chain.
+  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
   // Copies a fixed number of fields of heap objects from src to dst.
   void CopyFields(Register dst, Register src, RegList temps, int field_count);
 
+  // Copies a number of bytes from src to dst. All registers are clobbered. On
+  // exit src and dst will point to the place just after where the last byte was
+  // read or written and length will be zero.
+  void CopyBytes(Register src,
+                 Register dst,
+                 Register length,
+                 Register scratch);
+
   // -------------------------------------------------------------------------
   // Support functions.
 
@@ -669,18 +718,27 @@
   // Check if the map of an object is equal to a specified map (either
   // given directly or as an index into the root list) and branch to
   // label if not. Skip the smi check if not required (object is known
-  // to be a heap object)
+  // to be a heap object).
   void CheckMap(Register obj,
                 Register scratch,
                 Handle<Map> map,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
 
   void CheckMap(Register obj,
                 Register scratch,
                 Heap::RootListIndex index,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object).
+  void DispatchMap(Register obj,
+                   Register scratch,
+                   Handle<Map> map,
+                   Handle<Code> success,
+                   SmiCheckType smi_check_type);
 
   // Generates code for reporting that an illegal operation has
   // occurred.
@@ -692,6 +750,10 @@
   //   index - holds the overwritten index on exit.
   void IndexFromHash(Register hash, Register index);
 
+  // Get the number of least significant bits from a register.
+  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
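+  // E.g., GetLeastBitsFromInt32(t0, a0, 3) would leave (a0 & 7) in t0
+  // (registers chosen for illustration only).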
+
   // Load the value of a number object into a FPU double register. If the
   // object is not a number a jump to the label not_number is performed
   // and the FPU double register is unchanged.
@@ -712,15 +774,70 @@
                               Register scratch1);
 
   // -------------------------------------------------------------------------
-  // Runtime calls
+  // Overflow handling functions.
+  // Usage: first call the appropriate arithmetic function, then call one of the
+  // jump functions with the overflow_dst register as the second parameter.
+
+  void AdduAndCheckForOverflow(Register dst,
+                               Register left,
+                               Register right,
+                               Register overflow_dst,
+                               Register scratch = at);
+
+  void SubuAndCheckForOverflow(Register dst,
+                               Register left,
+                               Register right,
+                               Register overflow_dst,
+                               Register scratch = at);
+
+  void BranchOnOverflow(Label* label,
+                        Register overflow_check,
+                        BranchDelaySlot bd = PROTECT) {
+    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void BranchOnNoOverflow(Label* label,
+                          Register overflow_check,
+                          BranchDelaySlot bd = PROTECT) {
+    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+    Ret(lt, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+    Ret(ge, overflow_check, Operand(zero_reg), bd);
+  }
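+  // Illustrative usage (register choices are examples, not requirements):
+  //   AdduAndCheckForOverflow(v0, a0, a1, t9);
+  //   BranchOnOverflow(&overflow, t9);  // Taken when t9 is negative.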
+
+  // -------------------------------------------------------------------------
+  // Runtime calls.
 
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = cc_always,
                 Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
 
+  // Call a code stub and return the code object called.  Try to generate
+  // the code if necessary.  Do not perform a GC but instead return a retry
+  // after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
+                                           Condition cond = cc_always,
+                                           Register r1 = zero_reg,
+                                           const Operand& r2 =
+                                               Operand(zero_reg));
+
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
+  // Tail call a code stub (jump) and return the code object called.  Try to
+  // generate the code if necessary.  Do not perform a GC but instead return
+  // a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+                                               Condition cond = cc_always,
+                                               Register r1 = zero_reg,
+                                               const Operand& r2 =
+                                                   Operand(zero_reg));
+
   void CallJSExitStub(CodeStub* stub);
 
   // Call a runtime routine.
@@ -741,6 +858,12 @@
                                  int num_arguments,
                                  int result_size);
 
+  // Tail call of a runtime routine (jump). Try to generate the code if
+  // necessary. Do not perform a GC but instead return a retry after GC
+  // failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+      const ExternalReference& ext, int num_arguments, int result_size);
+
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
@@ -768,15 +891,31 @@
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
   void CallCFunction(Register function, Register scratch, int num_arguments);
+  void GetCFunctionDoubleResult(const DoubleRegister dst);
+
+  // There are two ways of passing double arguments on MIPS, depending on
+  // whether soft or hard floating point ABI is used. These functions
+  // abstract parameter passing for the three different ways we call
+  // C functions from generated code.
+  void SetCallCDoubleArguments(DoubleRegister dreg);
+  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
+  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
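+  // A minimal usage sketch for a hard-float double call; the callee and the
+  // argument count are illustrative placeholders, not defined by this header:
+  //   PrepareCallCFunction(4, t0);
+  //   SetCallCDoubleArguments(f12, f14);
+  //   CallCFunction(some_double_double_helper, 4);
+  //   GetCFunctionDoubleResult(f0);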
+
+  // Calls an API function. Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions. Restores context.
+  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
+                                           int stack_space);
 
   // Jump to the builtin routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
+  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeJSFlags flags,
-                     PostCallGenerator* post_call_generator = NULL);
+                     InvokeFlag flag,
+                     const CallWrapper& call_wrapper = NullCallWrapper());
 
   // Store the code object for the given builtin in the target register and
   // setup the function in a1.
@@ -787,14 +926,17 @@
 
   struct Unresolved {
     int pc;
-    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
     const char* name;
   };
 
-  Handle<Object> CodeObject() { return code_object_; }
+  Handle<Object> CodeObject() {
+    ASSERT(!code_object_.is_null());
+    return code_object_;
+  }
 
   // -------------------------------------------------------------------------
-  // StatsCounter support
+  // StatsCounter support.
 
   void SetCounter(StatsCounter* counter, int value,
                   Register scratch1, Register scratch2);
@@ -805,7 +947,7 @@
 
 
   // -------------------------------------------------------------------------
-  // Debugging
+  // Debugging.
 
   // Calls Abort(msg) if the condition cc is not satisfied.
   // Use --debug_code to enable.
@@ -826,7 +968,7 @@
   bool allow_stub_calls() { return allow_stub_calls_; }
 
   // ---------------------------------------------------------------------------
-  // Number utilities
+  // Number utilities.
 
   // Check whether the value of reg is a power of two and not zero. If not
   // control continues at the label not_power_of_two. If reg is a power of two
@@ -837,7 +979,7 @@
                                  Label* not_power_of_two_or_zero);
 
   // -------------------------------------------------------------------------
-  // Smi utilities
+  // Smi utilities.
 
   // Try to convert int32 to smi. If the value is too large, preserve
   // the original value and jump to not_a_smi. Destroys scratch and
@@ -888,13 +1030,16 @@
   void AbortIfSmi(Register object);
   void AbortIfNotSmi(Register object);
 
+  // Abort execution if argument is not a string. Used in debug code.
+  void AbortIfNotString(Register object);
+
   // Abort execution if argument is not the root value with the given index.
   void AbortIfNotRootValue(Register src,
                            Heap::RootListIndex root_value_index,
                            const char* message);
 
   // ---------------------------------------------------------------------------
-  // HeapNumber utilities
+  // HeapNumber utilities.
 
   void JumpIfNotHeapNumber(Register object,
                            Register heap_number_map,
@@ -902,7 +1047,7 @@
                            Label* on_not_heap_number);
 
   // -------------------------------------------------------------------------
-  // String utilities
+  // String utilities.
 
   // Checks if both instance types are sequential ASCII strings and jumps to
   // label if either is not.
@@ -935,6 +1080,8 @@
                                            Register scratch2,
                                            Label* failure);
 
+  void LoadInstanceDescriptors(Register map, Register descriptors);
+
  private:
   void CallCFunctionHelper(Register function,
                            ExternalReference function_reference,
@@ -959,7 +1106,8 @@
                       Register code_reg,
                       Label* done,
                       InvokeFlag flag,
-                      PostCallGenerator* post_call_generator = NULL);
+                      const CallWrapper& call_wrapper = NullCallWrapper(),
+                      CallKind call_kind = CALL_AS_METHOD);
 
   // Get the code for the given builtin. Returns if able to resolve
   // the function in the 'resolved' flag.
@@ -975,15 +1123,22 @@
                            Register scratch1,
                            Register scratch2);
 
+  // Compute memory operands for safepoint stack slots.
+  static int SafepointRegisterStackIndex(int reg_code);
+  MemOperand SafepointRegisterSlot(Register reg);
+  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
 
   bool generating_stub_;
   bool allow_stub_calls_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
+
+  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // traversal.
+  friend class OptimizedFrame;
 };
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -998,29 +1153,21 @@
   MacroAssembler* masm() { return &masm_; }
 
   // Emit an instruction directly.
-  void Emit(Instr x);
+  void Emit(Instr instr);
 
   // Emit an address directly.
   void Emit(Address addr);
 
+  // Change the condition part of the current instruction, leaving the rest
+  // of the instruction unchanged.
+  void ChangeBranchCondition(Condition cond);
+
  private:
   byte* address_;  // The address of the code being patched.
   int instructions_;  // Number of instructions of the expected patch size.
   int size_;  // Number of bytes of the expected patch size.
   MacroAssembler masm_;  // Macro assembler used to generate the code.
 };
-#endif  // ENABLE_DEBUGGER_SUPPORT
-
-
-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
- public:
-  PostCallGenerator() { }
-  virtual ~PostCallGenerator() { }
-  virtual void Generate() = 0;
-};
 
 
 // -----------------------------------------------------------------------------
@@ -1042,6 +1189,16 @@
 }
 
 
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+static inline MemOperand CFunctionArgumentOperand(int index) {
+  ASSERT(index > StandardFrameConstants::kCArgSlotCount);
+  // Argument 5 takes the slot just past the four Arg-slots.
+  int offset =
+      (index - 5) * kPointerSize + StandardFrameConstants::kCArgsSlotsSize;
+  return MemOperand(sp, offset);
+}
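+// Example (illustrative, assuming 4-byte pointers and a 16-byte arg slot
+// area): argument 5 maps to MemOperand(sp, 16) and argument 6 to
+// MemOperand(sp, 20).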
+
 
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
@@ -1055,4 +1212,3 @@
 } }  // namespace v8::internal
 
 #endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 9f9e976..cfc8f65 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -56,49 +56,58 @@
  * - sp : points to tip of C stack.
  *
  * The remaining registers are free for computations.
- *
  * Each call to a public method should retain this convention.
+ *
  * The stack will have the following structure:
- *       - direct_call        (if 1, direct call from JavaScript code, if 0 call
- *                             through the runtime system)
- *       - stack_area_base    (High end of the memory area to use as
- *                             backtracking stack)
- *       - int* capture_array (int[num_saved_registers_], for output).
- *       - stack frame header (16 bytes in size)
- *       --- sp when called ---
- *       - link address
- *       - backup of registers s0..s7
- *       - end of input       (Address of end of string)
- *       - start of input     (Address of first character in string)
- *       - start index        (character index of start)
- *       --- frame pointer ----
- *       - void* input_string (location of a handle containing the string)
- *       - Offset of location before start of input (effectively character
- *         position -1). Used to initialize capture registers to a non-position.
- *       - At start (if 1, we are starting at the start of the
- *         string, otherwise 0)
- *       - register 0         (Only positions must be stored in the first
- *       - register 1          num_saved_registers_ registers)
- *       - ...
- *       - register num_registers-1
- *       --- sp ---
+ *
+ *  - fp[56]  direct_call  (if 1, direct call from JavaScript code,
+ *                          if 0, call through the runtime system).
+ *  - fp[52]  stack_area_base (High end of the memory area to use as
+ *                             backtracking stack).
+ *  - fp[48]  int* capture_array (int[num_saved_registers_], for output).
+ *  - fp[44]  secondary link/return address used by native call.
+ *  --- sp when called ---
+ *  - fp[40]  return address (ra).
+ *  - fp[36]  old frame pointer (s8/fp).
+ *  - fp[0..32]  backup of registers s0..s7.
+ *  --- frame pointer ----
+ *  - fp[-4]  end of input       (Address of end of string).
+ *  - fp[-8]  start of input     (Address of first character in string).
+ *  - fp[-12] start index        (character index of start).
+ *  - fp[-16] void* input_string (location of a handle containing the string).
+ *  - fp[-20] Offset of location before start of input (effectively character
+ *            position -1). Used to initialize capture registers to a
+ *            non-position.
+ *  - fp[-24] At start (if 1, we are starting at the start of the
+ *    string, otherwise 0)
+ *  - fp[-28] register 0         (Only positions must be stored in the first
+ *  -         register 1          num_saved_registers_ registers)
+ *  -         ...
+ *  -         register num_registers-1
+ *  --- sp ---
  *
  * The first num_saved_registers_ registers are initialized to point to
  * "character -1" in the string (i.e., char_size() bytes before the first
  * character of the string). The remaining registers start out as garbage.
  *
  * The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
  * int (*match)(String* input_string,
  *              int start_index,
  *              Address start,
  *              Address end,
+ *              Address secondary_return_address,  // Only used by native call.
  *              int* capture_output_array,
- *              bool at_start,
  *              byte* stack_area_base,
- *              bool direct_call)
+ *              bool direct_call = false)
  * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc).
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in mips/simulator-mips.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the ra register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
  */
 
 #define __ ACCESS_MASM(masm_)
@@ -106,7 +115,7 @@
 RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
     Mode mode,
     int registers_to_save)
-    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+    : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
       mode_(mode),
       num_registers_(registers_to_save),
       num_saved_registers_(registers_to_save),
@@ -114,9 +123,15 @@
       start_label_(),
       success_label_(),
       backtrack_label_(),
-      exit_label_() {
+      exit_label_(),
+      internal_failure_label_() {
   ASSERT_EQ(0, registers_to_save % 2);
   __ jmp(&entry_label_);   // We'll write the entry code later.
+  // If the code gets too big or corrupted, an internal exception will be
+  // raised, and we will exit right away.
+  __ bind(&internal_failure_label_);
+  __ li(v0, Operand(FAILURE));
+  __ Ret();
   __ bind(&start_label_);  // And then continue from here.
 }
 
@@ -131,6 +146,7 @@
   exit_label_.Unuse();
   check_preempt_label_.Unuse();
   stack_overflow_label_.Unuse();
+  internal_failure_label_.Unuse();
 }
 
 
@@ -140,47 +156,75 @@
 
 
 void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
-  UNIMPLEMENTED_MIPS();
+  if (by != 0) {
+    __ Addu(current_input_offset(),
+           current_input_offset(), Operand(by * char_size()));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(reg >= 0);
+  ASSERT(reg < num_registers_);
+  if (by != 0) {
+    __ lw(a0, register_location(reg));
+    __ Addu(a0, a0, Operand(by));
+    __ sw(a0, register_location(reg));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::Backtrack() {
-  UNIMPLEMENTED_MIPS();
+  CheckPreemption();
+  // Pop Code* offset from backtrack stack, add Code* and jump to location.
+  Pop(a0);
+  __ Addu(a0, a0, code_pointer());
+  __ Jump(Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::Bind(Label* label) {
-  UNIMPLEMENTED_MIPS();
+  __ bind(label);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
-  UNIMPLEMENTED_MIPS();
+  Label not_at_start;
+  // Did we start the match at the start of the string at all?
+  __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+  BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg));
+
+  // If we did, are we still at the start of the input?
+  __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+  __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+  BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+  __ bind(&not_at_start);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
-  UNIMPLEMENTED_MIPS();
+  // Did we start the match at the start of the string at all?
+  __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+  BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg));
+  // If we did, are we still at the start of the input?
+  __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+  __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+  BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
 }
 
 
@@ -188,26 +232,212 @@
                                               int cp_offset,
                                               Label* on_failure,
                                               bool check_end_of_string) {
-  UNIMPLEMENTED_MIPS();
+  if (on_failure == NULL) {
+    // Instead of inlining a backtrack for each test, (re)use the global
+    // backtrack target.
+    on_failure = &backtrack_label_;
+  }
+
+  if (check_end_of_string) {
+    // Is the last character of the required match inside the string?
+    CheckPosition(cp_offset + str.length() - 1, on_failure);
+  }
+
+  __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+  if (cp_offset != 0) {
+    int byte_offset = cp_offset * char_size();
+    __ Addu(a0, a0, Operand(byte_offset));
+  }
+
+  // a0 : Address of characters to match against str.
+  int stored_high_byte = 0;
+  for (int i = 0; i < str.length(); i++) {
+    if (mode_ == ASCII) {
+      __ lbu(a1, MemOperand(a0, 0));
+      __ addiu(a0, a0, char_size());
+      ASSERT(str[i] <= String::kMaxAsciiCharCode);
+      BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
+    } else {
+      __ lhu(a1, MemOperand(a0, 0));
+      __ addiu(a0, a0, char_size());
+      uc16 match_char = str[i];
+      int match_high_byte = (match_char >> 8);
+      if (match_high_byte == 0) {
+        BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
+      } else {
+        if (match_high_byte != stored_high_byte) {
+          __ li(a2, Operand(match_high_byte));
+          stored_high_byte = match_high_byte;
+        }
+        __ Addu(a3, a2, Operand(match_char & 0xff));
+        BranchOrBacktrack(on_failure, ne, a1, Operand(a3));
+      }
+    }
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
-  UNIMPLEMENTED_MIPS();
+  Label backtrack_non_equal;
+  __ lw(a0, MemOperand(backtrack_stackpointer(), 0));
+  __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+  __ Addu(backtrack_stackpointer(),
+          backtrack_stackpointer(),
+          Operand(kPointerSize));
+  __ bind(&backtrack_non_equal);
+  BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
     int start_reg,
     Label* on_no_match) {
-  UNIMPLEMENTED_MIPS();
+  Label fallthrough;
+  __ lw(a0, register_location(start_reg));  // Index of start of capture.
+  __ lw(a1, register_location(start_reg + 1));  // Index of end of capture.
+  __ Subu(a1, a1, a0);  // Length of capture.
+
+  // If length is zero, either the capture is empty or it is not participating.
+  // In either case succeed immediately.
+  __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+  __ Addu(t5, a1, current_input_offset());
+  // Check that there are enough characters left in the input.
+  BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+
+  if (mode_ == ASCII) {
+    Label success;
+    Label fail;
+    Label loop_check;
+
+    // a0 - offset of start of capture.
+    // a1 - length of capture.
+    __ Addu(a0, a0, Operand(end_of_input_address()));
+    __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+    __ Addu(a1, a0, Operand(a1));
+
+    // a0 - Address of start of capture.
+    // a1 - Address of end of capture.
+    // a2 - Address of current input position.
+
+    Label loop;
+    __ bind(&loop);
+    __ lbu(a3, MemOperand(a0, 0));
+    __ addiu(a0, a0, char_size());
+    __ lbu(t0, MemOperand(a2, 0));
+    __ addiu(a2, a2, char_size());
+
+    __ Branch(&loop_check, eq, t0, Operand(a3));
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    __ Or(a3, a3, Operand(0x20));  // Convert capture character to lower-case.
+    __ Or(t0, t0, Operand(0x20));  // Also convert input character.
+    __ Branch(&fail, ne, t0, Operand(a3));
+    __ Subu(a3, a3, Operand('a'));
+    __ Branch(&fail, hi, a3, Operand('z' - 'a'));  // Is a3 a lowercase letter?
+
+    __ bind(&loop_check);
+    __ Branch(&loop, lt, a0, Operand(a1));
+    __ jmp(&success);
+
+    __ bind(&fail);
+    GoTo(on_no_match);
+
+    __ bind(&success);
+    // Compute new value of character position after the matched part.
+    __ Subu(current_input_offset(), a2, end_of_input_address());
+  } else {
+    ASSERT(mode_ == UC16);
+    // Put regexp engine registers on stack.
+    RegList regexp_registers_to_retain = current_input_offset().bit() |
+        current_character().bit() | backtrack_stackpointer().bit();
+    __ MultiPush(regexp_registers_to_retain);
+
+    int argument_count = 4;
+    __ PrepareCallCFunction(argument_count, a2);
+
+    // a0 - offset of start of capture.
+    // a1 - length of capture.
+
+    // Put arguments into arguments registers.
+    // Parameters are
+    //   a0: Address byte_offset1 - Address captured substring's start.
+    //   a1: Address byte_offset2 - Address of current character position.
+    //   a2: size_t byte_length - length of capture in bytes(!).
+    //   a3: Isolate* isolate.
+
+    // Address of start of capture.
+    __ Addu(a0, a0, Operand(end_of_input_address()));
+    // Length of capture.
+    __ mov(a2, a1);
+    // Save length in callee-save register for use on return.
+    __ mov(s3, a1);
+    // Address of current input position.
+    __ Addu(a1, current_input_offset(), Operand(end_of_input_address()));
+    // Isolate.
+    __ li(a3, Operand(ExternalReference::isolate_address()));
+
+    ExternalReference function =
+        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+    __ CallCFunction(function, argument_count);
+
+    // Restore regexp engine registers.
+    __ MultiPop(regexp_registers_to_retain);
+    __ li(code_pointer(), Operand(masm_->CodeObject()));
+    __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+    // Check if function returned non-zero for success or zero for failure.
+    BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
+    // On success, increment position by length of capture.
+    __ Addu(current_input_offset(), current_input_offset(), Operand(s3));
+  }
+
+  __ bind(&fallthrough);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotBackReference(
     int start_reg,
     Label* on_no_match) {
-  UNIMPLEMENTED_MIPS();
+  Label fallthrough;
+  Label success;
+
+  // Find length of back-referenced capture.
+  __ lw(a0, register_location(start_reg));
+  __ lw(a1, register_location(start_reg + 1));
+  __ Subu(a1, a1, a0);  // Length to check.
+  // Succeed on empty capture (including no capture).
+  __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+  __ Addu(t5, a1, current_input_offset());
+  // Check that there are enough characters left in the input.
+  BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+
+  // Compute pointers to match string and capture string.
+  __ Addu(a0, a0, Operand(end_of_input_address()));
+  __ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+  __ Addu(a1, a1, Operand(a0));
+
+  Label loop;
+  __ bind(&loop);
+  if (mode_ == ASCII) {
+    __ lbu(a3, MemOperand(a0, 0));
+    __ addiu(a0, a0, char_size());
+    __ lbu(t0, MemOperand(a2, 0));
+    __ addiu(a2, a2, char_size());
+  } else {
+    ASSERT(mode_ == UC16);
+    __ lhu(a3, MemOperand(a0, 0));
+    __ addiu(a0, a0, char_size());
+    __ lhu(t0, MemOperand(a2, 0));
+    __ addiu(a2, a2, char_size());
+  }
+  BranchOrBacktrack(on_no_match, ne, a3, Operand(t0));
+  __ Branch(&loop, lt, a0, Operand(a1));
+
+  // Move current character position to position after match.
+  __ Subu(current_input_offset(), a2, end_of_input_address());
+  __ bind(&fallthrough);
 }
 
 
@@ -220,21 +450,23 @@
 
 void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
                                                 Label* on_not_equal) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
                                                      uint32_t mask,
                                                      Label* on_equal) {
-  UNIMPLEMENTED_MIPS();
+  __ And(a0, current_character(), Operand(mask));
+  BranchOrBacktrack(on_equal, eq, a0, Operand(c));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
                                                         uint32_t mask,
                                                         Label* on_not_equal) {
-  UNIMPLEMENTED_MIPS();
+  __ And(a0, current_character(), Operand(mask));
+  BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
 }
 
 
@@ -249,24 +481,360 @@
 
 bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
                                                          Label* on_no_match) {
-  UNIMPLEMENTED_MIPS();
-  return false;
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check.
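+  // For example, the 'd' case below accepts exactly '0'..'9': after the
+  // subtraction, any character below '0' wraps to a large unsigned value and
+  // fails the unsigned <= ('9' - '0') comparison.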
+  switch (type) {
+  case 's':
+    // Match space-characters.
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      Label success;
+      __ Branch(&success, eq, current_character(), Operand(' '));
+      // Check range 0x09..0x0d.
+      __ Subu(a0, current_character(), Operand('\t'));
+      BranchOrBacktrack(on_no_match, hi, a0, Operand('\r' - '\t'));
+      __ bind(&success);
+      return true;
+    }
+    return false;
+  case 'S':
+    // Match non-space characters.
+    if (mode_ == ASCII) {
+      // ASCII space characters are '\t'..'\r' and ' '.
+      BranchOrBacktrack(on_no_match, eq, current_character(), Operand(' '));
+      __ Subu(a0, current_character(), Operand('\t'));
+      BranchOrBacktrack(on_no_match, ls, a0, Operand('\r' - '\t'));
+      return true;
+    }
+    return false;
+  case 'd':
+    // Match ASCII digits ('0'..'9').
+    __ Subu(a0, current_character(), Operand('0'));
+    BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
+    return true;
+  case 'D':
+    // Match characters that are not ASCII digits.
+    __ Subu(a0, current_character(), Operand('0'));
+    BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
+    return true;
+  case '.': {
+    // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+    __ Xor(a0, current_character(), Operand(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+    __ Subu(a0, a0, Operand(0x0b));
+    BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+    if (mode_ == UC16) {
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+      BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
+    }
+    return true;
+  }
+  case 'n': {
+    // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+    __ Xor(a0, current_character(), Operand(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+    __ Subu(a0, a0, Operand(0x0b));
+    if (mode_ == ASCII) {
+      BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+    } else {
+      Label done;
+      BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ Subu(a0, a0, Operand(0x2028 - 0x0b));
+      BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
+      __ bind(&done);
+    }
+    return true;
+  }
+  case 'w': {
+    if (mode_ != ASCII) {
+      // Table is 128 entries, so all ASCII characters can be tested.
+      BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
+    }
+    ExternalReference map = ExternalReference::re_word_character_map();
+    __ li(a0, Operand(map));
+    __ Addu(a0, a0, current_character());
+    __ lbu(a0, MemOperand(a0, 0));
+    BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+    return true;
+  }
+  case 'W': {
+    Label done;
+    if (mode_ != ASCII) {
+      // Table is 128 entries, so all ASCII characters can be tested.
+      __ Branch(&done, hi, current_character(), Operand('z'));
+    }
+    ExternalReference map = ExternalReference::re_word_character_map();
+    __ li(a0, Operand(map));
+    __ Addu(a0, a0, current_character());
+    __ lbu(a0, MemOperand(a0, 0));
+    BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+    if (mode_ != ASCII) {
+      __ bind(&done);
+    }
+    return true;
+  }
+  case '*':
+    // Match any character.
+    return true;
+  // No custom implementation (yet): s(UC16), S(UC16).
+  default:
+    return false;
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::Fail() {
-  UNIMPLEMENTED_MIPS();
+  __ li(v0, Operand(FAILURE));
+  __ jmp(&exit_label_);
 }
 
 
 Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
-  UNIMPLEMENTED_MIPS();
-  return Handle<HeapObject>::null();
+  if (masm_->has_exception()) {
+    // If the code gets corrupted due to long regular expressions and lack of
+    // space on trampolines, an internal exception flag is set. If this case
+    // is detected, we will jump into exit sequence right away.
+    __ bind_to(&entry_label_, internal_failure_label_.pos());
+  } else {
+    // Finalize code - write the entry point code now that we know how many
+    // registers we need.
+
+    // Entry code:
+    __ bind(&entry_label_);
+    // Push arguments
+    // Save callee-save registers.
+    // Start new stack frame.
+    // Store link register in existing stack-cell.
+    // Order here should correspond to order of offset constants in header file.
+    RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() |
+        s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
+    RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+    __ MultiPush(argument_registers | registers_to_retain | ra.bit());
+    // Set frame pointer in space for it if this is not a direct call
+    // from generated code.
+    __ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
+    __ push(a0);  // Make room for "position - 1" constant (value irrelevant).
+    __ push(a0);  // Make room for "at start" constant (value irrelevant).
+
+    // Check if we have space on the stack for registers.
+    Label stack_limit_hit;
+    Label stack_ok;
+
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(masm_->isolate());
+    __ li(a0, Operand(stack_limit));
+    __ lw(a0, MemOperand(a0));
+    __ Subu(a0, sp, a0);
+    // Handle it if the stack pointer is already below the stack limit.
+    __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+    // Check if there is room for the variable number of registers above
+    // the stack limit.
+    __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+    // Exit with OutOfMemory exception. There is not enough space on the stack
+    // for our working registers.
+    __ li(v0, Operand(EXCEPTION));
+    __ jmp(&exit_label_);
+
+    __ bind(&stack_limit_hit);
+    CallCheckStackGuardState(a0);
+    // If returned value is non-zero, we exit with the returned value as result.
+    __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+
+    __ bind(&stack_ok);
+    // Allocate space on stack for registers.
+    __ Subu(sp, sp, Operand(num_registers_ * kPointerSize));
+    // Load string end.
+    __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+    // Load input start.
+    __ lw(a0, MemOperand(frame_pointer(), kInputStart));
+    // Find negative length (offset of start relative to end).
+    __ Subu(current_input_offset(), a0, end_of_input_address());
+    // Set a0 to address of char before start of the input string
+    // (effectively string position -1).
+    __ lw(a1, MemOperand(frame_pointer(), kStartIndex));
+    __ Subu(a0, current_input_offset(), Operand(char_size()));
+    __ sll(t5, a1, (mode_ == UC16) ? 1 : 0);
+    __ Subu(a0, a0, t5);
+    // Store this value in a local variable, for use when clearing
+    // position registers.
+    __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+    // Determine whether the start index is zero, that is, at the start of
+    // the string, and store that value in a local variable.
+    __ mov(t5, a1);
+    __ li(a1, Operand(1));
+    __ movn(a1, zero_reg, t5);
+    __ sw(a1, MemOperand(frame_pointer(), kAtStart));
+
+    if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
+      // Fill saved registers with initial value = start offset - 1.
+
+      // Address of register 0.
+      __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
+      __ li(a2, Operand(num_saved_registers_));
+      Label init_loop;
+      __ bind(&init_loop);
+      __ sw(a0, MemOperand(a1));
+      __ Addu(a1, a1, Operand(-kPointerSize));
+      __ Subu(a2, a2, Operand(1));
+      __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+    }
+
+    // Initialize backtrack stack pointer.
+    __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+    // Initialize code pointer register
+    __ li(code_pointer(), Operand(masm_->CodeObject()));
+    // Load previous char as initial value of current character register.
+    Label at_start;
+    __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+    __ Branch(&at_start, ne, a0, Operand(zero_reg));
+    LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
+    __ jmp(&start_label_);
+    __ bind(&at_start);
+    __ li(current_character(), Operand('\n'));
+    __ jmp(&start_label_);
+
+
+    // Exit code:
+    if (success_label_.is_linked()) {
+      // Save captures when successful.
+      __ bind(&success_label_);
+      if (num_saved_registers_ > 0) {
+        // Copy captures to output.
+        __ lw(a1, MemOperand(frame_pointer(), kInputStart));
+        __ lw(a0, MemOperand(frame_pointer(), kRegisterOutput));
+        __ lw(a2, MemOperand(frame_pointer(), kStartIndex));
+        __ Subu(a1, end_of_input_address(), a1);
+        // a1 is length of input in bytes.
+        if (mode_ == UC16) {
+          __ srl(a1, a1, 1);
+        }
+        // a1 is length of input in characters.
+        __ Addu(a1, a1, Operand(a2));
+        // a1 is length of string in characters.
+
+        ASSERT_EQ(0, num_saved_registers_ % 2);
+        // Always an even number of capture registers. This allows us to
+        // unroll the loop once to add an operation between a load of a register
+        // and the following use of that register.
+        for (int i = 0; i < num_saved_registers_; i += 2) {
+          __ lw(a2, register_location(i));
+          __ lw(a3, register_location(i + 1));
+          if (mode_ == UC16) {
+            __ sra(a2, a2, 1);
+            __ Addu(a2, a2, a1);
+            __ sra(a3, a3, 1);
+            __ Addu(a3, a3, a1);
+          } else {
+            __ Addu(a2, a1, Operand(a2));
+            __ Addu(a3, a1, Operand(a3));
+          }
+          __ sw(a2, MemOperand(a0));
+          __ Addu(a0, a0, kPointerSize);
+          __ sw(a3, MemOperand(a0));
+          __ Addu(a0, a0, kPointerSize);
+        }
+      }
+      __ li(v0, Operand(SUCCESS));
+    }
+    // Exit and return v0.
+    __ bind(&exit_label_);
+    // Skip sp past regexp registers and local variables.
+    __ mov(sp, frame_pointer());
+    // Restore registers s0..s7 and return (restoring ra to pc).
+    __ MultiPop(registers_to_retain | ra.bit());
+    __ Ret();
+
+    // Backtrack code (branch target for conditional backtracks).
+    if (backtrack_label_.is_linked()) {
+      __ bind(&backtrack_label_);
+      Backtrack();
+    }
+
+    Label exit_with_exception;
+
+    // Preempt-code.
+    if (check_preempt_label_.is_linked()) {
+      SafeCallTarget(&check_preempt_label_);
+      // Put regexp engine registers on stack.
+      RegList regexp_registers_to_retain = current_input_offset().bit() |
+          current_character().bit() | backtrack_stackpointer().bit();
+      __ MultiPush(regexp_registers_to_retain);
+      CallCheckStackGuardState(a0);
+      __ MultiPop(regexp_registers_to_retain);
+      // If returning non-zero, we should end execution with the given
+      // result as return value.
+      __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
+
+      // String might have moved: Reload end of string from frame.
+      __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+      __ li(code_pointer(), Operand(masm_->CodeObject()));
+      SafeReturn();
+    }
+
+    // Backtrack stack overflow code.
+    if (stack_overflow_label_.is_linked()) {
+      SafeCallTarget(&stack_overflow_label_);
+      // Reached if the backtrack-stack limit has been hit.
+      // Put regexp engine registers on stack first.
+      RegList regexp_registers = current_input_offset().bit() |
+          current_character().bit();
+      __ MultiPush(regexp_registers);
+      Label grow_failed;
+      // Call GrowStack(backtrack_stackpointer(), &stack_base)
+      static const int num_arguments = 3;
+      __ PrepareCallCFunction(num_arguments, a0);
+      __ mov(a0, backtrack_stackpointer());
+      __ Addu(a1, frame_pointer(), Operand(kStackHighEnd));
+      __ li(a2, Operand(ExternalReference::isolate_address()));
+      ExternalReference grow_stack =
+          ExternalReference::re_grow_stack(masm_->isolate());
+      __ CallCFunction(grow_stack, num_arguments);
+      // Restore regexp registers.
+      __ MultiPop(regexp_registers);
+      // If it returned NULL, we have failed to grow the stack, and
+      // must exit with a stack-overflow exception.
+      __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
+      // Otherwise use return value as new stack pointer.
+      __ mov(backtrack_stackpointer(), v0);
+      // Restore saved registers and continue.
+      __ li(code_pointer(), Operand(masm_->CodeObject()));
+      __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+      SafeReturn();
+    }
+
+    if (exit_with_exception.is_linked()) {
+      // If any of the code above needed to exit with an exception.
+      __ bind(&exit_with_exception);
+      // Exit with Result EXCEPTION(-1) to signal thrown exception.
+      __ li(v0, Operand(EXCEPTION));
+      __ jmp(&exit_label_);
+    }
+  }
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = FACTORY->NewCode(code_desc,
+                                       Code::ComputeFlags(Code::REGEXP),
+                                       masm_->CodeObject());
+  LOG(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
+  return Handle<HeapObject>::cast(code);
 }
 
 
 void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
-  UNIMPLEMENTED_MIPS();
+  if (to == NULL) {
+    Backtrack();
+    return;
+  }
+  __ jmp(to);
+  return;
 }
 
 
@@ -281,13 +849,15 @@
 void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
                                            int comparand,
                                            Label* if_lt) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a0, register_location(reg));
+  BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
 }
 
 
 void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
                                               Label* if_eq) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a0, register_location(reg));
+  BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
 }
 
 
@@ -301,23 +871,47 @@
                                                    Label* on_end_of_input,
                                                    bool check_bounds,
                                                    int characters) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
+  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works).
+  if (check_bounds) {
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
 }
 
 
 void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
-  UNIMPLEMENTED_MIPS();
+  Pop(current_input_offset());
 }
 
 
 void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
-  UNIMPLEMENTED_MIPS();
+  Pop(a0);
+  __ sw(a0, register_location(register_index));
 }
 
 
-
 void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
-  UNIMPLEMENTED_MIPS();
+  if (label->is_bound()) {
+    int target = label->pos();
+    __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    Label after_constant;
+    __ Branch(&after_constant);
+    int offset = masm_->pc_offset();
+    int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+    __ emit(0);
+    masm_->label_at_put(label, offset);
+    __ bind(&after_constant);
+    if (is_int16(cp_offset)) {
+      __ lw(a0, MemOperand(code_pointer(), cp_offset));
+    } else {
+      __ Addu(a0, code_pointer(), cp_offset);
+      __ lw(a0, MemOperand(a0, 0));
+    }
+  }
+  Push(a0);
+  CheckStackLimit();
 }
 
 
@@ -328,55 +922,90 @@
 
 void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
                                            StackCheckFlag check_stack_limit) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a0, register_location(register_index));
+  Push(a0);
+  if (check_stack_limit) CheckStackLimit();
 }
 
 
 void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(current_input_offset(), register_location(reg));
 }
 
 
 void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(backtrack_stackpointer(), register_location(reg));
+  __ lw(a0, MemOperand(frame_pointer(), kStackHighEnd));
+  __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
-  UNIMPLEMENTED_MIPS();
+  Label after_position;
+  __ Branch(&after_position,
+            ge,
+            current_input_offset(),
+            Operand(-by * char_size()));
+  __ li(current_input_offset(), -by * char_size());
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
 }
 
 
 void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  __ li(a0, Operand(to));
+  __ sw(a0, register_location(register_index));
 }
 
 
 void RegExpMacroAssemblerMIPS::Succeed() {
-  UNIMPLEMENTED_MIPS();
+  __ jmp(&success_label_);
 }
 
 
 void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
                                                              int cp_offset) {
-  UNIMPLEMENTED_MIPS();
+  if (cp_offset == 0) {
+    __ sw(current_input_offset(), register_location(reg));
+  } else {
+    __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+    __ sw(a0, register_location(reg));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(reg_from <= reg_to);
+  __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    __ sw(a0, register_location(reg));
+  }
 }
 
 
 void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
-  UNIMPLEMENTED_MIPS();
+  __ lw(a1, MemOperand(frame_pointer(), kStackHighEnd));
+  __ Subu(a0, backtrack_stackpointer(), a1);
+  __ sw(a0, register_location(reg));
 }
 
 
 // Private methods:
 
 void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
-  UNIMPLEMENTED_MIPS();
+  static const int num_arguments = 3;
+  __ PrepareCallCFunction(num_arguments, scratch);
+  __ mov(a2, frame_pointer());
+  // Code* of self.
+  __ li(a1, Operand(masm_->CodeObject()));
+  // a0 becomes return address pointer.
+  ExternalReference stack_guard_check =
+      ExternalReference::re_check_stack_guard_state(masm_->isolate());
+  CallCFunctionUsingStub(stack_guard_check, num_arguments);
 }
 
 
@@ -388,22 +1017,101 @@
 
 
 int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
-                                                  Code* re_code,
-                                                  Address re_frame) {
-  UNIMPLEMENTED_MIPS();
+                                                   Code* re_code,
+                                                   Address re_frame) {
+  Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+  ASSERT(isolate == Isolate::Current());
+  if (isolate->stack_guard()->IsStackOverflow()) {
+    isolate->StackOverflow();
+    return EXCEPTION;
+  }
+
+  // If this is not a real stack overflow, the stack guard was used to
+  // interrupt execution for another purpose.
+
+  // If this is a direct call from JavaScript retry the RegExp forcing the call
+  // through the runtime system. Currently the direct call cannot handle a GC.
+  if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+    return RETRY;
+  }
+
+  // Prepare for possible GC.
+  HandleScope handles;
+  Handle<Code> code_handle(re_code);
+
+  Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+  // Current string.
+  bool is_ascii = subject->IsAsciiRepresentation();
+
+  ASSERT(re_code->instruction_start() <= *return_address);
+  ASSERT(*return_address <=
+      re_code->instruction_start() + re_code->instruction_size());
+
+  MaybeObject* result = Execution::HandleStackGuardInterrupt();
+
+  if (*code_handle != re_code) {  // Return address no longer valid.
+    int delta = *code_handle - re_code;
+    // Overwrite the return address on the stack.
+    *return_address += delta;
+  }
+
+  if (result->IsException()) {
+    return EXCEPTION;
+  }
+
+  // String might have changed.
+  if (subject->IsAsciiRepresentation() != is_ascii) {
+    // If we changed between an ASCII and a UC16 string, the specialized
+    // code cannot be used, and we need to restart regexp matching from
+    // scratch (including, potentially, compiling a new version of the code).
+    return RETRY;
+  }
+
+  // Otherwise, the content of the string might have moved. It must still
+  // be a sequential or external string with the same content.
+  // Update the start and end pointers in the stack frame to the current
+  // location (whether it has actually moved or not).
+  ASSERT(StringShape(*subject).IsSequential() ||
+      StringShape(*subject).IsExternal());
+
+  // The original start address of the characters to match.
+  const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+  // Find the current start address of the same character at the current string
+  // position.
+  int start_index = frame_entry<int>(re_frame, kStartIndex);
+  const byte* new_address = StringCharacterPosition(*subject, start_index);
+
+  if (start_address != new_address) {
+    // If there is a difference, update the object pointer and start and end
+    // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
+    int byte_length = end_address - start_address;
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+    frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+    frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  }
+
   return 0;
 }
 
 
 MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
+  ASSERT(register_index < (1<<30));
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  return MemOperand(frame_pointer(),
+                    kRegisterZero - register_index * kPointerSize);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
                                             Label* on_outside_input) {
-  UNIMPLEMENTED_MIPS();
+  BranchOrBacktrack(on_outside_input,
+                    ge,
+                    current_input_offset(),
+                    Operand(-cp_offset * char_size()));
 }
 
 
@@ -411,61 +1119,126 @@
                                                  Condition condition,
                                                  Register rs,
                                                  const Operand& rt) {
-  UNIMPLEMENTED_MIPS();
+  if (condition == al) {  // Unconditional.
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ jmp(to);
+    return;
+  }
+  if (to == NULL) {
+    __ Branch(&backtrack_label_, condition, rs, rt);
+    return;
+  }
+  __ Branch(to, condition, rs, rt);
 }
 
 
 void RegExpMacroAssemblerMIPS::SafeCall(Label* to, Condition cond, Register rs,
                                            const Operand& rt) {
-  UNIMPLEMENTED_MIPS();
+  __ BranchAndLink(to, cond, rs, rt);
 }
 
 
 void RegExpMacroAssemblerMIPS::SafeReturn() {
-  UNIMPLEMENTED_MIPS();
+  __ pop(ra);
+  __ Addu(t5, ra, Operand(masm_->CodeObject()));
+  __ Jump(t5);
 }
 
 
 void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
-  UNIMPLEMENTED_MIPS();
+  __ bind(name);
+  __ Subu(ra, ra, Operand(masm_->CodeObject()));
+  __ push(ra);
 }
 
 
 void RegExpMacroAssemblerMIPS::Push(Register source) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!source.is(backtrack_stackpointer()));
+  __ Addu(backtrack_stackpointer(),
+          backtrack_stackpointer(),
+          Operand(-kPointerSize));
+  __ sw(source, MemOperand(backtrack_stackpointer()));
 }
 
 
 void RegExpMacroAssemblerMIPS::Pop(Register target) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!target.is(backtrack_stackpointer()));
+  __ lw(target, MemOperand(backtrack_stackpointer()));
+  __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), kPointerSize);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckPreemption() {
-  UNIMPLEMENTED_MIPS();
+  // Check for preemption.
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(masm_->isolate());
+  __ li(a0, Operand(stack_limit));
+  __ lw(a0, MemOperand(a0));
+  SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckStackLimit() {
-  UNIMPLEMENTED_MIPS();
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+
+  __ li(a0, Operand(stack_limit));
+  __ lw(a0, MemOperand(a0));
+  SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
 }
 
 
 void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
     ExternalReference function,
     int num_arguments) {
-  UNIMPLEMENTED_MIPS();
+  // Must pass all arguments in registers. The stub pushes on the stack.
+  ASSERT(num_arguments <= 4);
+  __ li(code_pointer(), Operand(function));
+  RegExpCEntryStub stub;
+  __ CallStub(&stub);
+  if (OS::ActivationFrameAlignment() != 0) {
+    __ lw(sp, MemOperand(sp, 16));
+  }
+  __ li(code_pointer(), Operand(masm_->CodeObject()));
 }
 
 
 void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
-                                                             int characters) {
-  UNIMPLEMENTED_MIPS();
+                                                            int characters) {
+  Register offset = current_input_offset();
+  if (cp_offset != 0) {
+    __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+    offset = a0;
+  }
+  // We assume that we cannot do unaligned loads on MIPS, so this function
+  // must only be used to load a single character at a time.
+  ASSERT(characters == 1);
+  __ Addu(t5, end_of_input_address(), Operand(offset));
+  if (mode_ == ASCII) {
+    __ lbu(current_character(), MemOperand(t5, 0));
+  } else {
+    ASSERT(mode_ == UC16);
+    __ lhu(current_character(), MemOperand(t5, 0));
+  }
 }
 
 
 void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
-  UNIMPLEMENTED_MIPS();
+  int stack_alignment = OS::ActivationFrameAlignment();
+  if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
+  // Stack is already aligned for call, so decrement by alignment
+  // to make room for storing the return address.
+  __ Subu(sp, sp, Operand(stack_alignment));
+  __ sw(ra, MemOperand(sp, 0));
+  __ mov(a0, sp);
+  __ mov(t9, t1);
+  __ Call(t9);
+  __ lw(ra, MemOperand(sp, 0));
+  __ Addu(sp, sp, Operand(stack_alignment));
+  __ Jump(Operand(ra));
 }
 
 
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
index 7310c9d..ad7ada5 100644
--- a/src/mips/regexp-macro-assembler-mips.h
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -121,10 +121,11 @@
   static const int kStoredRegisters = kFramePointer;
   // Return address (stored from link register, read into pc on return).
   static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+  static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
   // Stack frame header.
   static const int kStackFrameHeader = kReturnAddress + kPointerSize;
   // Stack parameters placed by caller.
-  static const int kRegisterOutput = kStackFrameHeader + 16;
+  static const int kRegisterOutput = kStackFrameHeader + 20;
   static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
   static const int kIsolate = kDirectCall + kPointerSize;
@@ -183,7 +184,7 @@
   // Register holding pointer to the current code object.
   inline Register code_pointer() { return t1; }
 
-  // Byte size of chars in the string to match (decided by the Mode argument)
+  // Byte size of chars in the string to match (decided by the Mode argument).
   inline int char_size() { return static_cast<int>(mode_); }
 
   // Equivalent to a conditional branch to the label, unless the label
@@ -228,7 +229,7 @@
   int num_registers_;
 
   // Number of registers to output at the end (the saved registers
-  // are always 0..num_saved_registers_-1)
+  // are always 0..num_saved_registers_-1).
   int num_saved_registers_;
 
   // Labels used internally.
@@ -239,6 +240,7 @@
   Label exit_label_;
   Label check_preempt_label_;
   Label stack_overflow_label_;
+  Label internal_failure_label_;
 };
 
 #endif  // V8_INTERPRETED_REGEXP
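Note: the Push/Pop/CheckStackLimit implementations added in
regexp-macro-assembler-mips.cc above give the MIPS port a downward-growing
backtrack stack bounded below by the regexp stack limit. A minimal C++ sketch
of the behaviour the generated code follows (illustrative only, not part of
the patch; bsp stands for backtrack_stackpointer()):

  void Push(int32_t** bsp, int32_t value) {
    *bsp -= 1;              // Addu(bsp, bsp, -kPointerSize)
    **bsp = value;          // sw(source, MemOperand(bsp))
  }

  int32_t Pop(int32_t** bsp) {
    int32_t value = **bsp;  // lw(target, MemOperand(bsp))
    *bsp += 1;              // Addu(bsp, bsp, kPointerSize)
    return value;
  }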
diff --git a/src/mips/register-allocator-mips-inl.h b/src/mips/register-allocator-mips-inl.h
deleted file mode 100644
index bbfb31d..0000000
--- a/src/mips/register-allocator-mips-inl.h
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
-#include "v8.h"
-#include "mips/assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
-  // The code for this test relies on the order of register codes.
-  return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
-}
-
-
-int RegisterAllocator::ToNumber(Register reg) {
-  ASSERT(reg.is_valid() && !IsReserved(reg));
-  const int kNumbers[] = {
-    0,    // zero_reg
-    1,    // at
-    2,    // v0
-    3,    // v1
-    4,    // a0
-    5,    // a1
-    6,    // a2
-    7,    // a3
-    8,    // t0
-    9,    // t1
-    10,   // t2
-    11,   // t3
-    12,   // t4
-    13,   // t5
-    14,   // t
-    15,   // t7
-    16,   // t8
-    17,   // t9
-    18,   // s0
-    19,   // s1
-    20,   // s2
-    21,   // s3
-    22,   // s4
-    23,   // s5
-    24,   // s6
-    25,   // s7
-    26,   // k0
-    27,   // k1
-    28,   // gp
-    29,   // sp
-    30,   // s8_fp
-    31,   // ra
-  };
-  return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
-  const Register kRegisters[] = {
-    zero_reg,
-    at,
-    v0,
-    v1,
-    a0,
-    a1,
-    a2,
-    a3,
-    t0,
-    t1,
-    t2,
-    t3,
-    t4,
-    t5,
-    t6,
-    t7,
-    s0,
-    s1,
-    s2,
-    s3,
-    s4,
-    s5,
-    s6,
-    s7,
-    t8,
-    t9,
-    k0,
-    k1,
-    gp,
-    sp,
-    s8_fp,
-    ra
-  };
-  return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
-  Reset();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
diff --git a/src/mips/register-allocator-mips.cc b/src/mips/register-allocator-mips.cc
deleted file mode 100644
index 2c5d61b..0000000
--- a/src/mips/register-allocator-mips.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void Result::ToRegister(Register target) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
-  // No byte registers on MIPS.
-  UNREACHABLE();
-  return Result();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 50ad7a1..68fb7ce 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,7 +35,7 @@
 
 #include "disasm.h"
 #include "assembler.h"
-#include "globals.h"    // Need the BitCast
+#include "globals.h"    // Need the BitCast.
 #include "mips/constants-mips.h"
 #include "mips/simulator-mips.h"
 
@@ -46,7 +46,7 @@
 namespace v8 {
 namespace internal {
 
-// Utils functions
+// Utils functions.
 bool HaveSameSign(int32_t a, int32_t b) {
   return ((a ^ b) >= 0);
 }
@@ -139,7 +139,7 @@
 }
 
 
-#else  // ndef GENERATED_CODE_COVERAGE
+#else  // GENERATED_CODE_COVERAGE
 
 #define UNSUPPORTED() printf("Unsupported instruction.\n");
 
@@ -263,15 +263,15 @@
 #define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
 
   PrintF("\n");
-  // at, v0, a0
+  // at, v0, a0.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(1), REG_INFO(2), REG_INFO(4));
-  // v1, a1
+  // v1, a1.
   PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          "", REG_INFO(3), REG_INFO(5));
-  // a2
+  // a2.
   PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
-  // a3
+  // a3.
   PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
   PrintF("\n");
   // t0-t7, s0-s7
@@ -280,16 +280,16 @@
            REG_INFO(8+i), REG_INFO(16+i));
   }
   PrintF("\n");
-  // t8, k0, LO
+  // t8, k0, LO.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(24), REG_INFO(26), REG_INFO(32));
-  // t9, k1, HI
+  // t9, k1, HI.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(25), REG_INFO(27), REG_INFO(33));
-  // sp, fp, gp
+  // sp, fp, gp.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(29), REG_INFO(30), REG_INFO(28));
-  // pc
+  // pc.
   PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
          REG_INFO(31), REG_INFO(34));
 
@@ -307,7 +307,7 @@
   PrintAllRegs();
 
   PrintF("\n\n");
-  // f0, f1, f2, ... f31
+  // f0, f1, f2, ... f31.
   PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
   PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
   PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
@@ -345,7 +345,7 @@
   char arg2[ARG_SIZE + 1];
   char* argv[3] = { cmd, arg1, arg2 };
 
-  // make sure to have a proper terminating character if reaching the limit
+  // Make sure to have a proper terminating character if reaching the limit.
   cmd[COMMAND_SIZE] = 0;
   arg1[ARG_SIZE] = 0;
   arg2[ARG_SIZE] = 0;
@@ -358,10 +358,10 @@
     if (last_pc != sim_->get_pc()) {
       disasm::NameConverter converter;
       disasm::Disassembler dasm(converter);
-      // use a reasonably large buffer
+      // Use a reasonably large buffer.
       v8::internal::EmbeddedVector<char, 256> buffer;
       dasm.InstructionDecode(buffer,
-                             reinterpret_cast<byte_*>(sim_->get_pc()));
+                             reinterpret_cast<byte*>(sim_->get_pc()));
       PrintF("  0x%08x  %s\n", sim_->get_pc(), buffer.start());
       last_pc = sim_->get_pc();
     }
@@ -475,7 +475,7 @@
 
         if (strcmp(cmd, "stack") == 0) {
           cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
-        } else {  // "mem"
+        } else {  // Command "mem".
           int32_t value;
           if (!GetValue(arg1, &value)) {
             PrintF("%s unrecognized\n", arg1);
@@ -496,35 +496,62 @@
         end = cur + words;
 
         while (cur < end) {
-          PrintF("  0x%08x:  0x%08x %10d\n",
+          PrintF("  0x%08x:  0x%08x %10d",
                  reinterpret_cast<intptr_t>(cur), *cur, *cur);
+          HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+          int value = *cur;
+          Heap* current_heap = v8::internal::Isolate::Current()->heap();
+          if (current_heap->Contains(obj) || ((value & 1) == 0)) {
+            PrintF(" (");
+            if ((value & 1) == 0) {
+              PrintF("smi %d", value / 2);
+            } else {
+              obj->ShortPrint();
+            }
+            PrintF(")");
+          }
+          PrintF("\n");
           cur++;
         }
 
-      } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
+      } else if ((strcmp(cmd, "disasm") == 0) ||
+                 (strcmp(cmd, "dpc") == 0) ||
+                 (strcmp(cmd, "di") == 0)) {
         disasm::NameConverter converter;
         disasm::Disassembler dasm(converter);
-        // use a reasonably large buffer
+        // Use a reasonably large buffer.
         v8::internal::EmbeddedVector<char, 256> buffer;
 
-        byte_* cur = NULL;
-        byte_* end = NULL;
+        byte* cur = NULL;
+        byte* end = NULL;
 
         if (argc == 1) {
-          cur = reinterpret_cast<byte_*>(sim_->get_pc());
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
           end = cur + (10 * Instruction::kInstrSize);
         } else if (argc == 2) {
-          int32_t value;
-          if (GetValue(arg1, &value)) {
-            cur = reinterpret_cast<byte_*>(value);
-            // no length parameter passed, assume 10 instructions
-            end = cur + (10 * Instruction::kInstrSize);
+          int regnum = Registers::Number(arg1);
+          if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+            // The argument is an address or a register name.
+            int32_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(value);
+              // Disassemble 10 instructions at <arg1>.
+              end = cur + (10 * Instruction::kInstrSize);
+            }
+          } else {
+            // The argument is the number of instructions.
+            int32_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(sim_->get_pc());
+              // Disassemble <arg1> instructions.
+              end = cur + (value * Instruction::kInstrSize);
+            }
           }
         } else {
           int32_t value1;
           int32_t value2;
           if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
-            cur = reinterpret_cast<byte_*>(value1);
+            cur = reinterpret_cast<byte*>(value1);
             end = cur + (value2 * Instruction::kInstrSize);
           }
         }
@@ -561,25 +588,25 @@
       } else if (strcmp(cmd, "unstop") == 0) {
           PrintF("Unstop command not implemented on MIPS.");
       } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
-        // Print registers and disassemble
+        // Print registers and disassemble.
         PrintAllRegs();
         PrintF("\n");
 
         disasm::NameConverter converter;
         disasm::Disassembler dasm(converter);
-        // use a reasonably large buffer
+        // Use a reasonably large buffer.
         v8::internal::EmbeddedVector<char, 256> buffer;
 
-        byte_* cur = NULL;
-        byte_* end = NULL;
+        byte* cur = NULL;
+        byte* end = NULL;
 
         if (argc == 1) {
-          cur = reinterpret_cast<byte_*>(sim_->get_pc());
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
           end = cur + (10 * Instruction::kInstrSize);
         } else if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
-            cur = reinterpret_cast<byte_*>(value);
+            cur = reinterpret_cast<byte*>(value);
             // no length parameter passed, assume 10 instructions
             end = cur + (10 * Instruction::kInstrSize);
           }
@@ -587,7 +614,7 @@
           int32_t value1;
           int32_t value2;
           if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
-            cur = reinterpret_cast<byte_*>(value1);
+            cur = reinterpret_cast<byte*>(value1);
             end = cur + (value2 * Instruction::kInstrSize);
           }
         }
@@ -615,8 +642,10 @@
         PrintF("flags\n");
         PrintF("  print flags\n");
         PrintF("disasm [<instructions>]\n");
-        PrintF("disasm [[<address>] <instructions>]\n");
-        PrintF("  disassemble code, default is 10 instructions from pc\n");
+        PrintF("disasm [<address/register>]\n");
+        PrintF("disasm [[<address/register>] <instructions>]\n");
+        PrintF("  disassemble code, default is 10 instructions\n");
+        PrintF("  from pc (alias 'di')\n");
         PrintF("gdb\n");
         PrintF("  enter gdb\n");
         PrintF("break <address>\n");
@@ -689,8 +718,8 @@
 
 CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
   v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
-                                                         ICacheHash(page),
-                                                         true);
+                                                        ICacheHash(page),
+                                                        true);
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
     entry->value = new_page;
@@ -738,23 +767,23 @@
 }
 
 
-void Simulator::Initialize() {
-  if (Isolate::Current()->simulator_initialized()) return;
-  Isolate::Current()->set_simulator_initialized(true);
-  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+void Simulator::Initialize(Isolate* isolate) {
+  if (isolate->simulator_initialized()) return;
+  isolate->set_simulator_initialized(true);
+  ::v8::internal::ExternalReference::set_redirector(isolate,
+                                                    &RedirectExternalReference);
 }
 
 
-Simulator::Simulator() : isolate_(Isolate::Current()) {
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
     i_cache_ = new v8::internal::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
-  Initialize();
+  Initialize(isolate);
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
-  stack_size_ = 1 * 1024*1024;  // allocate 1MB for stack
   stack_ = reinterpret_cast<char*>(malloc(stack_size_));
   pc_modified_ = false;
   icount_ = 0;
@@ -852,17 +881,14 @@
 // Get the active Simulator for the current thread.
 Simulator* Simulator::current(Isolate* isolate) {
   v8::internal::Isolate::PerIsolateThreadData* isolate_data =
-      Isolate::CurrentPerIsolateThreadData();
-  if (isolate_data == NULL) {
-    Isolate::EnterDefaultIsolate();
-    isolate_data = Isolate::CurrentPerIsolateThreadData();
-  }
+       isolate->FindOrAllocatePerThreadDataForThisThread();
   ASSERT(isolate_data != NULL);
 
   Simulator* sim = isolate_data->simulator();
   if (sim == NULL) {
     // TODO(146): delete the simulator object when a thread/isolate goes away.
-    sim = new Simulator();
+    sim = new Simulator(isolate);
     isolate_data->set_simulator(sim);
   }
   return sim;
@@ -877,7 +903,7 @@
     pc_modified_ = true;
   }
 
-  // zero register always hold 0.
+  // Zero register always holds 0.
   registers_[reg] = (reg == 0) ? 0 : value;
 }
 
@@ -937,6 +963,87 @@
 }
 
 
+// For use in calls that take two double values, constructed either
+// from a0-a3 or f12 and f14.
+void Simulator::GetFpArgs(double* x, double* y) {
+  if (!IsMipsSoftFloatABI) {
+    *x = get_fpu_register_double(12);
+    *y = get_fpu_register_double(14);
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+
+    // Registers a0 and a1 -> x.
+    reg_buffer[0] = get_register(a0);
+    reg_buffer[1] = get_register(a1);
+    memcpy(x, buffer, sizeof(buffer));
+
+    // Registers a2 and a3 -> y.
+    reg_buffer[0] = get_register(a2);
+    reg_buffer[1] = get_register(a3);
+    memcpy(y, buffer, sizeof(buffer));
+  }
+}
+
+
+// For use in calls that take one double value, constructed either
+// from a0 and a1 or f12.
+void Simulator::GetFpArgs(double* x) {
+  if (!IsMipsSoftFloatABI) {
+    *x = get_fpu_register_double(12);
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+    // Registers a0 and a1 -> x.
+    reg_buffer[0] = get_register(a0);
+    reg_buffer[1] = get_register(a1);
+    memcpy(x, buffer, sizeof(buffer));
+  }
+}
+
+
+// For use in calls that take one double value constructed either
+// from a0 and a1 or f12 and one integer value.
+void Simulator::GetFpArgs(double* x, int32_t* y) {
+  if (!IsMipsSoftFloatABI) {
+    *x = get_fpu_register_double(12);
+    *y = get_register(a2);
+  } else {
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+    // Registers a0 and a1 -> x.
+    reg_buffer[0] = get_register(a0);
+    reg_buffer[1] = get_register(a1);
+    memcpy(x, buffer, sizeof(buffer));
+
+    // Register a2 -> y.
+    reg_buffer[0] = get_register(a2);
+    memcpy(y, buffer, sizeof(*y));
+  }
+}
+
+
+// The return value is either in v0/v1 or f0.
+void Simulator::SetFpResult(const double& result) {
+  if (!IsMipsSoftFloatABI) {
+    set_fpu_register_double(0, result);
+  } else {
+    char buffer[2 * sizeof(registers_[0])];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+    memcpy(buffer, &result, sizeof(buffer));
+    // Copy result to v0 and v1.
+    set_register(v0, reg_buffer[0]);
+    set_register(v1, reg_buffer[1]);
+  }
+}
+
+
 // Helper functions for setting and testing the FCSR register's bits.
 void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
   if (value) {
@@ -995,7 +1102,7 @@
 
 int Simulator::ReadW(int32_t addr, Instruction* instr) {
   if (addr >=0 && addr < 0x400) {
-    // this has to be a NULL-dereference
+    // This has to be a NULL dereference; drop into the debugger.
     MipsDebugger dbg(this);
     dbg.Debug();
   }
@@ -1003,8 +1110,9 @@
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   MipsDebugger dbg(this);
   dbg.Debug();
   return 0;
@@ -1013,7 +1121,7 @@
 
 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
   if (addr >= 0 && addr < 0x400) {
-    // this has to be a NULL-dereference
+    // This has to be a NULL dereference; drop into the debugger.
     MipsDebugger dbg(this);
     dbg.Debug();
   }
@@ -1022,8 +1130,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   MipsDebugger dbg(this);
   dbg.Debug();
 }
@@ -1034,8 +1143,9 @@
     double* ptr = reinterpret_cast<double*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned (double) read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
   return 0;
 }
@@ -1047,8 +1157,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned (double) write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
 }
 
@@ -1058,8 +1169,9 @@
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
   return 0;
 }
@@ -1070,8 +1182,9 @@
     int16_t* ptr = reinterpret_cast<int16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
   return 0;
 }
@@ -1083,8 +1196,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
 }
 
@@ -1095,8 +1209,9 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr,
-      reinterpret_cast<void*>(instr));
+  PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   OS::Abort();
 }
 
@@ -1158,6 +1273,14 @@
                                          int32_t arg2,
                                          int32_t arg3);
 
+// This signature supports direct calls into the API function's native
+// callback (refer to InvocationCallback in v8.h).
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+
+// This signature supports direct calls to an accessor getter callback.
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
+                                                                  int32_t arg1);
+
 // Software interrupt instructions are used by the simulator to call into the
 // C-based V8 runtime. They are also used for debugging with simulator.
 void Simulator::SoftwareInterrupt(Instruction* instr) {
@@ -1169,11 +1292,6 @@
 
   // We first check if we met a call_rt_redirected.
   if (instr->InstructionBits() == rtCallRedirInstr) {
-    // Check if stack is aligned. Error if not aligned is reported below to
-    // include information on the function called.
-    bool stack_aligned =
-        (get_register(sp)
-         & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
     Redirection* redirection = Redirection::FromSwiInstruction(instr);
     int32_t arg0 = get_register(a0);
     int32_t arg1 = get_register(a1);
@@ -1188,58 +1306,122 @@
     // stack check here.
     int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
     int32_t* stack = reinterpret_cast<int32_t*>(stack_);
-    if (stack_pointer >= stack && stack_pointer < stack + stack_size_) {
-      arg4 = stack_pointer[0];
-      arg5 = stack_pointer[1];
+    if (stack_pointer >= stack && stack_pointer < stack + stack_size_ - 5) {
+      // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+      arg4 = stack_pointer[4];
+      arg5 = stack_pointer[5];
     }
+
+    bool fp_call =
+         (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+    if (!IsMipsSoftFloatABI) {
+      // With the hard floating point calling convention, double
+      // arguments are passed in FPU registers. Fetch the arguments
+      // from there and call the builtin using soft floating point
+      // convention.
+      switch (redirection->type()) {
+      case ExternalReference::BUILTIN_FP_FP_CALL:
+      case ExternalReference::BUILTIN_COMPARE_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        arg2 = get_fpu_register(f14);
+        arg3 = get_fpu_register(f15);
+        break;
+      case ExternalReference::BUILTIN_FP_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        break;
+      case ExternalReference::BUILTIN_FP_INT_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        arg2 = get_register(a2);
+        break;
+      default:
+        break;
+      }
+    }
+
     // This is dodgy but it works because the C entry stubs are never moved.
     // See comment in codegen-arm.cc and bug 1242173.
     int32_t saved_ra = get_register(ra);
 
     intptr_t external =
-        reinterpret_cast<int32_t>(redirection->external_function());
+          reinterpret_cast<intptr_t>(redirection->external_function());
 
     // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
     // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
     // simulator. Soft-float has additional abstraction of ExternalReference,
-    // to support serialization. Finally, when simulated on x86 host, the
-    // x86 softfloat routines are used, and this Redirection infrastructure
-    // lets simulated-mips make calls into x86 C code.
-    // When doing that, the 'double' return type must be handled differently
-    // than the usual int64_t return. The data is returned in different
-    // registers and cannot be cast from one type to the other. However, the
-    // calling arguments are passed the same way in both cases.
-    if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
+    // to support serialization.
+    if (fp_call) {
       SimulatorRuntimeFPCall target =
-          reinterpret_cast<SimulatorRuntimeFPCall>(external);
-      if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
-        PrintF("Call to host function at %p with args %08x:%08x %08x:%08x",
-               FUNCTION_ADDR(target), arg0, arg1, arg2, arg3);
-        if (!stack_aligned) {
-          PrintF(" with unaligned stack %08x\n", get_register(sp));
+                  reinterpret_cast<SimulatorRuntimeFPCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        double dval0, dval1;
+        int32_t ival;
+        switch (redirection->type()) {
+          case ExternalReference::BUILTIN_FP_FP_CALL:
+          case ExternalReference::BUILTIN_COMPARE_CALL:
+            GetFpArgs(&dval0, &dval1);
+            PrintF("Call to host function at %p with args %f, %f",
+                FUNCTION_ADDR(target), dval0, dval1);
+            break;
+          case ExternalReference::BUILTIN_FP_CALL:
+            GetFpArgs(&dval0);
+            PrintF("Call to host function at %p with arg %f",
+                FUNCTION_ADDR(target), dval0);
+            break;
+          case ExternalReference::BUILTIN_FP_INT_CALL:
+            GetFpArgs(&dval0, &ival);
+            PrintF("Call to host function at %p with args %f, %d",
+                FUNCTION_ADDR(target), dval0, ival);
+            break;
+          default:
+            UNREACHABLE();
+            break;
         }
-        PrintF("\n");
       }
       double result = target(arg0, arg1, arg2, arg3);
-      // fp result -> registers v0 and v1.
-      int32_t gpreg_pair[2];
-      memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
-      set_register(v0, gpreg_pair[0]);
-      set_register(v1, gpreg_pair[1]);
+      if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+        SetFpResult(result);
+      } else {
+        int32_t gpreg_pair[2];
+        memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
+        set_register(v0, gpreg_pair[0]);
+        set_register(v1, gpreg_pair[1]);
+      }
     } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
-      PrintF("Mips does not yet support ExternalReference::DIRECT_API_CALL\n");
-      ASSERT(redirection->type() != ExternalReference::DIRECT_API_CALL);
+      // See DirectCEntryStub::GenerateCall for explanation of register usage.
+      SimulatorRuntimeDirectApiCall target =
+                  reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p args %08x\n",
+               FUNCTION_ADDR(target), arg1);
+      }
+      v8::Handle<v8::Value> result = target(arg1);
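+      // Write the result through the slot at arg0 and return the slot in v0.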
+      *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+      set_register(v0, arg0);
     } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
-      PrintF("Mips does not support ExternalReference::DIRECT_GETTER_CALL\n");
-      ASSERT(redirection->type() != ExternalReference::DIRECT_GETTER_CALL);
+      // See DirectCEntryStub::GenerateCall for explanation of register usage.
+      SimulatorRuntimeDirectGetterCall target =
+                  reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p args %08x %08x\n",
+               FUNCTION_ADDR(target), arg1, arg2);
+      }
+      v8::Handle<v8::Value> result = target(arg1, arg2);
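+      // As above: write the result through arg0 and return the slot in v0.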
+      *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+      set_register(v0, arg0);
     } else {
-      // Builtin call.
-      ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
       SimulatorRuntimeCall target =
-          reinterpret_cast<SimulatorRuntimeCall>(external);
-      if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+                  reinterpret_cast<SimulatorRuntimeCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
         PrintF(
-            "Call to host function at %p: %08x, %08x, %08x, %08x, %08x, %08x",
+            "Call to host function at %p "
+            "args %08x, %08x, %08x, %08x, %08x, %08x\n",
             FUNCTION_ADDR(target),
             arg0,
             arg1,
@@ -1247,12 +1429,7 @@
             arg3,
             arg4,
             arg5);
-        if (!stack_aligned) {
-          PrintF(" with unaligned stack %08x\n", get_register(sp));
-        }
-        PrintF("\n");
       }
-
       int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
       set_register(v0, static_cast<int32_t>(result));
       set_register(v1, static_cast<int32_t>(result >> 32));
@@ -1263,8 +1440,8 @@
     set_register(ra, saved_ra);
     set_pc(get_register(ra));
 
-  } else if (func == BREAK && code >= 0 && code < 16) {
-    // First 16 break_ codes interpreted as debug markers.
+  } else if (func == BREAK && code >= 0 && code < 32) {
+    // First 32 break_ codes interpreted as debug-markers/watchpoints.
     MipsDebugger dbg(this);
     ++break_count_;
     PrintF("\n---- break %d marker: %3d  (instr count: %8d) ----------"
@@ -1314,9 +1491,9 @@
   const int32_t  fs_reg = instr->FsValue();
 
 
-  // ---------- Configuration
+  // ---------- Configuration.
   switch (op) {
-    case COP1:    // Coprocessor instructions
+    case COP1:    // Coprocessor instructions.
       switch (instr->RsFieldRaw()) {
         case BC1:   // Handled in DecodeTypeImmed, should never come here.
           UNREACHABLE();
@@ -1365,7 +1542,7 @@
           } else {
             // Logical right-rotate of a word by a fixed number of bits. This
             // is special case of SRL instruction, added in MIPS32 Release 2.
-            // RS field is equal to 00001
+            // RS field is equal to 00001.
             alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
           }
           break;
@@ -1383,7 +1560,7 @@
           } else {
             // Logical right-rotate of a word by a variable number of bits.
             // This is special case od SRLV instruction, added in MIPS32
-            // Release 2. SA field is equal to 00001
+            // Release 2. SA field is equal to 00001.
             alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
           }
           break;
@@ -1402,10 +1579,6 @@
         case MULTU:
           u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
           break;
-        case DIV:
-        case DIVU:
-            exceptions[kDivideByZero] = rt == 0;
-          break;
         case ADD:
           if (HaveSameSign(rs, rt)) {
             if (rs > 0) {
@@ -1450,7 +1623,7 @@
         case SLTU:
           alu_out = rs_u < rt_u ? 1 : 0;
           break;
-        // Break and trap instructions
+        // Break and trap instructions.
         case BREAK:
 
           do_interrupt = true;
@@ -1478,6 +1651,10 @@
         case MOVCI:
           // No action taken on decode.
           break;
+        case DIV:
+        case DIVU:
+          // div and divu never raise exceptions.
+          break;
         default:
           UNREACHABLE();
       };
@@ -1497,7 +1674,7 @@
     case SPECIAL3:
       switch (instr->FunctionFieldRaw()) {
         case INS: {   // Mips32r2 instruction.
-          // Interpret Rd field as 5-bit msb of insert.
+          // Interpret rd field as 5-bit msb of insert.
           uint16_t msb = rd_reg;
           // Interpret sa field as 5-bit lsb of insert.
           uint16_t lsb = sa;
@@ -1507,7 +1684,7 @@
           break;
         }
         case EXT: {   // Mips32r2 instruction.
-          // Interpret Rd field as 5-bit msb of extract.
+          // Interpret rd field as 5-bit msb of extract.
           uint16_t msb = rd_reg;
           // Interpret sa field as 5-bit lsb of extract.
           uint16_t lsb = sa;
@@ -1543,7 +1720,7 @@
   int64_t  i64hilo = 0;
   uint64_t u64hilo = 0;
 
-  // ALU output
+  // ALU output.
   // It should not be used as is. Instructions using it should always
   // initialize it first.
   int32_t alu_out = 0x12345678;
@@ -1551,7 +1728,7 @@
   // For break and trap instructions.
   bool do_interrupt = false;
 
-  // For jr and jalr
+  // For jr and jalr.
   // Get current pc.
   int32_t current_pc = get_pc();
   // Next pc
@@ -1568,11 +1745,11 @@
   // ---------- Raise exceptions triggered.
   SignalExceptions();
 
-  // ---------- Execution
+  // ---------- Execution.
   switch (op) {
     case COP1:
       switch (instr->RsFieldRaw()) {
-        case BC1:   // branch on coprocessor condition
+        case BC1:   // Branch on coprocessor condition.
           UNREACHABLE();
           break;
         case CFC1:
@@ -1802,7 +1979,7 @@
           Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
               current_pc+Instruction::kInstrSize);
           BranchDelayInstructionDecode(branch_delay_instr);
-          set_register(31, current_pc + 2* Instruction::kInstrSize);
+          set_register(31, current_pc + 2 * Instruction::kInstrSize);
           set_pc(next_pc);
           pc_modified_ = true;
           break;
@@ -1817,13 +1994,19 @@
           set_register(HI, static_cast<int32_t>(u64hilo >> 32));
           break;
         case DIV:
-          // Divide by zero was checked in the configuration step.
-          set_register(LO, rs / rt);
-          set_register(HI, rs % rt);
+          // Divide by zero was not checked in the configuration step - div and
+          // divu do not raise exceptions. On division by 0, the result will
+          // be UNPREDICTABLE.
+          if (rt != 0) {
+            set_register(LO, rs / rt);
+            set_register(HI, rs % rt);
+          }
           break;
         case DIVU:
-          set_register(LO, rs_u / rt_u);
-          set_register(HI, rs_u % rt_u);
+          if (rt_u != 0) {
+            set_register(LO, rs_u / rt_u);
+            set_register(HI, rs_u % rt_u);
+          }
           break;
         // Break and trap instructions.
         case BREAK:
@@ -1842,9 +2025,9 @@
           if (rt) set_register(rd_reg, rs);
           break;
         case MOVCI: {
-          uint32_t cc = instr->FCccValue();
+          uint32_t cc = instr->FBccValue();
           uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
-          if (instr->Bit(16)) {  // Read Tf bit
+          if (instr->Bit(16)) {  // Read Tf bit.
             if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
           } else {
             if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
@@ -1893,17 +2076,17 @@
 }
 
 
-// Type 2: instructions using a 16 bytes immediate. (eg: addi, beq)
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
 void Simulator::DecodeTypeImmediate(Instruction* instr) {
   // Instruction fields.
   Opcode   op     = instr->OpcodeFieldRaw();
   int32_t  rs     = get_register(instr->RsValue());
   uint32_t rs_u   = static_cast<uint32_t>(rs);
-  int32_t  rt_reg = instr->RtValue();  // destination register
+  int32_t  rt_reg = instr->RtValue();  // Destination register.
   int32_t  rt     = get_register(rt_reg);
   int16_t  imm16  = instr->Imm16Value();
 
-  int32_t  ft_reg = instr->FtValue();  // destination register
+  int32_t  ft_reg = instr->FtValue();  // Destination register.
 
   // Zero extended immediate.
   uint32_t  oe_imm16 = 0xffff & imm16;
@@ -1927,10 +2110,10 @@
 
   // Used for memory instructions.
   int32_t addr = 0x0;
-  // Value to be written in memory
+  // Value to be written in memory.
   uint32_t mem_value = 0x0;
 
-  // ---------- Configuration (and execution for REGIMM)
+  // ---------- Configuration (and execution for REGIMM).
   switch (op) {
     // ------------- COP1. Coprocessor instructions.
     case COP1:
@@ -1941,7 +2124,7 @@
           cc_value = test_fcsr_bit(fcsr_cc);
           do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
           execute_branch_delay_instruction = true;
-          // Set next_pc
+          // Set next_pc.
           if (do_branch) {
             next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
           } else {
@@ -1952,7 +2135,7 @@
           UNREACHABLE();
       };
       break;
-    // ------------- REGIMM class
+    // ------------- REGIMM class.
     case REGIMM:
       switch (instr->RtFieldRaw()) {
         case BLTZ:
@@ -1977,7 +2160,7 @@
         case BGEZAL:
           // Branch instructions common part.
           execute_branch_delay_instruction = true;
-          // Set next_pc
+          // Set next_pc.
           if (do_branch) {
             next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
             if (instr->IsLinkingInstruction()) {
@@ -1989,8 +2172,8 @@
         default:
           break;
         };
-    break;  // case REGIMM
-    // ------------- Branch instructions
+    break;  // case REGIMM.
+    // ------------- Branch instructions.
     // When comparing to zero, the encoding of rt field is always 0, so we don't
     // need to replace rt with zero.
     case BEQ:
@@ -2005,7 +2188,7 @@
     case BGTZ:
       do_branch = rs  > 0;
       break;
-    // ------------- Arithmetic instructions
+    // ------------- Arithmetic instructions.
     case ADDI:
       if (HaveSameSign(rs, se_imm16)) {
         if (rs > 0) {
@@ -2038,7 +2221,7 @@
     case LUI:
         alu_out = (oe_imm16 << 16);
       break;
-    // ------------- Memory instructions
+    // ------------- Memory instructions.
     case LB:
       addr = rs + se_imm16;
       alu_out = ReadB(addr);
@@ -2048,7 +2231,7 @@
       alu_out = ReadH(addr, instr);
       break;
     case LWL: {
-      // al_offset is an offset of the effective address within an aligned word
+      // al_offset is the offset of the effective address in an aligned word.
       uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
       uint8_t byte_shift = kPointerAlignmentMask - al_offset;
       uint32_t mask = (1 << byte_shift * 8) - 1;
@@ -2071,7 +2254,7 @@
       alu_out = ReadHU(addr, instr);
       break;
     case LWR: {
-      // al_offset is an offset of the effective address within an aligned word
+      // al_offset is the offset of the effective address in an aligned word.
       uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
       uint8_t byte_shift = kPointerAlignmentMask - al_offset;
       uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
@@ -2126,16 +2309,16 @@
   // ---------- Raise exceptions triggered.
   SignalExceptions();
 
-  // ---------- Execution
+  // ---------- Execution.
   switch (op) {
-    // ------------- Branch instructions
+    // ------------- Branch instructions.
     case BEQ:
     case BNE:
     case BLEZ:
     case BGTZ:
       // Branch instructions common part.
       execute_branch_delay_instruction = true;
-      // Set next_pc
+      // Set next_pc.
       if (do_branch) {
         next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
         if (instr->IsLinkingInstruction()) {
@@ -2145,7 +2328,7 @@
         next_pc = current_pc + 2 * Instruction::kInstrSize;
       }
       break;
-    // ------------- Arithmetic instructions
+    // ------------- Arithmetic instructions.
     case ADDI:
     case ADDIU:
     case SLTI:
@@ -2156,7 +2339,7 @@
     case LUI:
       set_register(rt_reg, alu_out);
       break;
-    // ------------- Memory instructions
+    // ------------- Memory instructions.
     case LB:
     case LH:
     case LWL:
@@ -2216,26 +2399,26 @@
 }
 
 
-// Type 3: instructions using a 26 bytes immediate. (eg: j, jal)
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
 void Simulator::DecodeTypeJump(Instruction* instr) {
   // Get current pc.
   int32_t current_pc = get_pc();
   // Get unchanged bits of pc.
   int32_t pc_high_bits = current_pc & 0xf0000000;
-  // Next pc
+  // Next pc.
   int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
 
-  // Execute branch delay slot
+  // Execute branch delay slot.
   // We don't check for end_sim_pc. First it should not be met as the current pc
   // is valid. Secondly a jump should always execute its branch delay slot.
   Instruction* branch_delay_instr =
-    reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
+      reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
   BranchDelayInstructionDecode(branch_delay_instr);
 
   // Update pc and ra if necessary.
   // Do this after the branch delay execution.
   if (instr->IsLinkingInstruction()) {
-    set_register(31, current_pc + 2* Instruction::kInstrSize);
+    set_register(31, current_pc + 2 * Instruction::kInstrSize);
   }
   set_pc(next_pc);
   pc_modified_ = true;
@@ -2251,11 +2434,11 @@
   if (::v8::internal::FLAG_trace_sim) {
     disasm::NameConverter converter;
     disasm::Disassembler dasm(converter);
-    // use a reasonably large buffer
+    // Use a reasonably large buffer.
     v8::internal::EmbeddedVector<char, 256> buffer;
-    dasm.InstructionDecode(buffer, reinterpret_cast<byte_*>(instr));
+    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
     PrintF("  0x%08x  %s\n", reinterpret_cast<intptr_t>(instr),
-           buffer.start());
+        buffer.start());
   }
 
   switch (instr->InstructionType()) {
@@ -2310,10 +2493,10 @@
 }
 
 
-int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
   va_list parameters;
   va_start(parameters, argument_count);
-  // Setup arguments
+  // Setup arguments.
 
   // First four arguments passed in registers.
   ASSERT(argument_count >= 4);
@@ -2338,7 +2521,7 @@
   va_end(parameters);
   set_register(sp, entry_stack);
 
-  // Prepare to execute the code at entry
+  // Prepare to execute the code at entry.
   set_register(pc, reinterpret_cast<int32_t>(entry));
   // Put down marker for end of simulation. The simulator will stop simulation
   // when the PC reaches this value. By saving the "end simulation" value into
@@ -2374,7 +2557,7 @@
   set_register(gp, callee_saved_value);
   set_register(fp, callee_saved_value);
 
-  // Start the simulation
+  // Start the simulation.
   Execute();
 
   // Check that the callee-saved registers have been preserved.
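Note: with the soft-float ABI the new GetFpArgs/SetFpResult helpers above
rebuild double arguments from the integer register pairs (a0/a1, a2/a3) and
return results through v0/v1, going through a char buffer so the copy is not
optimised away under strict aliasing. A hedged sketch of the same trick in
plain C++ (names are illustrative, not part of the patch; assumes the
little-endian word order used above, with the low word in a0/v0):

  #include <cstring>

  double PairToDouble(int32_t lo, int32_t hi) {
    int32_t words[2] = { lo, hi };           // e.g. a0/a1 in, v0/v1 out
    double result;
    memcpy(&result, words, sizeof(result));  // well-defined, unlike a pointer cast
    return result;
  }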
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 0cd9bbe..21476dc 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,14 +50,15 @@
   entry(p0, p1, p2, p3, p4)
 
 typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
-                                  void*, int*, Address, int, Isolate*);
+                                   void*, int*, Address, int, Isolate*);
+
 
 // Call the generated regexp code directly. The code at the entry address
 // should act as a function matching the type arm_regexp_matcher.
 // The fifth argument is a dummy that reserves the space used for
 // the return address added by the ExitFrame in native calls.
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  (FUNCTION_CAST<mips_regexp_matcher>(entry)(                             \
+  (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
       p0, p1, p2, p3, NULL, p4, p5, p6, p7))
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@@ -68,7 +69,8 @@
 // just use the C stack limit.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
     return c_limit;
   }
 
@@ -95,6 +97,7 @@
 // Running with a simulator.
 
 #include "hashmap.h"
+#include "assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -151,7 +154,7 @@
     sp,
     s8,
     ra,
-    // LO, HI, and pc
+    // LO, HI, and pc.
     LO,
     HI,
     pc,   // pc must be the last register.
@@ -164,13 +167,13 @@
   // Generated code will always use doubles. So we will only use even registers.
   enum FPURegister {
     f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
-    f12, f13, f14, f15,   // f12 and f14 are arguments FPURegisters
+    f12, f13, f14, f15,   // f12 and f14 are the argument FPU registers.
     f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
     f26, f27, f28, f29, f30, f31,
     kNumFPURegisters
   };
 
-  Simulator();
+  explicit Simulator(Isolate* isolate);
   ~Simulator();
 
   // The currently executing Simulator instance. Potentially there can be one
@@ -182,7 +185,7 @@
   // instruction.
   void set_register(int reg, int32_t value);
   int32_t get_register(int reg) const;
-  // Same for FPURegisters
+  // Same for FPURegisters.
   void set_fpu_register(int fpureg, int32_t value);
   void set_fpu_register_float(int fpureg, float value);
   void set_fpu_register_double(int fpureg, double value);
@@ -205,7 +208,7 @@
   void Execute();
 
   // Call on program start.
-  static void Initialize();
+  static void Initialize(Isolate* isolate);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -304,7 +307,6 @@
                            int size);
   static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
 
-
   enum Exception {
     none,
     kIntegerOverflow,
@@ -321,9 +323,12 @@
   static void* RedirectExternalReference(void* external_function,
                                          ExternalReference::Type type);
 
-  // Used for real time calls that takes two double values as arguments and
-  // returns a double.
-  void SetFpResult(double result);
+  // For use in calls that take double value arguments.
+  void GetFpArgs(double* x, double* y);
+  void GetFpArgs(double* x);
+  void GetFpArgs(double* x, int32_t* y);
+  void SetFpResult(const double& result);
+
 
   // Architecture state.
   // Registers.
@@ -334,35 +339,36 @@
   uint32_t FCSR_;
 
   // Simulator support.
+  // Allocate 1MB for stack.
+  static const size_t stack_size_ = 1 * 1024 * 1024;
   char* stack_;
-  size_t stack_size_;
   bool pc_modified_;
   int icount_;
   int break_count_;
 
-  // Icache simulation
+  // Icache simulation.
   v8::internal::HashMap* i_cache_;
 
+  v8::internal::Isolate* isolate_;
+
   // Registered breakpoints.
   Instruction* break_pc_;
   Instr break_instr_;
-
-  v8::internal::Isolate* isolate_;
 };
 
 
 // When running with the simulator transition into simulated execution at this
 // point.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+    reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
       FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
 
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  Simulator::current(Isolate::Current())->Call( \
-      entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+    Simulator::current(Isolate::Current())->Call( \
+        entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
 
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
-  try_catch_address == NULL ? \
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address)                              \
+  try_catch_address == NULL ?                                                  \
       NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
 
 
@@ -373,8 +379,9 @@
 // trouble down the line.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
-    return Simulator::current(Isolate::Current())->StackLimit();
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
+    return Simulator::current(isolate)->StackLimit();
   }
 
   static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
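Note: simulator-mips.h now threads an explicit Isolate through the entry
points: Simulator::current(isolate), Initialize(isolate) and
JsLimitFromCLimit(isolate, c_limit), and the 1MB simulator stack size becomes
a compile-time constant. A hedged usage sketch (the call site is hypothetical;
only the names from this header are real):

  Isolate* isolate = Isolate::Current();
  Simulator* sim = Simulator::current(isolate);  // created lazily per thread
  uintptr_t c_limit = reinterpret_cast<uintptr_t>(&isolate);  // any C stack address
  uintptr_t js_limit = SimulatorStack::JsLimitFromCLimit(isolate, c_limit);
  // Under the simulator this returns sim->StackLimit() rather than c_limit.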
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 1a49558..47428a8 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "stub-cache.h"
 
 namespace v8 {
@@ -39,6 +39,124 @@
 #define __ ACCESS_MASM(masm)
 
 
+static void ProbeTable(Isolate* isolate,
+                       MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register name,
+                       Register offset,
+                       Register scratch,
+                       Register scratch2) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+
+  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+
+  // Check the relative positions of the address fields.
+  ASSERT(value_off_addr > key_off_addr);
+  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
+  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+
+  Label miss;
+  Register offsets_base_addr = scratch;
+
+  // Check that the key in the entry matches the name.
+  __ li(offsets_base_addr, Operand(key_offset));
+  __ sll(scratch2, offset, 1);
+  __ addu(scratch2, offsets_base_addr, scratch2);
+  __ lw(scratch2, MemOperand(scratch2));
+  __ Branch(&miss, ne, name, Operand(scratch2));
+
+  // Get the code entry from the cache.
+  __ Addu(offsets_base_addr, offsets_base_addr,
+          Operand(value_off_addr - key_off_addr));
+  __ sll(scratch2, offset, 1);
+  __ addu(scratch2, offsets_base_addr, scratch2);
+  __ lw(scratch2, MemOperand(scratch2));
+
+  // Check that the flags match what we're looking for.
+  __ lw(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
+  __ And(scratch2, scratch2, Operand(~Code::kFlagsNotUsedInLookup));
+  __ Branch(&miss, ne, scratch2, Operand(flags));
+
+  // Re-load code entry from cache.
+  __ sll(offset, offset, 1);
+  __ addu(offset, offset, offsets_base_addr);
+  __ lw(offset, MemOperand(offset));
+
+  // Jump to the first instruction in the code stub.
+  __ Addu(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(offset);
+
+  // Miss: fall through.
+  __ bind(&miss);
+}
+
+
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so miss_label
+// must always call a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss_label,
+    Register receiver,
+    String* name,
+    Register scratch0,
+    Register scratch1) {
+  ASSERT(name->IsSymbol());
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  Label done;
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  Register map = scratch1;
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ Branch(miss_label, ne, at, Operand(zero_reg));
+
+
+  // Check that receiver is a JSObject.
+  __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  // Check that the properties array is a dictionary.
+  __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+  Register tmp = properties;
+  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+  __ Branch(miss_label, ne, map, Operand(tmp));
+
+  // Restore the temporarily used register.
+  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+      masm,
+      miss_label,
+      &done,
+      receiver,
+      properties,
+      name,
+      scratch1);
+  if (result->IsFailure()) return result;
+
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  return result;
+}
+
+
 void StubCache::GenerateProbe(MacroAssembler* masm,
                               Code::Flags flags,
                               Register receiver,
@@ -46,20 +164,96 @@
                               Register scratch,
                               Register extra,
                               Register extra2) {
-  UNIMPLEMENTED_MIPS();
+  Isolate* isolate = masm->isolate();
+  Label miss;
+
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 8.
+  ASSERT(sizeof(Entry) == 8);
+
+  // Make sure the flags do not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!scratch.is(receiver));
+  ASSERT(!scratch.is(name));
+  ASSERT(!extra.is(receiver));
+  ASSERT(!extra.is(name));
+  ASSERT(!extra.is(scratch));
+  ASSERT(!extra2.is(receiver));
+  ASSERT(!extra2.is(name));
+  ASSERT(!extra2.is(scratch));
+  ASSERT(!extra2.is(extra));
+
+  // Check scratch, extra and extra2 registers are valid.
+  ASSERT(!scratch.is(no_reg));
+  ASSERT(!extra.is(no_reg));
+  ASSERT(!extra2.is(no_reg));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss, t0);
+
+  // Get the map of the receiver and compute the hash.
+  __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
+  __ lw(t8, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Addu(scratch, scratch, Operand(t8));
+  __ Xor(scratch, scratch, Operand(flags));
+  __ And(scratch,
+         scratch,
+         Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the primary table.
+  ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ Subu(scratch, scratch, Operand(name));
+  __ Addu(scratch, scratch, Operand(flags));
+  __ And(scratch,
+         scratch,
+         Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
 }
 
 
 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                        int index,
                                                        Register prototype) {
-  UNIMPLEMENTED_MIPS();
+  // Load the global or builtins object from the current context.
+  __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  __ lw(prototype,
+         FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
+  // Load the initial map.  The global functions all have initial maps.
+  __ lw(prototype,
+         FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
 }
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
     MacroAssembler* masm, int index, Register prototype, Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  Isolate* isolate = masm->isolate();
+  // Check we're still in the same context.
+  __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  ASSERT(!prototype.is(at));
+  __ li(at, isolate->global());
+  __ Branch(miss, ne, prototype, Operand(at));
+  // Get the global function with the given index.
+  JSFunction* function =
+      JSFunction::cast(isolate->global_context()->get(index));
+  // Load its initial map. The global functions all have initial maps.
+  __ li(prototype, Handle<Map>(function->initial_map()));
+  // Load the prototype from the initial map.
+  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
 }
 
 
@@ -69,7 +263,18 @@
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
                                             Register dst, Register src,
                                             JSObject* holder, int index) {
-  UNIMPLEMENTED_MIPS();
+  // Adjust for the number of properties stored in the holder.
+  index -= holder->map()->inobject_properties();
+  if (index < 0) {
+    // Get the property straight out of the holder.
+    int offset = holder->map()->instance_size() + (index * kPointerSize);
+    __ lw(dst, FieldMemOperand(src, offset));
+  } else {
+    // Calculate the offset into the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+    __ lw(dst, FieldMemOperand(dst, offset));
+  }
 }
 
 
@@ -77,7 +282,41 @@
                                            Register receiver,
                                            Register scratch,
                                            Label* miss_label) {
-  UNIMPLEMENTED_MIPS();
+  // Check that the receiver isn't a smi.
+  __ And(scratch, receiver, Operand(kSmiTagMask));
+  __ Branch(miss_label, eq, scratch, Operand(zero_reg));
+
+  // Check that the object is a JS array.
+  __ GetObjectType(receiver, scratch, scratch);
+  __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+  // Load length directly from the JS array.
+  __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Ret();
+}
+
+
+// Generate code to check if an object is a string.  If the object is a
+// heap object, its map's instance type is left in the scratch1 register.
+// If this is not needed, scratch1 and scratch2 may be the same register.
+static void GenerateStringCheck(MacroAssembler* masm,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Label* smi,
+                                Label* non_string_object) {
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, smi, t0);
+
+  // Check that the object is a string.
+  __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ And(scratch2, scratch1, Operand(kIsNotStringMask));
+  // The cast is to resolve the overload for the argument of 0x0.
+  __ Branch(non_string_object,
+            ne,
+            scratch2,
+            Operand(static_cast<int32_t>(kStringTag)));
 }
 
 
@@ -91,7 +330,28 @@
                                             Register scratch2,
                                             Label* miss,
                                             bool support_wrappers) {
-  UNIMPLEMENTED_MIPS();
+  Label check_wrapper;
+
+  // Check if the object is a string leaving the instance type in the
+  // scratch1 register.
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+                      support_wrappers ? &check_wrapper : miss);
+
+  // Load length directly from the string.
+  __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
+  __ Ret();
+
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+
+    // Unwrap the value and check if the wrapped value is a string.
+    __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+    __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
+    __ Ret();
+  }
 }
 
 
@@ -100,7 +360,9 @@
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Label* miss_label) {
-  UNIMPLEMENTED_MIPS();
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mov(v0, scratch1);
+  __ Ret();
 }
 
 
@@ -115,15 +377,254 @@
                                       Register name_reg,
                                       Register scratch,
                                       Label* miss_label) {
-  UNIMPLEMENTED_MIPS();
+  // a0 : value.
+  Label exit;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver_reg, miss_label, scratch);
+
+  // Check that the map of the receiver hasn't changed.
+  __ lw(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  __ Branch(miss_label, ne, scratch, Operand(Handle<Map>(object->map())));
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  // Perform map transition for the receiver if necessary.
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ push(receiver_reg);
+    __ li(a2, Operand(Handle<Map>(transition)));
+    __ Push(a2, a0);
+    __ TailCallExternalReference(
+           ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                             masm->isolate()),
+           3, 1);
+    return;
+  }
+
+  if (transition != NULL) {
+    // Update the map of the object; no write barrier updating is
+    // needed because the map is never in new space.
+    __ li(t0, Operand(Handle<Map>(transition)));
+    __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  }
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= object->map()->inobject_properties();
+
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    __ sw(a0, FieldMemOperand(receiver_reg, offset));
+
+    // Skip updating write barrier if storing a smi.
+    __ JumpIfSmi(a0, &exit, scratch);
+
+    // Update the write barrier for the array address.
+    // Pass the now unused name_reg as a scratch register.
+    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array.
+    __ lw(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ sw(a0, FieldMemOperand(scratch, offset));
+
+    // Skip updating write barrier if storing a smi.
+    __ JumpIfSmi(a0, &exit);
+
+    // Update the write barrier for the array address.
+    // Ok to clobber receiver_reg and name_reg, since we return.
+    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+  }
+
+  // Return the value (register v0).
+  __ bind(&exit);
+  __ mov(v0, a0);
+  __ Ret();
 }
 
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
+  } else {
+    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
 
 
+static void GenerateCallFunction(MacroAssembler* masm,
+                                 Object* object,
+                                 const ParameterCount& arguments,
+                                 Label* miss) {
+  // ----------- S t a t e -------------
+  //  -- a0: receiver
+  //  -- a1: function to call
+  // -----------------------------------
+  // Check that the function really is a function.
+  __ JumpIfSmi(a1, miss);
+  __ GetObjectType(a1, a3, a3);
+  __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a3, MemOperand(sp, arguments.immediate() * kPointerSize));
+  }
+
+  // Invoke the function.
+  __ InvokeFunction(a1, arguments, JUMP_FUNCTION);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm,
+                                     Register receiver,
+                                     Register holder,
+                                     Register name,
+                                     JSObject* holder_obj) {
+  __ push(name);
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  Register scratch = name;
+  __ li(scratch, Operand(Handle<Object>(interceptor)));
+  __ Push(scratch, receiver, holder);
+  __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
+  __ push(scratch);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Register name,
+                                                   JSObject* holder_obj) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
+          masm->isolate());
+  __ li(a0, Operand(5));
+  __ li(a1, Operand(ref));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+}
+
+
+static const int kFastApiCallArguments = 3;
+
+
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
+                                       Register scratch) {
+  ASSERT(Smi::FromInt(0) == 0);
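+  // Pushing zero_reg therefore pushes Smi::FromInt(0), so the reserved
+  // slots always hold valid tagged values for the GC.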
+  for (int i = 0; i < kFastApiCallArguments; i++) {
+    __ push(zero_reg);
+  }
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
+  __ Drop(kFastApiCallArguments);
+}
+
+
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+                                      const CallOptimization& optimization,
+                                      int argc) {
+  // ----------- S t a t e -------------
+  //  -- sp[0]              : holder (set by CheckPrototypes)
+  //  -- sp[4]              : callee js function
+  //  -- sp[8]              : call data
+  //  -- sp[12]             : last js argument
+  //  -- ...
+  //  -- sp[(argc + 3) * 4] : first js argument
+  //  -- sp[(argc + 4) * 4] : receiver
+  // -----------------------------------
+  // Get the function and setup the context.
+  JSFunction* function = optimization.constant_function();
+  __ li(t1, Operand(Handle<JSFunction>(function)));
+  __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
+
+  // Pass the additional arguments FastHandleApiCall expects.
+  Object* call_data = optimization.api_call_info()->data();
+  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
+  if (masm->isolate()->heap()->InNewSpace(call_data)) {
+    __ li(a0, api_call_info_handle);
+    __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
+  } else {
+    __ li(t2, Operand(Handle<Object>(call_data)));
+  }
+
+  // Store js function and call data.
+  __ sw(t1, MemOperand(sp, 1 * kPointerSize));
+  __ sw(t2, MemOperand(sp, 2 * kPointerSize));
+
+  // a2 points to call data as expected by Arguments
+  // (refer to layout above).
+  __ Addu(a2, sp, Operand(2 * kPointerSize));
+
+  Object* callback = optimization.api_call_info()->callback();
+  Address api_function_address = v8::ToCData<Address>(callback);
+  ApiFunction fun(api_function_address);
+
+  const int kApiStackSpace = 4;
+
+  __ EnterExitFrame(false, kApiStackSpace);
+
+  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+  // struct from the function (which is currently the case). This means we pass
+  // the first argument in a1 instead of a0. TryCallApiFunctionAndReturn
+  // will handle setting up a0.
+
+  // a1 = v8::Arguments&
+  // Arguments is built at sp + 1 (sp is a reserved spot for ra).
+  __ Addu(a1, sp, kPointerSize);
+
+  // v8::Arguments::implicit_args = data
+  __ sw(a2, MemOperand(a1, 0 * kPointerSize));
+  // v8::Arguments::values = last argument
+  __ Addu(t0, a2, Operand(argc * kPointerSize));
+  __ sw(t0, MemOperand(a1, 1 * kPointerSize));
+  // v8::Arguments::length_ = argc
+  __ li(t0, Operand(argc));
+  __ sw(t0, MemOperand(a1, 2 * kPointerSize));
+  // v8::Arguments::is_construct_call = 0
+  __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
+
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated). Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
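+  // This covers the JS arguments, the kFastApiCallArguments extra slots
+  // and the receiver.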
+  ExternalReference ref =
+      ExternalReference(&fun,
+                        ExternalReference::DIRECT_API_CALL,
+                        masm->isolate());
+  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
+}
+
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -133,32 +634,150 @@
         arguments_(arguments),
         name_(name) {}
 
-  void Compile(MacroAssembler* masm,
-               JSObject* object,
-               JSObject* holder,
-               String* name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss) {
-    UNIMPLEMENTED_MIPS();
-  }
-
- private:
-  void CompileCacheable(MacroAssembler* masm,
+  MaybeObject* Compile(MacroAssembler* masm,
                        JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
                        Register receiver,
                        Register scratch1,
                        Register scratch2,
                        Register scratch3,
-                       JSObject* interceptor_holder,
-                       LookupResult* lookup,
-                       String* name,
-                       const CallOptimization& optimization,
-                       Label* miss_label) {
-    UNIMPLEMENTED_MIPS();
+                       Label* miss) {
+    ASSERT(holder->HasNamedInterceptor());
+    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+    // Check that the receiver isn't a smi.
+    __ JumpIfSmi(receiver, miss);
+
+    CallOptimization optimization(lookup);
+
+    if (optimization.is_constant_call()) {
+      return CompileCacheable(masm,
+                              object,
+                              receiver,
+                              scratch1,
+                              scratch2,
+                              scratch3,
+                              holder,
+                              lookup,
+                              name,
+                              optimization,
+                              miss);
+    } else {
+      CompileRegular(masm,
+                     object,
+                     receiver,
+                     scratch1,
+                     scratch2,
+                     scratch3,
+                     name,
+                     holder,
+                     miss);
+      return masm->isolate()->heap()->undefined_value();
+    }
+  }
+
+ private:
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
+    ASSERT(optimization.is_constant_call());
+    ASSERT(!lookup->holder()->IsGlobalObject());
+
+    Counters* counters = masm->isolate()->counters();
+
+    int depth1 = kInvalidProtoDepth;
+    int depth2 = kInvalidProtoDepth;
+    bool can_do_fast_api_call = false;
+    if (optimization.is_simple_api_call() &&
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                      interceptor_holder);
+      if (depth1 == kInvalidProtoDepth) {
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                        lookup->holder());
+      }
+      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+                             (depth2 != kInvalidProtoDepth);
+    }
+
+    __ IncrementCounter(counters->call_const_interceptor(), 1,
+                      scratch1, scratch2);
+
+    if (can_do_fast_api_call) {
+      __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
+                          scratch1, scratch2);
+      ReserveSpaceForFastApiCall(masm, scratch1);
+    }
+
+    // Check that the maps from receiver to interceptor's holder
+    // haven't changed and thus we can invoke interceptor.
+    Label miss_cleanup;
+    Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+    Register holder =
+      stub_compiler_->CheckPrototypes(object, receiver,
+                                      interceptor_holder, scratch1,
+                                      scratch2, scratch3, name, depth1, miss);
+
+    // Invoke an interceptor and if it provides a value,
+    // branch to |regular_invoke|.
+    Label regular_invoke;
+    LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
+                        &regular_invoke);
+
+    // Interceptor returned nothing for this property.  Try to use cached
+    // constant function.
+
+    // Check that the maps from interceptor's holder to constant function's
+    // holder haven't changed and thus we can use cached constant function.
+    if (interceptor_holder != lookup->holder()) {
+      stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+                                      lookup->holder(), scratch1,
+                                      scratch2, scratch3, name, depth2, miss);
+    } else {
+      // CheckPrototypes has a side effect of fetching a 'holder'
+      // for API (object which is instanceof for the signature).  It's
+      // safe to omit it here, as if present, it should be fetched
+      // by the previous CheckPrototypes.
+      ASSERT(depth2 == kInvalidProtoDepth);
+    }
+
+    // Invoke function.
+    if (can_do_fast_api_call) {
+      MaybeObject* result = GenerateFastApiDirectCall(masm,
+                                                      optimization,
+                                                      arguments_.immediate());
+      if (result->IsFailure()) return result;
+    } else {
+      __ InvokeFunction(optimization.constant_function(), arguments_,
+                        JUMP_FUNCTION);
+    }
+
+    // Deferred code for the fast API call case: clean up preallocated space.
+    if (can_do_fast_api_call) {
+      __ bind(&miss_cleanup);
+      FreeSpaceForFastApiCall(masm);
+      __ Branch(miss_label);
+    }
+
+    // Invoke a regular function.
+    __ bind(&regular_invoke);
+    if (can_do_fast_api_call) {
+      FreeSpaceForFastApiCall(masm);
+    }
+
+    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
@@ -170,7 +789,31 @@
                       String* name,
                       JSObject* interceptor_holder,
                       Label* miss_label) {
-    UNIMPLEMENTED_MIPS();
+    Register holder =
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, scratch3, name,
+                                        miss_label);
+
+    // Call a runtime function to load the interceptor property.
+    __ EnterInternalFrame();
+    // Save the name_ register across the call.
+    __ push(name_);
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             name_,
+                             interceptor_holder);
+
+    __ CallExternalReference(
+          ExternalReference(
+              IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
+              masm->isolate()),
+          5);
+
+    // Restore the name_ register.
+    __ pop(name_);
+    __ LeaveInternalFrame();
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
@@ -179,7 +822,23 @@
                            JSObject* holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    UNIMPLEMENTED_MIPS();
+    __ EnterInternalFrame();
+
+    __ Push(holder, name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder.
+    __ LeaveInternalFrame();
+
+    // If interceptor returns no-result sentinel, call the constant function.
+    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
   }
 
   StubCompiler* stub_compiler_;
@@ -188,6 +847,175 @@
 };
 
 
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
+    MacroAssembler* masm,
+    GlobalObject* global,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  Object* probe;
+  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+  ASSERT(cell->value()->IsTheHole());
+  __ li(scratch, Operand(Handle<Object>(cell)));
+  __ lw(scratch,
+        FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  __ Branch(miss, ne, scratch, Operand(at));
+  return cell;
+}
+
+
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
+    MacroAssembler* masm,
+    JSObject* object,
+    JSObject* holder,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  JSObject* current = object;
+  while (current != holder) {
+    if (current->IsGlobalObject()) {
+      // Returns a cell or a failure.
+      MaybeObject* result = GenerateCheckPropertyCell(
+          masm,
+          GlobalObject::cast(current),
+          name,
+          scratch,
+          miss);
+      if (result->IsFailure()) return result;
+    }
+    ASSERT(current->IsJSObject());
+    current = JSObject::cast(current->GetPrototype());
+  }
+  return NULL;
+}
+
+
+// Convert the int passed in register ival to an IEEE 754 single precision
+// floating point value and store it at (dst + 4 * wordoffset).
+// If FPU is available, use it for the conversion.
+static void StoreIntAsFloat(MacroAssembler* masm,
+                            Register dst,
+                            Register wordoffset,
+                            Register ival,
+                            Register fval,
+                            Register scratch1,
+                            Register scratch2) {
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ mtc1(ival, f0);
+    __ cvt_s_w(f0, f0);
+    __ sll(scratch1, wordoffset, 2);
+    __ addu(scratch1, dst, scratch1);
+    __ swc1(f0, MemOperand(scratch1, 0));
+  } else {
+    // FPU is not available, so do the conversion manually.
+
+    Label not_special, done;
+    // Move sign bit from source to destination.  This works because the sign
+    // bit in the exponent word of the double has the same position and polarity
+    // as the 2's complement sign bit in a Smi.
+    ASSERT(kBinary32SignMask == 0x80000000u);
+
+    __ And(fval, ival, Operand(kBinary32SignMask));
+    // Negate value if it is negative.
+    __ subu(scratch1, zero_reg, ival);
+    __ movn(ival, scratch1, fval);
+
+    // We have -1, 0 or 1, which we treat specially. Register ival contains
+    // absolute value: it is either equal to 1 (special case of -1 and 1),
+    // greater than 1 (not a special case) or less than 1 (special case of 0).
+    __ Branch(&not_special, gt, ival, Operand(1));
+
+    // For 1 or -1 we need to or in the 0 exponent (biased).
+    static const uint32_t exponent_word_for_1 =
+        kBinary32ExponentBias << kBinary32ExponentShift;
+
+    __ Xor(scratch1, ival, Operand(1));
+    __ li(scratch2, exponent_word_for_1);
+    __ or_(scratch2, fval, scratch2);
+    __ movz(fval, scratch2, scratch1);  // Only if ival is equal to 1.
+    __ Branch(&done);
+
+    __ bind(&not_special);
+    // Count leading zeros.
+    // Gets the wrong answer for 0, but we already checked for that case above.
+    Register zeros = scratch2;
+    __ clz(zeros, ival);
+
+    // Compute exponent and or it into the exponent register.
+    __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
+    __ subu(scratch1, scratch1, zeros);
+
+    __ sll(scratch1, scratch1, kBinary32ExponentShift);
+    __ or_(fval, fval, scratch1);
+
+    // Shift up the source chopping the top bit off.
+    __ Addu(zeros, zeros, Operand(1));
+    // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
+    __ sllv(ival, ival, zeros);
+    // Or in the mantissa (the top kBinary32MantissaBits bits of ival).
+    __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
+    __ or_(fval, fval, scratch1);
+
+    __ bind(&done);
+
+    __ sll(scratch1, wordoffset, 2);
+    __ addu(scratch1, dst, scratch1);
+    __ sw(fval, MemOperand(scratch1, 0));
+  }
+}
+
+
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register hiword,
+                                Register loword,
+                                Register scratch,
+                                int leading_zeroes) {
+  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+  const int mantissa_shift_for_hi_word =
+      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+  const int mantissa_shift_for_lo_word =
+      kBitsPerInt - mantissa_shift_for_hi_word;
+
+  __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
+  if (mantissa_shift_for_hi_word > 0) {
+    __ sll(loword, hiword, mantissa_shift_for_lo_word);
+    __ srl(hiword, hiword, mantissa_shift_for_hi_word);
+    __ or_(hiword, scratch, hiword);
+  } else {
+    __ mov(loword, zero_reg);
+    __ sll(hiword, hiword, mantissa_shift_for_hi_word);
+    __ or_(hiword, scratch, hiword);
+  }
+
+  // If the least significant bit of the biased exponent was not 1, it was
+  // corrupted by the most significant bit of the mantissa, so fix it here.
+  if (!(biased_exponent & 1)) {
+    __ li(scratch, 1 << HeapNumber::kExponentShift);
+    __ nor(scratch, scratch, scratch);
+    __ and_(hiword, hiword, scratch);
+  }
+}
+
+
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -201,8 +1029,132 @@
                                        String* name,
                                        int save_at_depth,
                                        Label* miss) {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
+  // Make sure there's no overlap between holder and object registers.
+  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
+         && !scratch2.is(scratch1));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  if (save_at_depth == depth) {
+    __ sw(reg, MemOperand(sp));
+  }
+
+  // Check the maps in the prototype chain.
+  // Traverse the prototype chain from the object and do map checks.
+  JSObject* current = object;
+  while (current != holder) {
+    depth++;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+    ASSERT(current->GetPrototype()->IsJSObject());
+    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    if (!current->HasFastProperties() &&
+        !current->IsJSGlobalObject() &&
+        !current->IsJSGlobalProxy()) {
+      if (!name->IsSymbol()) {
+        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
+        Object* lookup_result = NULL;  // Initialization to please compiler.
+        if (!maybe_lookup_result->ToObject(&lookup_result)) {
+          set_failure(Failure::cast(maybe_lookup_result));
+          return reg;
+        }
+        name = String::cast(lookup_result);
+      }
+      ASSERT(current->property_dictionary()->FindEntry(name) ==
+             StringDictionary::kNotFound);
+
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
+
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now the object is in holder_reg.
+      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else if (heap()->InNewSpace(prototype)) {
+      // Get the map of the current object.
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+      // Branch on the result of the map check.
+      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        // Restore scratch register to be the map of the object.  In the
+        // new space case below, we load the prototype from the map in
+        // the scratch register.
+        __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      }
+
+      reg = holder_reg;  // From now the object is in holder_reg.
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      // Check the map of the current object.
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      // Branch on the result of the map check.
+      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // From now the object is in holder_reg.
+      __ li(reg, Operand(Handle<JSObject>(prototype)));
+    }
+
+    if (save_at_depth == depth) {
+      __ sw(reg, MemOperand(sp));
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+  }
+
+  // Check the holder map.
+  __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+  // Log the check depth.
+  LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
+  // Perform security check for access to the global object.
+  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+  if (holder->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, miss);
+  };
+
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty.
+
+  MaybeObject* result = GenerateCheckPropertyCells(masm(),
+                                                   object,
+                                                   holder,
+                                                   name,
+                                                   scratch1,
+                                                   miss);
+  if (result->IsFailure()) set_failure(Failure::cast(result));
+
+  // Return the register containing the holder.
+  return reg;
 }
 
 
@@ -215,7 +1167,16 @@
                                      int index,
                                      String* name,
                                      Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  // Check that the receiver isn't a smi.
+  __ And(scratch1, receiver, Operand(kSmiTagMask));
+  __ Branch(miss, eq, scratch1, Operand(zero_reg));
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+                      name, miss);
+  GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
+  __ Ret();
 }
 
 
@@ -228,7 +1189,17 @@
                                         Object* value,
                                         String* name,
                                         Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss, scratch1);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, scratch3, name, miss);
+
+  // Return the constant value.
+  __ li(v0, Operand(Handle<Object>(value)));
+  __ Ret();
 }
 
 
@@ -242,8 +1213,56 @@
                                                 AccessorInfo* callback,
                                                 String* name,
                                                 Label* miss) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss, scratch1);
+
+  // Check that the maps haven't changed.
+  Register reg =
+    CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+                    name, miss);
+
+  // Build AccessorInfo::args_ list on the stack and push property name below
+  // the exit frame to make GC aware of them and store pointers to them.
+  __ push(receiver);
+  __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
+  Handle<AccessorInfo> callback_handle(callback);
+  if (heap()->InNewSpace(callback_handle->data())) {
+    __ li(scratch3, callback_handle);
+    __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+  } else {
+    __ li(scratch3, Handle<Object>(callback_handle->data()));
+  }
+  __ Push(reg, scratch3, name_reg);
+  __ mov(a2, scratch2);  // Saved in case scratch2 == a1.
+  __ mov(a1, sp);  // a1 (first argument - see note below) = Handle<String>
+
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+
+  // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
+  // struct from the function (which is currently the case). This means we pass
+  // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
+  // will handle setting up a0.
+
+  const int kApiStackSpace = 1;
+
+  __ EnterExitFrame(false, kApiStackSpace);
+  // Create AccessorInfo instance on the stack above the exit frame with
+  // scratch2 (internal::Object **args_) as the data.
+  __ sw(a2, MemOperand(sp, kPointerSize));
+  // a2 (second argument - see note above) = AccessorInfo&
+  __ Addu(a2, sp, kPointerSize);
+
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated).  Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  ExternalReference ref =
+      ExternalReference(&fun,
+                        ExternalReference::DIRECT_GETTER_CALL,
+                        masm()->isolate());
+  // 4 args - will be freed later by LeaveExitFrame.
+  return masm()->TryCallApiFunctionAndReturn(ref, 4);
 }
 
 
@@ -257,12 +1276,143 @@
                                            Register scratch3,
                                            String* name,
                                            Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(interceptor_holder->HasNamedInterceptor());
+  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, miss);
+
+  // So far the most popular follow-ups for interceptor loads are FIELD
+  // and CALLBACKS, so inline only those; other cases may be added
+  // later.
+  bool compile_followup_inline = false;
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
+    if (lookup->type() == FIELD) {
+      compile_followup_inline = true;
+    } else if (lookup->type() == CALLBACKS &&
+        lookup->GetCallbackObject()->IsAccessorInfo() &&
+        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+      compile_followup_inline = true;
+    }
+  }
+
+  if (compile_followup_inline) {
+    // Compile the interceptor call, followed by inline code to load the
+    // property from further up the prototype chain if the call fails.
+    // Check that the maps haven't changed.
+    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+                                          scratch1, scratch2, scratch3,
+                                          name, miss);
+    ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+    // Save necessary data before invoking an interceptor.
+    // Requires a frame to make GC aware of pushed pointers.
+    __ EnterInternalFrame();
+
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      // CALLBACKS case needs a receiver to be passed into C++ callback.
+      __ Push(receiver, holder_reg, name_reg);
+    } else {
+      __ Push(holder_reg, name_reg);
+    }
+
+    // Invoke an interceptor.  Note: map checks from receiver to
+    // interceptor's holder has been compiled before (see a caller
+    // of this method).
+    CompileCallLoadPropertyWithInterceptor(masm(),
+                                           receiver,
+                                           holder_reg,
+                                           name_reg,
+                                           interceptor_holder);
+
+    // Check if interceptor provided a value for property.  If it's
+    // the case, return immediately.
+    Label interceptor_failed;
+    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+    __ LeaveInternalFrame();
+    __ Ret();
+
+    __ bind(&interceptor_failed);
+    __ pop(name_reg);
+    __ pop(holder_reg);
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
+
+    // Check that the maps from interceptor's holder to lookup's holder
+    // haven't changed.  And load lookup's holder into |holder| register.
+    if (interceptor_holder != lookup->holder()) {
+      holder_reg = CheckPrototypes(interceptor_holder,
+                                   holder_reg,
+                                   lookup->holder(),
+                                   scratch1,
+                                   scratch2,
+                                   scratch3,
+                                   name,
+                                   miss);
+    }
+
+    if (lookup->type() == FIELD) {
+      // We found FIELD property in prototype chain of interceptor's holder.
+      // Retrieve a field from field's holder.
+      GenerateFastPropertyLoad(masm(), v0, holder_reg,
+                               lookup->holder(), lookup->GetFieldIndex());
+      __ Ret();
+    } else {
+      // We found CALLBACKS property in prototype chain of interceptor's
+      // holder.
+      ASSERT(lookup->type() == CALLBACKS);
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+      ASSERT(callback != NULL);
+      ASSERT(callback->getter() != NULL);
+
+      // Tail call to runtime.
+      // Important invariant in CALLBACKS case: the code above must be
+      // structured to never clobber |receiver| register.
+      __ li(scratch2, Handle<AccessorInfo>(callback));
+      // holder_reg is either receiver or scratch1.
+      if (!receiver.is(holder_reg)) {
+        ASSERT(scratch1.is(holder_reg));
+        __ Push(receiver, holder_reg);
+        __ lw(scratch3,
+              FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+        __ Push(scratch3, scratch2, name_reg);
+      } else {
+        __ push(receiver);
+        __ lw(scratch3,
+              FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+        __ Push(holder_reg, scratch3, scratch2, name_reg);
+      }
+
+      ExternalReference ref =
+          ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
+                            masm()->isolate());
+      __ TailCallExternalReference(ref, 5, 1);
+    }
+  } else {  // !compile_followup_inline
+    // Call the runtime system to load the interceptor.
+    // Check that the maps haven't changed.
+    Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+                                          scratch1, scratch2, scratch3,
+                                          name, miss);
+    PushInterceptorArguments(masm(), receiver, holder_reg,
+                             name_reg, interceptor_holder);
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
+    __ TailCallExternalReference(ref, 5, 1);
+  }
 }
 
 
 void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  if (kind_ == Code::KEYED_CALL_IC) {
+    __ Branch(miss, ne, a2, Operand(Handle<String>(name)));
+  }
 }
 
 
@@ -270,20 +1420,63 @@
                                                    JSObject* holder,
                                                    String* name,
                                                    Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(holder->IsGlobalObject());
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  // Get the receiver from the stack.
+  __ lw(a0, MemOperand(sp, argc * kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(a0, miss);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
 }
 
 
 void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
                                                     JSFunction* function,
                                                     Label* miss) {
-  UNIMPLEMENTED_MIPS();
+  // Get the value from the cell.
+  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+  // Check that the cell contains the same function.
+  if (heap()->InNewSpace(function)) {
+    // We can't embed a pointer to a function in new space so we have
+    // to verify that the shared function info is unchanged. This has
+    // the nice side effect that multiple closures based on the same
+    // function can all use this call IC. Before we load through the
+    // function, we have to verify that it still is a function.
+    __ JumpIfSmi(a1, miss);
+    __ GetObjectType(a1, a3, a3);
+    __ Branch(miss, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+    // Check the shared function info. Make sure it hasn't changed.
+    __ li(a3, Handle<SharedFunctionInfo>(function->shared()));
+    __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ Branch(miss, ne, t0, Operand(a3));
+  } else {
+    __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
+  }
 }
 
 
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  MaybeObject* maybe_obj =
+      isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+                                               kind_,
+                                               extra_ic_state_);
+  Object* obj;
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
+  return obj;
 }
 
 
@@ -291,8 +1484,34 @@
                                                 JSObject* holder,
                                                 int index,
                                                 String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  const int argc = arguments().immediate();
+
+  // Get the receiver of the function from the stack into a0.
+  __ lw(a0, MemOperand(sp, argc * kPointerSize));
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(a0, &miss, t0);
+
+  // Do the right check and compute the holder register.
+  Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
+  GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
+
+  GenerateCallFunction(masm(), object, arguments(), &miss);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
 }
 
 
@@ -301,8 +1520,160 @@
                                                     JSGlobalPropertyCell* cell,
                                                     JSFunction* function,
                                                     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  Register receiver = a1;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(JSObject::cast(object), receiver,
+                  holder, a3, v0, t0, name, &miss);
+
+  if (argc == 0) {
+    // Nothing to do, just return the length.
+    __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+    __ Drop(argc + 1);
+    __ Ret();
+  } else {
+    Label call_builtin;
+
+    Register elements = a3;
+    Register end_elements = t1;
+
+    // Get the elements array of the object.
+    __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+    // Check that the elements are in fast mode and writable.
+    __ CheckMap(elements,
+                v0,
+                Heap::kFixedArrayMapRootIndex,
+                &call_builtin,
+                DONT_DO_SMI_CHECK);
+
+    if (argc == 1) {  // Otherwise fall through to call the builtin.
+      Label exit, with_write_barrier, attempt_to_grow_elements;
+
+      // Get the array's length into v0 and calculate new length.
+      __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+      STATIC_ASSERT(kSmiTagSize == 1);
+      STATIC_ASSERT(kSmiTag == 0);
+      __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
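+      // Since kSmiTag == 0, adding the tagged constant Smi::FromInt(argc)
+      // increments the tagged length by argc without untagging.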
+
+      // Get the elements array's length.
+      __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+      // Check if we could survive without allocation.
+      __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
+
+      // Save new length.
+      __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+      // Push the element.
+      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+      // We may need a register containing the address end_elements below,
+      // so write back the value in end_elements.
+      __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+      __ Addu(end_elements, elements, end_elements);
+      const int kEndElementsOffset =
+          FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
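+      // The smi length in v0, shifted by kPointerSizeLog2 - kSmiTagSize, is
+      // already a byte offset; kEndElementsOffset removes the heap tag, adds
+      // the FixedArray header and steps back over the argc pushed slots, so
+      // the store below targets the first free element slot.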
+      __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
+      __ Addu(end_elements, end_elements, kPointerSize);
+
+      // Check for a smi.
+      __ JumpIfNotSmi(t0, &with_write_barrier);
+      __ bind(&exit);
+      __ Drop(argc + 1);
+      __ Ret();
+
+      __ bind(&with_write_barrier);
+      __ InNewSpace(elements, t0, eq, &exit);
+      __ RecordWriteHelper(elements, end_elements, t0);
+      __ Drop(argc + 1);
+      __ Ret();
+
+      __ bind(&attempt_to_grow_elements);
+      // v0: array's length + 1.
+      // t0: elements' length.
+
+      if (!FLAG_inline_new) {
+        __ Branch(&call_builtin);
+      }
+
+      ExternalReference new_space_allocation_top =
+          ExternalReference::new_space_allocation_top_address(
+              masm()->isolate());
+      ExternalReference new_space_allocation_limit =
+          ExternalReference::new_space_allocation_limit_address(
+              masm()->isolate());
+
+      const int kAllocationDelta = 4;
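+      // Grow the backing store by kAllocationDelta slots at a time; the
+      // slots beyond the pushed element are filled with the hole below.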
+      // Load top and check if it is the end of elements.
+      __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
+      __ Addu(end_elements, elements, end_elements);
+      __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
+      __ li(t3, Operand(new_space_allocation_top));
+      __ lw(t2, MemOperand(t3));
+      __ Branch(&call_builtin, ne, end_elements, Operand(t2));
+
+      __ li(t5, Operand(new_space_allocation_limit));
+      __ lw(t5, MemOperand(t5));
+      __ Addu(t2, t2, Operand(kAllocationDelta * kPointerSize));
+      __ Branch(&call_builtin, hi, t2, Operand(t5));
+
+      // We fit and could grow elements.
+      // Update new_space_allocation_top.
+      __ sw(t2, MemOperand(t3));
+      // Push the argument.
+      __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ sw(t2, MemOperand(end_elements));
+      // Fill the rest with holes.
+      __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+      for (int i = 1; i < kAllocationDelta; i++) {
+        __ sw(t2, MemOperand(end_elements, i * kPointerSize));
+      }
+
+      // Update elements' and array's sizes.
+      __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+      __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta)));
+      __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+      // Elements are in new space, so write barrier is not required.
+      __ Drop(argc + 1);
+      __ Ret();
+    }
+    __ bind(&call_builtin);
+    __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
+                                                   masm()->isolate()),
+                                 argc + 1,
+                                 1);
+  }
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -311,8 +1682,87 @@
                                                    JSGlobalPropertyCell* cell,
                                                    JSFunction* function,
                                                    String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
+
+  Label miss, return_undefined, call_builtin;
+
+  Register receiver = a1;
+  Register elements = a3;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(JSObject::cast(object),
+                  receiver, holder, elements, t0, v0, name, &miss);
+
+  // Get the elements array of the object.
+  __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+  // Check that the elements are in fast mode and writable.
+  __ CheckMap(elements,
+              v0,
+              Heap::kFixedArrayMapRootIndex,
+              &call_builtin,
+              DONT_DO_SMI_CHECK);
+
+  // Get the array's length into t0 and calculate new length.
+  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Subu(t0, t0, Operand(Smi::FromInt(1)));
+  __ Branch(&return_undefined, lt, t0, Operand(zero_reg));
+
+  // Get the last element.
+  __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);
+  // We can't address the last element in one operation. Compute the more
+  // expensive shift first, and use an offset later on.
+  __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(elements, elements, t1);
+  __ lw(v0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Branch(&call_builtin, eq, v0, Operand(t2));
+
+  // Set the array's length.
+  __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+  // Fill with the hole.
+  __ sw(t2, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Drop(argc + 1);
+  __ Ret();
+
+  __ bind(&return_undefined);
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ Drop(argc + 1);
+  __ Ret();
+
+  __ bind(&call_builtin);
+  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
+                                                 masm()->isolate()),
+                               argc + 1,
+                               1);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -322,8 +1772,84 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            v0,
+                                            &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+                  a1, a3, t0, name, &miss);
+
+  Register receiver = a1;
+  Register index = t1;
+  Register scratch = a3;
+  Register result = v0;
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+  if (argc > 0) {
+    __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharCodeAtGenerator char_code_at_generator(receiver,
+                                                   index,
+                                                   scratch,
+                                                   result,
+                                                   &miss,  // When not a string.
+                                                   &miss,  // When not a number.
+                                                   index_out_of_range_label,
+                                                   STRING_INDEX_IS_NUMBER);
+  char_code_at_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(v0, Heap::kNanValueRootIndex);
+    __ Drop(argc + 1);
+    __ Ret();
+  }
+
+  __ bind(&miss);
+  // Restore function name in a2.
+  __ li(a2, Handle<String>(name));
+  __ bind(&name_miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -333,8 +1859,85 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            v0,
+                                            &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+                  a1, a3, t0, name, &miss);
+
+  Register receiver = v0;
+  Register index = t1;
+  Register scratch1 = a1;
+  Register scratch2 = a3;
+  Register result = v0;
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+  if (argc > 0) {
+    __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          index_out_of_range_label,
+                                          STRING_INDEX_IS_NUMBER);
+  char_at_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+    __ Drop(argc + 1);
+    __ Ret();
+  }
+
+  __ bind(&miss);
+  // Restore function name in a2.
+  __ li(a2, Handle<String>(name));
+  __ bind(&name_miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -344,8 +1947,69 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+  Label miss;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(a1, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the char code argument.
+  Register code = a1;
+  __ lw(code, MemOperand(sp, 0 * kPointerSize));
+
+  // Check the code is a smi.
+  Label slow;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfNotSmi(code, &slow);
+
+  // Convert the smi code to uint16.
+  __ And(code, code, Operand(Smi::FromInt(0xffff)));
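+  // ANDing with the smi-encoded 0xffff keeps the low 16 bits of the value
+  // and leaves the smi tag (0) intact, so no untagging is needed.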
+
+  StringCharFromCodeGenerator char_from_code_generator(code, v0);
+  char_from_code_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_from_code_generator.GenerateSlow(masm(), call_helper);
+
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ bind(&slow);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+  __ bind(&miss);
+  // a2: function name.
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
@@ -354,8 +2018,134 @@
                                                     JSGlobalPropertyCell* cell,
                                                     JSFunction* function,
                                                     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  if (!CpuFeatures::IsSupported(FPU))
+    return heap()->undefined_value();
+  CpuFeatures::Scope scope_fpu(FPU);
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+  Label miss, slow;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(a1, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the (only) argument into v0.
+  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+  // If the argument is a smi, just return.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ And(t0, v0, Operand(kSmiTagMask));
+  __ Drop(argc + 1, eq, t0, Operand(zero_reg));
+  __ Ret(eq, t0, Operand(zero_reg));
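+  // The Drop and Ret above are conditional: they only take effect when t0 is
+  // zero, i.e. when the argument is a smi; otherwise execution falls through
+  // to the heap number path below.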
+
+  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+
+  Label wont_fit_smi, no_fpu_error, restore_fcsr_and_return;
+
+  // If fpu is enabled, we use the floor instruction.
+
+  // Load the HeapNumber value.
+  __ ldc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+
+  // Backup FCSR.
+  __ cfc1(a3, FCSR);
+  // Clearing FCSR clears the exception mask with no side-effects.
+  __ ctc1(zero_reg, FCSR);
+  // Convert the argument to an integer.
+  __ floor_w_d(f0, f0);
+
+  // Start checking for special cases.
+  // Get the argument exponent and clear the sign bit.
+  __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize));
+  __ And(t2, t1, Operand(~HeapNumber::kSignMask));
+  __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord);
+
+  // Retrieve FCSR and check for fpu errors.
+  __ cfc1(t5, FCSR);
+  __ srl(t5, t5, kFCSRFlagShift);
+  // Flag 1 marks an inaccurate but still good result, so we ignore it.
+  __ And(t5, t5, Operand(kFCSRFlagMask ^ 1));
+  __ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
+
+  // Check for NaN, Infinity, and -Infinity.
+  // They are invariant through a Math.Floor call, so just
+  // return the original argument.
+  __ Subu(t3, t2, Operand(HeapNumber::kExponentMask
+        >> HeapNumber::kMantissaBitsInTopWord));
+  __ Branch(&restore_fcsr_and_return, eq, t3, Operand(zero_reg));
+  // We had an overflow or underflow in the conversion. Check if we
+  // have a big exponent.
+  // If greater or equal, the argument is already round and in v0.
+  __ Branch(&restore_fcsr_and_return, ge, t3,
+      Operand(HeapNumber::kMantissaBits));
+  __ Branch(&wont_fit_smi);
+
+  __ bind(&no_fpu_error);
+  // Move the result back to v0.
+  __ mfc1(v0, f0);
+  // Check if the result fits into a smi.
+  __ Addu(a1, v0, Operand(0x40000000));
+  __ Branch(&wont_fit_smi, lt, a1, Operand(zero_reg));
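+  // Adding 0x40000000 makes the result negative exactly when the value lies
+  // outside the smi range [-2^30, 2^30 - 1], so a negative result means the
+  // value cannot be tagged as a smi.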
+  // Tag the result.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ sll(v0, v0, kSmiTagSize);
+
+  // Check for -0.
+  __ Branch(&restore_fcsr_and_return, ne, v0, Operand(zero_reg));
+  // t1 already holds the HeapNumber exponent.
+  __ And(t0, t1, Operand(HeapNumber::kSignMask));
+  // If the HeapNumber is negative, the argument was -0, so reload the
+  // original -0 heap number from the stack and return it. Otherwise v0
+  // already holds the smi 0 and we can just return.
+  __ Branch(&restore_fcsr_and_return, eq, t0, Operand(zero_reg));
+  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+  __ bind(&restore_fcsr_and_return);
+  // Restore FCSR and return.
+  __ ctc1(a3, FCSR);
+
+  __ Drop(argc + 1);
+  __ Ret();
+
+  __ bind(&wont_fit_smi);
+  // Restore FCSR and fall to slow case.
+  __ ctc1(a3, FCSR);
+
+  __ bind(&slow);
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+  __ bind(&miss);
+  // a2: function name.
+  MaybeObject* obj = GenerateMissBranch();
+  if (obj->IsFailure()) return obj;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
@@ -364,8 +2154,100 @@
                                                   JSGlobalPropertyCell* cell,
                                                   JSFunction* function,
                                                   String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  const int argc = arguments().immediate();
+
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
+
+  Label miss;
+  GenerateNameCheck(name, &miss);
+
+  if (cell == NULL) {
+    __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(a1, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+
+  // Load the (only) argument into v0.
+  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+
+  // Check if the argument is a smi.
+  Label not_smi;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfNotSmi(v0, &not_smi);
+
+  // Do bitwise not or do nothing depending on the sign of the
+  // argument.
+  __ sra(t0, v0, kBitsPerInt - 1);
+  __ Xor(a1, v0, t0);
+
+  // Add 1 or do nothing depending on the sign of the argument.
+  __ Subu(v0, a1, t0);
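+  // Together these compute |x|: with t0 = x >> 31 (0 or -1), (x ^ t0) - t0
+  // negates x when it is negative and leaves it unchanged otherwise.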
+
+  // If the result is still negative, go to the slow case.
+  // This only happens for the most negative smi.
+  Label slow;
+  __ Branch(&slow, lt, v0, Operand(zero_reg));
+
+  // Smi case done.
+  __ Drop(argc + 1);
+  __ Ret();
+
+  // Check if the argument is a heap number and load its exponent and
+  // sign.
+  __ bind(&not_smi);
+  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
+  __ lw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+
+  // Check the sign of the argument. If the argument is positive,
+  // just return it.
+  Label negative_sign;
+  __ And(t0, a1, Operand(HeapNumber::kSignMask));
+  __ Branch(&negative_sign, ne, t0, Operand(zero_reg));
+  __ Drop(argc + 1);
+  __ Ret();
+
+  // If the argument is negative, clear the sign, and return a new
+  // number.
+  __ bind(&negative_sign);
+  __ Xor(a1, a1, Operand(HeapNumber::kSignMask));
+  __ lw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+  __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(v0, t0, t1, t2, &slow);
+  __ sw(a1, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+  __ sw(a3, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+  __ Drop(argc + 1);
+  __ Ret();
+
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ bind(&slow);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+  __ bind(&miss);
+  // a2: function name.
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
@@ -376,8 +2258,51 @@
     JSGlobalPropertyCell* cell,
     JSFunction* function,
     String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+
+  Counters* counters = isolate()->counters();
+
+  ASSERT(optimization.is_simple_api_call());
+  // Bail out if the object is a global object, as we don't want to
+  // repatch it to the global receiver.
+  if (object->IsGlobalObject()) return heap()->undefined_value();
+  if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
+  int depth = optimization.GetPrototypeDepthOfExpectedType(
+            JSObject::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
+
+  Label miss, miss_before_stack_reserved;
+
+  GenerateNameCheck(name, &miss_before_stack_reserved);
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(a1, &miss_before_stack_reserved);
+
+  __ IncrementCounter(counters->call_const(), 1, a0, a3);
+  __ IncrementCounter(counters->call_const_fast_api(), 1, a0, a3);
+
+  ReserveSpaceForFastApiCall(masm(), a0);
+
+  // Check that the maps haven't changed and find a Holder as a side effect.
+  CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                  depth, &miss);
+
+  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+  if (result->IsFailure()) return result;
+
+  __ bind(&miss);
+  FreeSpaceForFastApiCall(masm());
+
+  __ bind(&miss_before_stack_reserved);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
@@ -386,26 +2311,251 @@
                                                    JSFunction* function,
                                                    String* name,
                                                    CheckType check) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  if (HasCustomCallGenerator(function)) {
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, NULL, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // Undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) return result;
+  }
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  if (check != NUMBER_CHECK) {
+    __ And(t1, a1, Operand(kSmiTagMask));
+    __ Branch(&miss, eq, t1, Operand(zero_reg));
+  }
+
+  // Make sure that it's okay not to patch the on stack receiver
+  // unless we're doing a receiver map check.
+  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  SharedFunctionInfo* function_info = function->shared();
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      __ IncrementCounter(masm()->isolate()->counters()->call_const(),
+          1, a0, a3);
+
+      // Check that the maps haven't changed.
+      CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                      &miss);
+
+      // Patch the receiver on the stack with the global proxy if
+      // necessary.
+      if (object->IsGlobalObject()) {
+        __ lw(a3, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+        __ sw(a3, MemOperand(sp, argc * kPointerSize));
+      }
+      break;
+
+    case STRING_CHECK:
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
+        // Check that the object is a two-byte string or a symbol.
+        __ GetObjectType(a1, a3, a3);
+        __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+        // Check that the maps starting from the prototype haven't changed.
+        GenerateDirectLoadGlobalFunctionPrototype(
+            masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
+      }
+      break;
+
+    case NUMBER_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
+        Label fast;
+        // Check that the object is a smi or a heap number.
+        __ And(t1, a1, Operand(kSmiTagMask));
+        __ Branch(&fast, eq, t1, Operand(zero_reg));
+        __ GetObjectType(a1, a0, a0);
+        __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+        __ bind(&fast);
+        // Check that the maps starting from the prototype haven't changed.
+        GenerateDirectLoadGlobalFunctionPrototype(
+            masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
+      }
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
+        Label fast;
+        // Check that the object is a boolean.
+        __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+        __ Branch(&fast, eq, a1, Operand(t0));
+        __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+        __ Branch(&miss, ne, a1, Operand(t0));
+        __ bind(&fast);
+        // Check that the maps starting from the prototype haven't changed.
+        GenerateDirectLoadGlobalFunctionPrototype(
+            masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
+      }
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
 }
 
 
 MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                       JSObject* holder,
                                                       String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  // Get the receiver from the stack.
+  __ lw(a1, MemOperand(sp, argc * kPointerSize));
+
+  CallInterceptorCompiler compiler(this, arguments(), a2);
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         a1,
+                                         a3,
+                                         t0,
+                                         a0,
+                                         &miss);
+  if (result->IsFailure()) {
+    return result;
+  }
+
+  // Move returned value, the function to call, to a1.
+  __ mov(a1, v0);
+  // Restore receiver.
+  __ lw(a0, MemOperand(sp, argc * kPointerSize));
+
+  GenerateCallFunction(masm(), object, arguments(), &miss);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* CallStubCompiler::CompileCallGlobal(
+    JSObject* object,
+    GlobalObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name,
+    Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  if (HasCustomCallGenerator(function)) {
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, cell, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // Undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) return result;
+  }
+
+  Label miss;
+
+  GenerateNameCheck(name, &miss);
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  GenerateGlobalReceiverCheck(object, holder, name, &miss);
+  GenerateLoadFunctionFromCell(cell, function, &miss);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ lw(a3, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a3, MemOperand(sp, argc * kPointerSize));
+  }
+
+  // Set up the context (the function is already in a1).
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  if (V8::UseCrankshaft()) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+                  JUMP_FUNCTION, call_kind);
+  }
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
@@ -413,39 +2563,205 @@
                                                   int index,
                                                   Map* transition,
                                                   String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Name register might be clobbered.
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     a1, a2, a3,
+                     &miss);
+  __ bind(&miss);
+  __ li(a2, Operand(Handle<String>(name)));  // Restore name.
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
 MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
                                                      AccessorInfo* callback,
                                                      String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(a1, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(a1, a3, &miss);
+  }
+
+  // Stub is never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  __ push(a1);  // Receiver.
+  __ li(a3, Operand(Handle<AccessorInfo>(callback)));  // Callback info.
+  __ Push(a3, a2, a0);
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
+          masm()->isolate());
+  __ TailCallExternalReference(store_callback_property, 4, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
 }
 
 
 MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
                                                         String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(a1, &miss);
+
+  // Check that the map of the object hasn't changed.
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(receiver->map())));
+
+  // Perform global security token check if needed.
+  if (receiver->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(a1, a3, &miss);
+  }
+
+  // Stub is never generated for non-global objects that require access
+  // checks.
+  ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+  __ Push(a1, a2, a0);  // Receiver, name, value.
+
+  __ li(a0, Operand(Smi::FromInt(strict_mode_)));
+  __ push(a0);  // Strict mode.
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property =
+      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
+          masm()->isolate());
+  __ TailCallExternalReference(store_ic_property, 4, 1);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
 MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
                                                    JSGlobalPropertyCell* cell,
                                                    String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the map of the global has not changed.
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+
+  // Check that the value in the cell is not the hole. If it is, this
+  // cell could have been deleted, and reintroducing the global would
+  // require updating the property details in the global object's property
+  // dictionary. We bail out to the runtime system to do that.
+  __ li(t0, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+  __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+  __ Branch(&miss, eq, t1, Operand(t2));
+
+  // Store the value in the cell.
+  __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
+  __ mov(v0, a0);  // Stored value must be returned in v0.
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
+  __ Ret();
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3);
+  Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
 MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
                                                       JSObject* object,
                                                       JSObject* last) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver is not a smi.
+  __ JumpIfSmi(a0, &miss);
+
+  // Check the maps of the full prototype chain.
+  CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);
+
+  // If the last object in the prototype chain is a global object,
+  // check that the global property cell is empty.
+  if (last->IsGlobalObject()) {
+    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+                                                  GlobalObject::cast(last),
+                                                  name,
+                                                  a1,
+                                                  &miss);
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
+  }
+
+  // Return undefined if the maps of the full prototype chain are still the
+  // same.
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NONEXISTENT, heap()->empty_string());
 }
 
 
@@ -453,8 +2769,21 @@
                                                 JSObject* holder,
                                                 int index,
                                                 String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  __ mov(v0, a0);
+
+  GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
 }
 
 
@@ -462,8 +2791,25 @@
                                                    JSObject* object,
                                                    JSObject* holder,
                                                    AccessorInfo* callback) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
+                                             callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -471,16 +2817,50 @@
                                                    JSObject* holder,
                                                    Object* value,
                                                    String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
 MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
                                                       JSObject* holder,
                                                       String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+  GenerateLoadInterceptor(object,
+                          holder,
+                          &lookup,
+                          a0,
+                          a2,
+                          a3,
+                          a1,
+                          t0,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -489,8 +2869,45 @@
                                                  JSGlobalPropertyCell* cell,
                                                  String* name,
                                                  bool is_dont_delete) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  Label miss;
+
+  // If the object is the holder, then we know that it is a global
+  // object, which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ And(t0, a0, Operand(kSmiTagMask));
+    __ Branch(&miss, eq, t0, Operand(zero_reg));
+  }
+
+  // Check that the map of the global has not changed.
+  CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
+
+  // Get the value from the cell.
+  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (!is_dont_delete) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&miss, eq, t0, Operand(at));
+  }
+
+  __ mov(v0, t0);
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
@@ -498,8 +2915,21 @@
                                                      JSObject* receiver,
                                                      JSObject* holder,
                                                      int index) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(FIELD, name);
 }
 
 
@@ -508,8 +2938,27 @@
     JSObject* receiver,
     JSObject* holder,
     AccessorInfo* callback) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
+                                             t0, callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
@@ -517,40 +2966,171 @@
                                                         JSObject* receiver,
                                                         JSObject* holder,
                                                         Object* value) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
                                                            JSObject* holder,
                                                            String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          a1,
+                          a0,
+                          a2,
+                          a3,
+                          t0,
+                          name,
+                          &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(INTERCEPTOR, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadArrayLength(masm(), a1, a2, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+  // Check the key is the cached one.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
+  __ bind(&miss);
+  __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
+
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
 MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+
+  // Check the name hasn't changed.
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
+
+  GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
+  __ bind(&miss);
+  __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  return GetCode(CALLBACKS, name);
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a1,
+                 a2,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss;
+  __ JumpIfSmi(a1, &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ Jump(code, RelocInfo::CODE_TARGET, eq, a2, Operand(map));
+  }
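+  // A matching receiver map tail-jumps directly to its handler; if no map
+  // matches, execution falls through to the generic miss stub below.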
+
+  __ bind(&miss);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -558,39 +3138,1143 @@
                                                        int index,
                                                        Map* transition,
                                                        String* name) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  // -----------------------------------
+
+  Label miss;
+
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
+
+  // Check that the name has not changed.
+  __ Branch(&miss, ne, a1, Operand(Handle<String>(name)));
+
+  // a3 is used as scratch register. a1 and a2 keep their values if a jump to
+  // the miss label is generated.
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     a2, a1, a3,
+                     &miss);
+  __ bind(&miss);
+
+  __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
+  Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
-    JSObject* receiver) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
+    Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  // -----------------------------------
+  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  MaybeObject* maybe_stub =
+      KeyedStoreFastElementStub(is_js_array).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a2,
+                 a3,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  // -----------------------------------
+  Label miss;
+  __ JumpIfSmi(a2, &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
+  }
+
+  __ bind(&miss);
+  Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+  // a0    : argc
+  // a1    : constructor
+  // ra    : return address
+  // [sp]  : last argument
+  Label generic_stub_call;
+
+  // Use t7 for holding undefined which is used in several places below.
+  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check to see whether there are any break points in the function code. If
+  // there are, jump to the generic constructor stub, which calls the actual
+  // code for the function and thereby hits the break points.
+  __ lw(t5, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(t5, SharedFunctionInfo::kDebugInfoOffset));
+  __ Branch(&generic_stub_call, ne, a2, Operand(t7));
+#endif
+
+  // Load the initial map and verify that it is in fact a map.
+  // a1: constructor function
+  // t7: undefined
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+  __ And(t0, a2, Operand(kSmiTagMask));
+  __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
+  __ GetObjectType(a2, a3, t0);
+  __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
+
+#ifdef DEBUG
+  // Cannot construct functions this way.
+  // a0: argc
+  // a1: constructor function
+  // a2: initial map
+  // t7: undefined
+  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  __ Check(ne, "Function constructed by construct stub.",
+      a3, Operand(JS_FUNCTION_TYPE));
+#endif
+
+  // Now allocate the JSObject in new space.
+  // a0: argc
+  // a1: constructor function
+  // a2: initial map
+  // t7: undefined
+  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+  __ AllocateInNewSpace(a3,
+                        t4,
+                        t5,
+                        t6,
+                        &generic_stub_call,
+                        SIZE_IN_WORDS);
+
+  // Allocated the JSObject, now initialize the fields. Map is set to initial
+  // map and properties and elements are set to empty fixed array.
+  // a0: argc
+  // a1: constructor function
+  // a2: initial map
+  // a3: object size (in words)
+  // t4: JSObject (not tagged)
+  // t7: undefined
+  __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+  __ mov(t5, t4);
+  __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+  __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+  __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+  __ Addu(t5, t5, Operand(3 * kPointerSize));
+  ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+  ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+  ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+
+  // Calculate the location of the first argument. The stack contains only the
+  // argc arguments.
+  __ sll(a1, a0, kPointerSizeLog2);
+  __ Addu(a1, a1, sp);
+
+  // Fill all the in-object properties with undefined.
+  // a0: argc
+  // a1: first argument
+  // a3: object size (in words)
+  // t4: JSObject (not tagged)
+  // t5: First in-object property of JSObject (not tagged)
+  // t7: undefined
+  // Fill the initialized properties with a constant value or a passed argument
+  // depending on the this.x = ...; assignment in the function.
+  SharedFunctionInfo* shared = function->shared();
+  for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+    if (shared->IsThisPropertyAssignmentArgument(i)) {
+      Label not_passed, next;
+      // Check if the argument assigned to the property is actually passed.
+      int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+      __ Branch(&not_passed, less_equal, a0, Operand(arg_number));
+      // Argument passed - find it on the stack.
+      __ lw(a2, MemOperand(a1, (arg_number + 1) * -kPointerSize));
+      __ sw(a2, MemOperand(t5));
+      __ Addu(t5, t5, kPointerSize);
+      __ jmp(&next);
+      __ bind(&not_passed);
+      // Set the property to undefined.
+      __ sw(t7, MemOperand(t5));
+      __ Addu(t5, t5, Operand(kPointerSize));
+      __ bind(&next);
+    } else {
+      // Set the property to the constant value.
+      Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+      __ li(a2, Operand(constant));
+      __ sw(a2, MemOperand(t5));
+      __ Addu(t5, t5, kPointerSize);
+    }
+  }
+
+  // Fill the unused in-object property fields with undefined.
+  ASSERT(function->has_initial_map());
+  for (int i = shared->this_property_assignments_count();
+       i < function->initial_map()->inobject_properties();
+       i++) {
+      __ sw(t7, MemOperand(t5));
+      __ Addu(t5, t5, kPointerSize);
+  }
+
+  // a0: argc
+  // t4: JSObject (not tagged)
+  // Move argc to a1 and the JSObject to return to v0 and tag it.
+  __ mov(a1, a0);
+  __ mov(v0, t4);
+  __ Or(v0, v0, Operand(kHeapObjectTag));
+
+  // v0: JSObject
+  // a1: argc
+  // Remove caller arguments and receiver from the stack and return.
+  __ sll(t0, a1, kPointerSizeLog2);
+  __ Addu(sp, sp, t0);
+  __ Addu(sp, sp, Operand(kPointerSize));
+  Counters* counters = masm()->isolate()->counters();
+  __ IncrementCounter(counters->constructed_objects(), 1, a1, a2);
+  __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2);
+  __ Ret();
+
+  // Jump to the generic stub in case the specialized code cannot handle the
+  // construction.
+  __ bind(&generic_stub_call);
+  Handle<Code> generic_construct_stub =
+      masm()->isolate()->builtins()->JSConstructStubGeneric();
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
 }
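The specialized construct stub above inlines the object allocation and fills the in-object properties according to the function's simple this.x = ... assignments: a slot gets the corresponding argument if it was passed, a compile-time constant, or undefined. A rough C++ sketch of that fill policy; Assignment and Value are illustrative types, not the real SharedFunctionInfo API:

    // Sketch of the slot-filling policy baked into the construct stub.
    struct Assignment {
      bool is_argument;   // this.x = <argument> vs. this.x = <constant>
      int arg_index;      // which argument, when is_argument is true
      int constant;       // stand-in for a constant tagged value
    };
    typedef int Value;              // stand-in for a tagged value
    const Value kUndefined = -1;    // stand-in for the undefined sentinel

    void FillInObjectSlots(const Assignment* assignments, int assignment_count,
                           const Value* args, int argc,
                           Value* slots, int slot_count) {
      for (int i = 0; i < assignment_count; ++i) {
        if (assignments[i].is_argument) {
          int arg = assignments[i].arg_index;
          slots[i] = (arg < argc) ? args[arg] : kUndefined;  // "not_passed" path
        } else {
          slots[i] = assignments[i].constant;
        }
      }
      // Remaining in-object properties are filled with undefined (t7 above).
      for (int i = assignment_count; i < slot_count; ++i) {
        slots[i] = kUndefined;
      }
    }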
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
-    JSObject* receiver_object,
-    ExternalArrayType array_type,
-    Code::Flags flags) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
+    JSObject* receiver, ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  MaybeObject* maybe_stub =
+      KeyedLoadExternalArrayStub(array_type).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a1,
+                 a2,
+                 Handle<Map>(receiver->map()),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
-    JSObject* receiver_object,
-    ExternalArrayType array_type,
-    Code::Flags flags) {
-  UNIMPLEMENTED_MIPS();
-  return NULL;
+MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
+    JSObject* receiver, ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : name
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  // -----------------------------------
+  MaybeObject* maybe_stub =
+      KeyedStoreExternalArrayStub(array_type).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a2,
+                 a3,
+                 Handle<Map>(receiver->map()),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  return GetCode();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+static bool IsElementTypeSigned(ExternalArrayType array_type) {
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalShortArray:
+    case kExternalIntArray:
+      return true;
+
+    case kExternalUnsignedByteArray:
+    case kExternalUnsignedShortArray:
+    case kExternalUnsignedIntArray:
+      return false;
+
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ExternalArrayType array_type) {
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow, failed_allocation;
+
+  Register key = a0;
+  Register receiver = a1;
+
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &miss_force_generic);
+
+  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // a3: elements array
+
+  // Check that the index is in range.
+  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+  __ sra(t2, key, kSmiTagSize);
+  // Unsigned comparison catches both negative and too-large values.
+  __ Branch(&miss_force_generic, Uless, t1, Operand(t2));
+
+  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+  // a3: base pointer of external storage
+
+  // We are not untagging the smi key; instead we work with it
+  // as if it were premultiplied by 2.
+  ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+
+  Register value = a2;
+  switch (array_type) {
+    case kExternalByteArray:
+      __ srl(t2, key, 1);
+      __ addu(t3, a3, t2);
+      __ lb(value, MemOperand(t3, 0));
+      break;
+    case kExternalPixelArray:
+    case kExternalUnsignedByteArray:
+      __ srl(t2, key, 1);
+      __ addu(t3, a3, t2);
+      __ lbu(value, MemOperand(t3, 0));
+      break;
+    case kExternalShortArray:
+      __ addu(t3, a3, key);
+      __ lh(value, MemOperand(t3, 0));
+      break;
+    case kExternalUnsignedShortArray:
+      __ addu(t3, a3, key);
+      __ lhu(value, MemOperand(t3, 0));
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ sll(t2, key, 1);
+      __ addu(t3, a3, t2);
+      __ lw(value, MemOperand(t3, 0));
+      break;
+    case kExternalFloatArray:
+      __ sll(t3, t2, 2);
+      __ addu(t3, a3, t3);
+      if (CpuFeatures::IsSupported(FPU)) {
+        CpuFeatures::Scope scope(FPU);
+        __ lwc1(f0, MemOperand(t3, 0));
+      } else {
+        __ lw(value, MemOperand(t3, 0));
+      }
+      break;
+    case kExternalDoubleArray:
+      __ sll(t2, key, 2);
+      __ addu(t3, a3, t2);
+      if (CpuFeatures::IsSupported(FPU)) {
+        CpuFeatures::Scope scope(FPU);
+        __ ldc1(f0, MemOperand(t3, 0));
+      } else {
+        // t3: pointer to the beginning of the double we want to load.
+        __ lw(a2, MemOperand(t3, 0));
+        __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // For integer array types:
+  // a2: value
+  // For float array type:
+  // f0: value (if FPU is supported)
+  // a2: value (if FPU is not supported)
+  // For double array type:
+  // f0: value (if FPU is supported)
+  // a2/a3: value (if FPU is not supported)
+
+  if (array_type == kExternalIntArray) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
+    __ Subu(t3, value, Operand(0xC0000000));  // Non-smi value gives neg result.
+    __ Branch(&box_int, lt, t3, Operand(zero_reg));
+    // Tag integer as smi and return it.
+    __ sll(v0, value, kSmiTagSize);
+    __ Ret();
+
+    __ bind(&box_int);
+    // Allocate a HeapNumber for the result and perform int-to-double
+    // conversion.
+    // The arm version uses a temporary here to save r0, but we don't need to
+    // (a0 is not modified).
+    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
+
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      __ mtc1(value, f0);
+      __ cvt_d_w(f0, f0);
+      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+      __ Ret();
+    } else {
+      Register dst1 = t2;
+      Register dst2 = t3;
+      FloatingPointHelper::Destination dest =
+          FloatingPointHelper::kCoreRegisters;
+      FloatingPointHelper::ConvertIntToDouble(masm,
+                                              value,
+                                              dest,
+                                              f0,
+                                              dst1,
+                                              dst2,
+                                              t1,
+                                              f2);
+      __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+      __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+      __ Ret();
+    }
+  } else if (array_type == kExternalUnsignedIntArray) {
+    // The test is different for unsigned int values. Since we need
+    // the value to be in the range of a positive smi, we can't
+    // handle either of the top two bits being set in the value.
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      Label pl_box_int;
+      __ And(t2, value, Operand(0xC0000000));
+      __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
+
+      // It can fit in a Smi.
+      // Tag integer as smi and return it.
+      __ sll(v0, value, kSmiTagSize);
+      __ Ret();
+
+      __ bind(&pl_box_int);
+      // Allocate a HeapNumber for the result and perform int-to-double
+      // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
+      // registers - also when jumping due to exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
+
+      // This is replaced by a macro:
+      // __ mtc1(value, f0);     // LS 32-bits.
+      // __ mtc1(zero_reg, f1);  // MS 32-bits are all zero.
+      // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
+
+      __ Cvt_d_uw(f0, value);
+
+      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+
+      __ Ret();
+    } else {
+      // Check whether unsigned integer fits into smi.
+      Label box_int_0, box_int_1, done;
+      __ And(t2, value, Operand(0x80000000));
+      __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
+      __ And(t2, value, Operand(0x40000000));
+      __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
+
+      // Tag integer as smi and return it.
+      __ sll(v0, value, kSmiTagSize);
+      __ Ret();
+
+      Register hiword = value;  // a2.
+      Register loword = a3;
+
+      __ bind(&box_int_0);
+      // Integer does not have leading zeros.
+      GenerateUInt2Double(masm, hiword, loword, t0, 0);
+      __ Branch(&done);
+
+      __ bind(&box_int_1);
+      // Integer has one leading zero.
+      GenerateUInt2Double(masm, hiword, loword, t0, 1);
+
+
+      __ bind(&done);
+      // Integer was converted to double in registers hiword:loword.
+      // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
+      // clobbers all registers - also when jumping due to exhausted young
+      // space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
+
+      __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
+      __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
+
+      __ mov(v0, t2);
+      __ Ret();
+    }
+  } else if (array_type == kExternalFloatArray) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+      // The float (single) value is already in fpu reg f0 (if we use float).
+      __ cvt_d_s(f0, f0);
+      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+      // FPU is not available, do manual single to double conversion.
+
+      // a2: floating point value (binary32).
+      // v0: heap number for result
+
+      // Extract mantissa to t4.
+      __ And(t4, value, Operand(kBinary32MantissaMask));
+
+      // Extract exponent to t5.
+      __ srl(t5, value, kBinary32MantissaBits);
+      __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+      Label exponent_rebiased;
+      __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
+
+      __ li(t0, 0x7ff);
+      __ Xor(t1, t5, Operand(0xFF));
+      __ movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
+      __ Branch(&exponent_rebiased, eq, t0, Operand(0xff));
+
+      // Rebias exponent.
+      __ Addu(t5,
+              t5,
+              Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+      __ bind(&exponent_rebiased);
+      __ And(a2, value, Operand(kBinary32SignMask));
+      value = no_reg;
+      __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
+      __ or_(a2, a2, t0);
+
+      // Shift mantissa.
+      static const int kMantissaShiftForHiWord =
+          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+      static const int kMantissaShiftForLoWord =
+          kBitsPerInt - kMantissaShiftForHiWord;
+
+      __ srl(t0, t4, kMantissaShiftForHiWord);
+      __ or_(a2, a2, t0);
+      __ sll(a0, t4, kMantissaShiftForLoWord);
+
+      __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+      __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+      __ Ret();
+    }
+
+  } else if (array_type == kExternalDoubleArray) {
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+      // The double value is already in f0
+      __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+      __ Ret();
+    } else {
+      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
+      // AllocateHeapNumber clobbers all registers - also when jumping due to
+      // exhausted young space.
+      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+      __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
+
+      __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+      __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+      __ Ret();
+    }
+
+  } else {
+    // Tag integer as smi and return it.
+    __ sll(v0, value, kSmiTagSize);
+    __ Ret();
+  }
+
+  // Slow case, key and receiver still in a0 and a1.
+  __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, a2, a3);
+
+  // ---------- S t a t e --------------
+  //  -- ra     : return address
+  //  -- a0     : key
+  //  -- a1     : receiver
+  // -----------------------------------
+
+  __ Push(a1, a0);
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+  __ bind(&miss_force_generic);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
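Two range checks in the load stub above are easy to misread in assembly. A 32-bit value read from an Int32Array fits in a 31-bit smi only when its top two bits agree (the 0xC0000000 subtraction), and an unsigned value fits only when both top bits are clear. Equivalent C++ predicates, assuming the 31-bit smi layout used on 32-bit targets:

    #include <stdint.h>

    // A signed 32-bit value fits in a smi iff -2^30 <= value <= 2^30 - 1,
    // i.e. bits 31 and 30 are equal (the Subu/Branch pair above).
    static inline bool FitsInSmi(int32_t value) {
      return value >= -(1 << 30) && value < (1 << 30);
    }

    // An unsigned 32-bit value fits in a (positive) smi only if neither of
    // the two top bits is set (the And with 0xC0000000 above).
    static inline bool UnsignedFitsInSmi(uint32_t value) {
      return (value & 0xC0000000u) == 0;
    }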
+
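The non-FPU float path rebuilds an IEEE-754 double from the raw binary32 bits: the sign is copied, the exponent is rebiased from 127 to 1023 (0 and 0xFF map straight to the double encodings 0 and 0x7FF), and the 23-bit mantissa is shifted into the double's 52-bit field split across the two words. A C++ sketch of the same bit manipulation, using a 64-bit integer and memcpy instead of the two word stores above:

    #include <stdint.h>
    #include <string.h>

    // Widen binary32 bits to a double the way the non-FPU path does.
    // As in the stub, exponent 0 is passed through unchanged, so subnormal
    // inputs are only approximated.
    static double WidenFloatBits(uint32_t bits) {
      uint64_t sign = static_cast<uint64_t>(bits >> 31) << 63;
      uint64_t mantissa = static_cast<uint64_t>(bits & 0x007FFFFFu) << (52 - 23);
      uint32_t exp32 = (bits >> 23) & 0xFF;
      uint64_t exp64;
      if (exp32 == 0) {
        exp64 = 0;                    // zero (and, approximately, subnormals)
      } else if (exp32 == 0xFF) {
        exp64 = 0x7FF;                // infinity / NaN
      } else {
        exp64 = exp32 - 127 + 1023;   // rebias: kBinary32ExponentBias -> 1023
      }
      uint64_t result_bits = sign | (exp64 << 52) | mantissa;
      double result;
      memcpy(&result, &result_bits, sizeof(result));
      return result;
    }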
+
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+    MacroAssembler* masm,
+    ExternalArrayType array_type) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Label slow, check_heap_number, miss_force_generic;
+
+  // Register usage.
+  Register value = a0;
+  Register key = a1;
+  Register receiver = a2;
+  // a3 mostly holds the elements array or the destination external array.
+
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &miss_force_generic);
+
+  // Check that the index is in range.
+  __ SmiUntag(t0, key);
+  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ Branch(&miss_force_generic, Ugreater_equal, t0, Operand(t1));
+
+  // Handle both smis and HeapNumbers in the fast path. Go to the
+  // runtime for all other kinds of values.
+  // a3: external array.
+  // t0: key (integer).
+
+  if (array_type == kExternalPixelArray) {
+    // Double to pixel conversion is only implemented in the runtime for now.
+    __ JumpIfNotSmi(value, &slow);
+  } else {
+    __ JumpIfNotSmi(value, &check_heap_number);
+  }
+  __ SmiUntag(t1, value);
+  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+  // a3: base pointer of external storage.
+  // t0: key (integer).
+  // t1: value (integer).
+
+  switch (array_type) {
+    case kExternalPixelArray: {
+      // Clamp the value to [0..255].
+      // v0 is used as a scratch register here.
+      Label done;
+      __ li(v0, Operand(255));
+      // Normal branch: nop in delay slot.
+      __ Branch(&done, gt, t1, Operand(v0));
+      // Use delay slot in this branch.
+      __ Branch(USE_DELAY_SLOT, &done, lt, t1, Operand(zero_reg));
+      __ mov(v0, zero_reg);  // In delay slot.
+      __ mov(v0, t1);  // Value is in range 0..255.
+      __ bind(&done);
+      __ mov(t1, v0);
+      __ addu(t8, a3, t0);
+      __ sb(t1, MemOperand(t8, 0));
+      }
+      break;
+    case kExternalByteArray:
+    case kExternalUnsignedByteArray:
+      __ addu(t8, a3, t0);
+      __ sb(t1, MemOperand(t8, 0));
+      break;
+    case kExternalShortArray:
+    case kExternalUnsignedShortArray:
+      __ sll(t8, t0, 1);
+      __ addu(t8, a3, t8);
+      __ sh(t1, MemOperand(t8, 0));
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ sll(t8, t0, 2);
+      __ addu(t8, a3, t8);
+      __ sw(t1, MemOperand(t8, 0));
+      break;
+    case kExternalFloatArray:
+      // Perform int-to-float conversion and store to memory.
+      StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
+      break;
+    case kExternalDoubleArray:
+      __ sll(t8, t0, 3);
+      __ addu(a3, a3, t8);
+      // a3: effective address of the double element
+      FloatingPointHelper::Destination destination;
+      if (CpuFeatures::IsSupported(FPU)) {
+        destination = FloatingPointHelper::kFPURegisters;
+      } else {
+        destination = FloatingPointHelper::kCoreRegisters;
+      }
+      FloatingPointHelper::ConvertIntToDouble(
+          masm, t1, destination,
+          f0, t2, t3,  // These are: double_dst, dst1, dst2.
+          t0, f2);  // These are: scratch2, single_scratch.
+      if (destination == FloatingPointHelper::kFPURegisters) {
+        CpuFeatures::Scope scope(FPU);
+        __ sdc1(f0, MemOperand(a3, 0));
+      } else {
+        __ sw(t2, MemOperand(a3, 0));
+        __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // Entry registers are intact, a0 holds the value which is the return value.
+  __ mov(v0, value);
+  __ Ret();
+
+  if (array_type != kExternalPixelArray) {
+    // a3: external array.
+    // t0: index (integer).
+    __ bind(&check_heap_number);
+    __ GetObjectType(value, t1, t2);
+    __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
+
+    __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
+
+    // a3: base pointer of external storage.
+    // t0: key (integer).
+
+    // The WebGL specification leaves the behavior of storing NaN and
+    // +/-Infinity into integer arrays basically undefined. For more
+    // reproducible behavior, convert these to zero.
+
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+
+      __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
+
+      if (array_type == kExternalFloatArray) {
+        __ cvt_s_d(f0, f0);
+        __ sll(t8, t0, 2);
+        __ addu(t8, a3, t8);
+        __ swc1(f0, MemOperand(t8, 0));
+      } else if (array_type == kExternalDoubleArray) {
+        __ sll(t8, t0, 3);
+        __ addu(t8, a3, t8);
+        __ sdc1(f0, MemOperand(t8, 0));
+      } else {
+        Label done;
+
+        // Need to perform float-to-int conversion.
+        // Test whether exponent equal to 0x7FF (infinity or NaN).
+
+        __ mfc1(t3, f1);  // Move exponent word of double to t3 (as raw bits).
+        __ li(t1, Operand(0x7FF00000));
+        __ And(t3, t3, Operand(t1));
+        __ Branch(USE_DELAY_SLOT, &done, eq, t3, Operand(t1));
+        __ mov(t3, zero_reg);  // In delay slot.
+
+        // Not infinity or NaN simply convert to int.
+        if (IsElementTypeSigned(array_type)) {
+          __ trunc_w_d(f0, f0);
+          __ mfc1(t3, f0);
+        } else {
+          __ Trunc_uw_d(f0, t3);
+        }
+
+        // t3: HeapNumber converted to integer
+        __ bind(&done);
+        switch (array_type) {
+          case kExternalByteArray:
+          case kExternalUnsignedByteArray:
+            __ addu(t8, a3, t0);
+            __ sb(t3, MemOperand(t8, 0));
+            break;
+          case kExternalShortArray:
+          case kExternalUnsignedShortArray:
+            __ sll(t8, t0, 1);
+            __ addu(t8, a3, t8);
+            __ sh(t3, MemOperand(t8, 0));
+            break;
+          case kExternalIntArray:
+          case kExternalUnsignedIntArray:
+            __ sll(t8, t0, 2);
+            __ addu(t8, a3, t8);
+            __ sw(t3, MemOperand(t8, 0));
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+
+      // Entry registers are intact, a0 holds the value
+      // which is the return value.
+      __ mov(v0, value);
+      __ Ret();
+    } else {
+      // FPU is not available, do manual conversions.
+
+      __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
+      __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+
+      if (array_type == kExternalFloatArray) {
+        Label done, nan_or_infinity_or_zero;
+        static const int kMantissaInHiWordShift =
+            kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+        static const int kMantissaInLoWordShift =
+            kBitsPerInt - kMantissaInHiWordShift;
+
+        // Test for all special exponent values: zeros, subnormal numbers, NaNs
+        // and infinities. All these should be converted to 0.
+        __ li(t5, HeapNumber::kExponentMask);
+        __ and_(t6, t3, t5);
+        __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
+
+        __ xor_(t1, t6, t5);
+        __ li(t2, kBinary32ExponentMask);
+        __ movz(t6, t2, t1);  // Only if t6 is equal to t5.
+        __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5));
+
+        // Rebias exponent.
+        __ srl(t6, t6, HeapNumber::kExponentShift);
+        __ Addu(t6,
+                t6,
+                Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+        __ li(t1, Operand(kBinary32MaxExponent));
+        __ Slt(t1, t1, t6);
+        __ And(t2, t3, Operand(HeapNumber::kSignMask));
+        __ Or(t2, t2, Operand(kBinary32ExponentMask));
+        __ movn(t3, t2, t1);  // Only if t6 is gt kBinary32MaxExponent.
+        __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
+
+        __ Slt(t1, t6, Operand(kBinary32MinExponent));
+        __ And(t2, t3, Operand(HeapNumber::kSignMask));
+        __ movn(t3, t2, t1);  // Only if t6 is lt kBinary32MinExponent.
+        __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
+
+        __ And(t7, t3, Operand(HeapNumber::kSignMask));
+        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+        __ sll(t3, t3, kMantissaInHiWordShift);
+        __ or_(t7, t7, t3);
+        __ srl(t4, t4, kMantissaInLoWordShift);
+        __ or_(t7, t7, t4);
+        __ sll(t6, t6, kBinary32ExponentShift);
+        __ or_(t3, t7, t6);
+
+        __ bind(&done);
+        __ sll(t9, a1, 2);
+        __ addu(t9, a2, t9);
+        __ sw(t3, MemOperand(t9, 0));
+
+        // Entry registers are intact, a0 holds the value which is the return
+        // value.
+        __ mov(v0, value);
+        __ Ret();
+
+        __ bind(&nan_or_infinity_or_zero);
+        __ And(t7, t3, Operand(HeapNumber::kSignMask));
+        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+        __ or_(t6, t6, t7);
+        __ sll(t3, t3, kMantissaInHiWordShift);
+        __ or_(t6, t6, t3);
+        __ srl(t4, t4, kMantissaInLoWordShift);
+        __ or_(t3, t6, t4);
+        __ Branch(&done);
+      } else if (array_type == kExternalDoubleArray) {
+        __ sll(t8, t0, 3);
+        __ addu(t8, a3, t8);
+        // t8: effective address of destination element.
+        __ sw(t4, MemOperand(t8, 0));
+        __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+        __ Ret();
+      } else {
+        bool is_signed_type  = IsElementTypeSigned(array_type);
+        int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+        int32_t min_value    = is_signed_type ? 0x80000000 : 0x00000000;
+
+        Label done, sign;
+
+        // Test for all special exponent values: zeros, subnormal numbers, NaNs
+        // and infinities. All these should be converted to 0.
+        __ li(t5, HeapNumber::kExponentMask);
+        __ and_(t6, t3, t5);
+        __ movz(t3, zero_reg, t6);  // Only if t6 is equal to zero.
+        __ Branch(&done, eq, t6, Operand(zero_reg));
+
+        __ xor_(t2, t6, t5);
+        __ movz(t3, zero_reg, t2);  // Only if t6 is equal to t5.
+        __ Branch(&done, eq, t6, Operand(t5));
+
+        // Unbias exponent.
+        __ srl(t6, t6, HeapNumber::kExponentShift);
+        __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
+        // If exponent is negative then result is 0.
+        __ slt(t2, t6, zero_reg);
+        __ movn(t3, zero_reg, t2);  // Only if exponent is negative.
+        __ Branch(&done, lt, t6, Operand(zero_reg));
+
+        // If exponent is too big then result is minimal value.
+        __ slti(t1, t6, meaningfull_bits - 1);
+        __ li(t2, min_value);
+        __ movz(t3, t2, t1);  // Only if t6 is ge meaningfull_bits - 1.
+        __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
+
+        __ And(t5, t3, Operand(HeapNumber::kSignMask));
+        __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
+        __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+        __ li(t9, HeapNumber::kMantissaBitsInTopWord);
+        __ subu(t6, t9, t6);
+        __ slt(t1, t6, zero_reg);
+        __ srlv(t2, t3, t6);
+        __ movz(t3, t2, t1);  // Only if t6 is positive.
+        __ Branch(&sign, ge, t6, Operand(zero_reg));
+
+        __ subu(t6, zero_reg, t6);
+        __ sllv(t3, t3, t6);
+        __ li(t9, meaningfull_bits);
+        __ subu(t6, t9, t6);
+        __ srlv(t4, t4, t6);
+        __ or_(t3, t3, t4);
+
+        __ bind(&sign);
+        __ subu(t2, t3, zero_reg);
+        __ movz(t3, t2, t5);  // Only if t5 is zero.
+
+        __ bind(&done);
+
+        // Result is in t3.
+        // This switch block should be exactly the same as above (FPU mode).
+        switch (array_type) {
+          case kExternalByteArray:
+          case kExternalUnsignedByteArray:
+            __ addu(t8, a3, t0);
+            __ sb(t3, MemOperand(t8, 0));
+            break;
+          case kExternalShortArray:
+          case kExternalUnsignedShortArray:
+            __ sll(t8, t0, 1);
+            __ addu(t8, a3, t8);
+            __ sh(t3, MemOperand(t8, 0));
+            break;
+          case kExternalIntArray:
+          case kExternalUnsignedIntArray:
+            __ sll(t8, t0, 2);
+            __ addu(t8, a3, t8);
+            __ sw(t3, MemOperand(t8, 0));
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+  }
+
+  // Slow case, value, key and receiver still in a0, a1 and a2.
+  __ bind(&slow);
+  __ IncrementCounter(
+      masm->isolate()->counters()->keyed_load_external_array_slow(),
+      1, a2, a3);
+  // Entry registers are intact.
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  // Miss case, call the runtime.
+  __ bind(&miss_force_generic);
+
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Handle<Code> miss_ic =
+     masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
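The store stub above follows the convention called out in its comments: NaN and +/-Infinity written to an integer external array are stored as 0, other values are truncated toward zero, and pixel arrays clamp to [0..255] instead. A short C++ sketch of that policy for the signed case; out-of-range finite values are clamped here only to keep the sketch well defined (the stub leaves them to trunc_w_d / Trunc_uw_d):

    #include <stdint.h>
    #include <cmath>

    // NaN and +/-Infinity become 0; finite values truncate toward zero.
    static int32_t TruncateForIntArray(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      if (value >= 2147483647.0) return 2147483647;
      if (value <= -2147483648.0) return -2147483647 - 1;
      return static_cast<int32_t>(value);
    }

    // Pixel arrays clamp the (smi) value to [0..255], as in the
    // kExternalPixelArray case above.
    static uint8_t ClampToPixel(int32_t value) {
      if (value < 0) return 0;
      if (value > 255) return 255;
      return static_cast<uint8_t>(value);
    }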
+
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(a0, &miss_force_generic);
+
+  // Get the elements array.
+  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ AssertFastElements(a2);
+
+  // Check that the key is within bounds.
+  __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
+  __ Branch(&miss_force_generic, hs, a0, Operand(a3));
+
+  // Load the result and make sure it's not the hole.
+  __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, t0, a3);
+  __ lw(t0, MemOperand(t0));
+  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+  __ Branch(&miss_force_generic, eq, t0, Operand(t1));
+  __ mov(v0, t0);
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  //  -- t0    : scratch (elements)
+  // -----------------------------------
+  Label miss_force_generic;
+
+  Register value_reg = a0;
+  Register key_reg = a1;
+  Register receiver_reg = a2;
+  Register scratch = a3;
+  Register elements_reg = t0;
+  Register scratch2 = t1;
+  Register scratch3 = t2;
+
+  // This stub is meant to be tail-jumped to, the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Get the elements array and make sure it is a fast element array, not 'cow'.
+  __ lw(elements_reg,
+        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+  __ CheckMap(elements_reg,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis.
+  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+  __ Addu(scratch,
+          elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(scratch3, scratch2, scratch);
+  __ sw(value_reg, MemOperand(scratch3));
+  __ RecordWrite(scratch, Operand(scratch2), receiver_reg, elements_reg);
+
+  // value_reg (a0) is preserved.
+  // Done.
+  __ Ret();
+
+  __ bind(&miss_force_generic);
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
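Both fast-element stubs above (like the external-array stubs earlier) do their bounds check with a single unsigned comparison: "Unsigned comparison catches both negative and too-large values", here the hs branches. Reinterpreting a negative index as unsigned turns it into a huge value, so one compare rejects negative and out-of-range keys at once; comparing the tagged smi key against a smi length works the same way, since both are scaled by the smi tag. The equivalent C++ check, assuming a non-negative length:

    #include <stdint.h>

    // One unsigned compare covers both index < 0 and index >= length.
    static inline bool KeyInBounds(int32_t index, int32_t length) {
      return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
    }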
 
 
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
deleted file mode 100644
index 22fe9f0..0000000
--- a/src/mips/virtual-frame-mips.cc
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::PopToA1A0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PopToA1() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PopToA0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTo(const VirtualFrame* expected,
-                           Condition cond,
-                           Register r1,
-                           const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected,
-                           Condition cond,
-                           Register r1,
-                           const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTOSTo(
-    VirtualFrame::TopOfStack expected_top_of_stack_state,
-    Condition cond,
-    Register r1,
-    const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Enter() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Exit() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallJSFunction(int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
-  UNIMPLEMENTED_MIPS();
-}
-#endif
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                 InvokeJSFlags flags,
-                                 int arg_count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallKeyedLoadIC() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallKeyedStoreIC() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
-                                  RelocInfo::Mode rmode,
-                                  int dropped_args) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-//    NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS.
-const bool VirtualFrame::kA0InUse[TOS_STATES] =
-    { false,            true,   false,  true,      true };
-const bool VirtualFrame::kA1InUse[TOS_STATES] =
-    { false,            false,  true,   true,      true };
-const int VirtualFrame::kVirtualElements[TOS_STATES] =
-    { 0,                1,      1,      2,         2 };
-const Register VirtualFrame::kTopRegister[TOS_STATES] =
-    { a0,               a0,     a1,     a1,        a0 };
-const Register VirtualFrame::kBottomRegister[TOS_STATES] =
-    { a0,               a0,     a1,     a0,        a1 };
-const Register VirtualFrame::kAllocatedRegisters[
-    VirtualFrame::kNumberOfAllocatedRegisters] = { a2, a3, t0, t1, t2 };
-// Popping is done by the transition implied by kStateAfterPop.  Of course if
-// there were no stack slots allocated to registers then the physical SP must
-// be adjusted.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
-    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, A0_TOS, A1_TOS };
-// Pushing is done by the transition implied by kStateAfterPush.  Of course if
-// the maximum number of registers was already allocated to the top of stack
-// slots then one register must be physically pushed onto the stack.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
-    { A0_TOS, A1_A0_TOS, A0_A1_TOS, A0_A1_TOS, A1_A0_TOS };
-
-
-void VirtualFrame::Drop(int count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Pop() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA1() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA1A0() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::Peek() {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-Register VirtualFrame::Peek2() {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-void VirtualFrame::Dup() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Dup2() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-void VirtualFrame::EnsureOneFreeTOSRegister() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPop(RegList regs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::GetTOSRegister() {
-  UNIMPLEMENTED_MIPS();
-  return no_reg;
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPush(RegList regs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPushReversed(RegList regs) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAll() {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
deleted file mode 100644
index cf30b09..0000000
--- a/src/mips/virtual-frame-mips.h
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// This dummy class is only used to create invalid virtual frames.
-extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
-  class RegisterAllocationScope;
-  // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled. The constructor spills the code
-  // generator's current frame, and keeps it spilled.
-  class SpilledScope BASE_EMBEDDED {
-   public:
-    explicit SpilledScope(VirtualFrame* frame)
-      : old_is_spilled_(
-          Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
-      if (frame != NULL) {
-        if (!old_is_spilled_) {
-          frame->SpillAll();
-        } else {
-          frame->AssertIsSpilled();
-        }
-      }
-      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
-    }
-    ~SpilledScope() {
-      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
-          old_is_spilled_);
-    }
-    static bool is_spilled() {
-      return Isolate::Current()->is_virtual_frame_in_spilled_scope();
-    }
-
-   private:
-    int old_is_spilled_;
-
-    SpilledScope() {}
-
-    friend class RegisterAllocationScope;
-  };
-
-  class RegisterAllocationScope BASE_EMBEDDED {
-   public:
-    // A utility class to introduce a scope where the virtual frame
-    // is not spilled, ie. where register allocation occurs.  Eventually
-    // when RegisterAllocationScope is ubiquitous it can be removed
-    // along with the (by then unused) SpilledScope class.
-    inline explicit RegisterAllocationScope(CodeGenerator* cgen);
-    inline ~RegisterAllocationScope();
-
-   private:
-    CodeGenerator* cgen_;
-    bool old_is_spilled_;
-
-    RegisterAllocationScope() {}
-  };
-
-  // An illegal index into the virtual frame.
-  static const int kIllegalIndex = -1;
-
-  // Construct an initial virtual frame on entry to a JS function.
-  inline VirtualFrame();
-
-  // Construct an invalid virtual frame, used by JumpTargets.
-  explicit inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
-
-  // Construct a virtual frame as a clone of an existing one.
-  explicit inline VirtualFrame(VirtualFrame* original);
-
-  inline CodeGenerator* cgen() const;
-  inline MacroAssembler* masm();
-
-  // The number of elements on the virtual frame.
-  int element_count() const { return element_count_; }
-
-  // The height of the virtual expression stack.
-  inline int height() const;
-
-  bool is_used(int num) {
-    switch (num) {
-      case 0: {  // a0.
-        return kA0InUse[top_of_stack_state_];
-      }
-      case 1: {  // a1.
-        return kA1InUse[top_of_stack_state_];
-      }
-      case 2:
-      case 3:
-      case 4:
-      case 5:
-      case 6: {  // a2 to a3, t0 to t2.
-        ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
-        ASSERT(num >= kFirstAllocatedRegister);
-        if ((register_allocation_map_ &
-             (1 << (num - kFirstAllocatedRegister))) == 0) {
-          return false;
-        } else {
-          return true;
-        }
-      }
-      default: {
-        ASSERT(num < kFirstAllocatedRegister ||
-               num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
-        return false;
-      }
-    }
-  }
-
-  // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (eg, the frame after an exception handler is pushed). No code is
-  // emitted.
-  void Adjust(int count);
-
-  // Forget elements from the top of the frame to match an actual frame (eg,
-  // the frame after a runtime call). No code is emitted except to bring the
-  // frame to a spilled state.
-  void Forget(int count);
-
-
-  // Spill all values from the frame to memory.
-  void SpillAll();
-
-  void AssertIsSpilled() const {
-    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
-    ASSERT(register_allocation_map_ == 0);
-  }
-
-  void AssertIsNotSpilled() {
-    ASSERT(!SpilledScope::is_spilled());
-  }
-
-  // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg) {
-    UNIMPLEMENTED();
-  }
-
-  // Spill all occurrences of an arbitrary register if possible. Return the
-  // register spilled or no_reg if it was not possible to free any register
-  // (ie, they all have frame-external references). Unimplemented.
-  Register SpillAnyRegister();
-
-  // Make this virtual frame have a state identical to an expected virtual
-  // frame. As a side effect, code may be emitted to make this frame match
-  // the expected one.
-  void MergeTo(const VirtualFrame* expected,
-               Condition cond = al,
-               Register r1 = no_reg,
-               const Operand& r2 = Operand(no_reg));
-
-  void MergeTo(VirtualFrame* expected,
-               Condition cond = al,
-               Register r1 = no_reg,
-               const Operand& r2 = Operand(no_reg));
-
-  // Checks whether this frame can be branched to by the other frame.
-  bool IsCompatibleWith(const VirtualFrame* other) const {
-    return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
-  }
-
-  inline void ForgetTypeInfo() {
-    tos_known_smi_map_ = 0;
-  }
-
-  // Detach a frame from its code generator, perhaps temporarily. This
-  // tells the register allocator that it is free to use frame-internal
-  // registers. Used when the code generator's frame is switched from this
-  // one to NULL by an unconditional jump.
-  void DetachFromCodeGenerator() {
-  }
-
-  // (Re)attach a frame to its code generator. This informs the register
-  // allocator that the frame-internal register references are active again.
-  // Used when a code generator's frame is switched from NULL to this one by
-  // binding a label.
-  void AttachToCodeGenerator() {
-  }
-
-  // Emit code for the physical JS entry and exit frame sequences. After
-  // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used. Note that Enter does not allocate space in
-  // the physical frame for storing frame-allocated locals.
-  void Enter();
-  void Exit();
-
-  // Prepare for returning from the frame by elements in the virtual frame.
-  // This avoids generating unnecessary merge code when jumping to the shared
-  // return site. No spill code emitted. Value to return should be in v0.
-  inline void PrepareForReturn();
-
-  // Number of local variables after when we use a loop for allocating.
-  static const int kLocalVarBound = 5;
-
-  // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots();
-
-  // The current top of the expression stack as an assembly operand.
-  MemOperand Top() {
-    AssertIsSpilled();
-    return MemOperand(sp, 0);
-  }
-
-  // An element of the expression stack as an assembly operand.
-  MemOperand ElementAt(int index) {
-    int adjusted_index = index - kVirtualElements[top_of_stack_state_];
-    ASSERT(adjusted_index >= 0);
-    return MemOperand(sp, adjusted_index * kPointerSize);
-  }
-
-  bool KnownSmiAt(int index) {
-    if (index >= kTOSKnownSmiMapSize) return false;
-    return (tos_known_smi_map_ & (1 << index)) != 0;
-  }
-  // A frame-allocated local as an assembly operand.
-  inline MemOperand LocalAt(int index);
-
-  // Push the address of the receiver slot on the frame.
-  void PushReceiverSlotAddress();
-
-  // The function frame slot.
-  MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
-
-  // The context frame slot.
-  MemOperand Context() { return MemOperand(fp, kContextOffset); }
-
-  // A parameter as an assembly operand.
-  inline MemOperand ParameterAt(int index);
-
-  // The receiver frame slot.
-  inline MemOperand Receiver();
-
-  // Push a try-catch or try-finally handler on top of the virtual frame.
-  void PushTryHandler(HandlerType type);
-
-  // Call stub given the number of arguments it expects on (and
-  // removes from) the stack.
-  inline void CallStub(CodeStub* stub, int arg_count);
-
-  // Call JS function from top of the stack with arguments
-  // taken from the stack.
-  void CallJSFunction(int arg_count);
-
-  // Call runtime given the number of arguments expected on (and
-  // removed from) the stack.
-  void CallRuntime(const Runtime::Function* f, int arg_count);
-  void CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  void DebugBreak();
-#endif
-
-  // Invoke builtin given the number of arguments it expects on (and
-  // removes from) the stack.
-  void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeJSFlags flag,
-                     int arg_count);
-
-  // Call load IC. Receiver is on the stack and is consumed. Result is returned
-  // in v0.
-  void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
-
-  // Call store IC. If the load is contextual, value is found on top of the
-  // frame. If not, value and receiver are on the frame. Both are consumed.
-  // Result is returned in v0.
-  void CallStoreIC(Handle<String> name, bool is_contextual);
-
-  // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
-  // Result is returned in v0.
-  void CallKeyedLoadIC();
-
-  // Call keyed store IC. Value, key and receiver are on the stack. All three
-  // are consumed. Result is returned in v0 (and a0).
-  void CallKeyedStoreIC();
-
-  // Call into an IC stub given the number of arguments it removes
-  // from the stack. Register arguments to the IC stub are implicit,
-  // and depend on the type of IC stub.
-  void CallCodeObject(Handle<Code> ic,
-                      RelocInfo::Mode rmode,
-                      int dropped_args);
-
-  // Drop a number of elements from the top of the expression stack. May
-  // emit code to affect the physical frame. Does not clobber any registers
-  // excepting possibly the stack pointer.
-  void Drop(int count);
-
-  // Drop one element.
-  void Drop() { Drop(1); }
-
-  // Pop an element from the top of the expression stack. Discards
-  // the result.
-  void Pop();
-
-  // Pop an element from the top of the expression stack.  The register
-  // will be one normally used for the top of stack register allocation
-  // so you can't hold on to it if you push on the stack.
-  Register PopToRegister(Register but_not_to_this_one = no_reg);
-
-  // Look at the top of the stack.  The register returned is aliased and
-  // must be copied to a scratch register before modification.
-  Register Peek();
-
-  // Look at the value beneath the top of the stack. The register returned is
-  // aliased and must be copied to a scratch register before modification.
-  Register Peek2();
-
-  // Duplicate the top of stack.
-  void Dup();
-
-  // Duplicate the two elements on top of stack.
-  void Dup2();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in a0.
-  void SpillAllButCopyTOSToA0();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in a1.
-  void SpillAllButCopyTOSToA1();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in a1
-  // and the next value on the stack in a0.
-  void SpillAllButCopyTOSToA1A0();
-
-  // Pop and save an element from the top of the expression stack and
-  // emit a corresponding pop instruction.
-  void EmitPop(Register reg);
-  // Same but for multiple registers
-  void EmitMultiPop(RegList regs);
-  void EmitMultiPopReversed(RegList regs);
-
-
-  // Takes the top two elements and puts them in a0 (top element) and a1
-  // (second element).
-  void PopToA1A0();
-
-  // Takes the top element and puts it in a1.
-  void PopToA1();
-
-  // Takes the top element and puts it in a0.
-  void PopToA0();
-
-  // Push an element on top of the expression stack and emit a
-  // corresponding push instruction.
-  void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPushRoot(Heap::RootListIndex index);
-
-  // Overwrite the nth thing on the stack.  If the nth position is in a
-  // register then this turns into a Move, otherwise an sw.  Afterwards
-  // you can still use the register even if it is a register that can be
-  // used for TOS (a0 or a1).
-  void SetElementAt(Register reg, int this_far_down);
-
-  // Get a register which is free and which must be immediately used to
-  // push on the top of the stack.
-  Register GetTOSRegister();
-
-  // Same but for multiple registers.
-  void EmitMultiPush(RegList regs);
-  void EmitMultiPushReversed(RegList regs);
-
-  static Register scratch0() { return t4; }
-  static Register scratch1() { return t5; }
-  static Register scratch2() { return t6; }
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
-  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
-
-  // 5 states for the top of stack, which can be in memory or in a0 and a1.
-  enum TopOfStack { NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS,
-                    TOS_STATES};
-  static const int kMaxTOSRegisters = 2;
-
-  static const bool kA0InUse[TOS_STATES];
-  static const bool kA1InUse[TOS_STATES];
-  static const int kVirtualElements[TOS_STATES];
-  static const TopOfStack kStateAfterPop[TOS_STATES];
-  static const TopOfStack kStateAfterPush[TOS_STATES];
-  static const Register kTopRegister[TOS_STATES];
-  static const Register kBottomRegister[TOS_STATES];
-
-  // We allocate up to 5 locals in registers.
-  static const int kNumberOfAllocatedRegisters = 5;
-  // r2 to r6 are allocated to locals.
-  static const int kFirstAllocatedRegister = 2;
-
-  static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
-
-  static Register AllocatedRegister(int r) {
-    ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
-    return kAllocatedRegisters[r];
-  }
-
-  // The number of elements on the stack frame.
-  int element_count_;
-  TopOfStack top_of_stack_state_:3;
-  int register_allocation_map_:kNumberOfAllocatedRegisters;
-  static const int kTOSKnownSmiMapSize = 4;
-  unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
-
-  // The index of the element that is at the processor's stack pointer
-  // (the sp register).  For now since everything is in memory it is given
-  // by the number of elements on the not-very-virtual stack frame.
-  int stack_pointer() { return element_count_ - 1; }
-
-  // The number of frame-allocated locals and parameters respectively.
-  inline int parameter_count() const;
-  inline int local_count() const;
-
-  // The index of the element that is at the processor's frame pointer
-  // (the fp register). The parameters, receiver, function, and context
-  // are below the frame pointer.
-  inline int frame_pointer() const;
-
-  // The index of the first parameter. The receiver lies below the first
-  // parameter.
-  int param0_index() { return 1; }
-
-  // The index of the context slot in the frame. It is immediately
-  // below the frame pointer.
-  inline int context_index();
-
-  // The index of the function slot in the frame. It is below the frame
-  // pointer and context slot.
-  inline int function_index();
-
-  // The index of the first local. Between the frame pointer and the
-  // locals lies the return address.
-  inline int local0_index() const;
-
-  // The index of the base of the expression stack.
-  inline int expression_base_index() const;
-
-  // Convert a frame index into a frame pointer relative offset into the
-  // actual stack.
-  inline int fp_relative(int index);
-
-  // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame. Sync all other frame elements.
-  // Then drop dropped_args elements from the virtual frame, to match
-  // the effect of an upcoming call that will drop them from the stack.
-  void PrepareForCall(int spilled_args, int dropped_args);
-
-  // If all top-of-stack registers are in use then the lowest one is pushed
-  // onto the physical stack and made free.
-  void EnsureOneFreeTOSRegister();
-
-  // Emit instructions to get the top of stack state from where we are to where
-  // we want to be.
-  void MergeTOSTo(TopOfStack expected_state,
-                  Condition cond = al,
-                  Register r1 = no_reg,
-                  const Operand& r2 = Operand(no_reg));
-
-  inline bool Equals(const VirtualFrame* other);
-
-  inline void LowerHeight(int count) {
-    element_count_ -= count;
-    if (count >= kTOSKnownSmiMapSize) {
-      tos_known_smi_map_ = 0;
-    } else {
-      tos_known_smi_map_ >>= count;
-    }
-  }
-
-  inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
-    ASSERT(known_smi_map < (1u << count));
-    element_count_ += count;
-    if (count >= kTOSKnownSmiMapSize) {
-      tos_known_smi_map_ = known_smi_map;
-    } else {
-      tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
-    }
-  }
-  friend class JumpTarget;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index 99e9819..3a03535 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -174,11 +174,12 @@
 PropertyType.Field                   = 1;
 PropertyType.ConstantFunction        = 2;
 PropertyType.Callbacks               = 3;
-PropertyType.Interceptor             = 4;
-PropertyType.MapTransition           = 5;
-PropertyType.ExternalArrayTransition = 6;
-PropertyType.ConstantTransition      = 7;
-PropertyType.NullDescriptor          = 8;
+PropertyType.Handler                 = 4;
+PropertyType.Interceptor             = 5;
+PropertyType.MapTransition           = 6;
+PropertyType.ExternalArrayTransition = 7;
+PropertyType.ConstantTransition      = 8;
+PropertyType.NullDescriptor          = 9;
 
 
 // Different attributes for a property.
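
The renumbering above follows the C++ property-type enumeration gaining a HANDLER entry for proxy handler properties. A minimal sketch of the inferred ordering is below; the values come from the hunk above, and the enum itself is illustrative rather than copied from objects.h.

    // Hedged sketch: inferred C++ counterpart of the renumbered mirror
    // constants above. HANDLER is inserted after CALLBACKS and the later
    // entries shift by one.
    #include <cstdio>

    enum PropertyType {
      FIELD                     = 1,
      CONSTANT_FUNCTION         = 2,
      CALLBACKS                 = 3,
      HANDLER                   = 4,  // new in this upgrade
      INTERCEPTOR               = 5,
      MAP_TRANSITION            = 6,
      EXTERNAL_ARRAY_TRANSITION = 7,
      CONSTANT_TRANSITION       = 8,
      NULL_DESCRIPTOR           = 9
    };

    int main() {
      std::printf("HANDLER = %d\n", static_cast<int>(HANDLER));
      return 0;
    }
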
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 6ecbc8c..dd49116 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -25,6 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+#include <bzlib.h>
+#endif
 #include <signal.h>
 #include <string>
 #include <map>
@@ -95,11 +98,53 @@
 static CounterMap counter_table_;
 
 
-class CppByteSink : public i::SnapshotByteSink {
+class Compressor {
  public:
-  explicit CppByteSink(const char* snapshot_file)
-      : bytes_written_(0),
-        partial_sink_(this) {
+  virtual ~Compressor() {}
+  virtual bool Compress(i::Vector<char> input) = 0;
+  virtual i::Vector<char>* output() = 0;
+};
+
+
+class PartialSnapshotSink : public i::SnapshotByteSink {
+ public:
+  PartialSnapshotSink() : data_(), raw_size_(-1) { }
+  virtual ~PartialSnapshotSink() { data_.Free(); }
+  virtual void Put(int byte, const char* description) {
+    data_.Add(byte);
+  }
+  virtual int Position() { return data_.length(); }
+  void Print(FILE* fp) {
+    int length = Position();
+    for (int j = 0; j < length; j++) {
+      if ((j & 0x1f) == 0x1f) {
+        fprintf(fp, "\n");
+      }
+      if (j != 0) {
+        fprintf(fp, ",");
+      }
+      fprintf(fp, "%d", at(j));
+    }
+  }
+  char at(int i) { return data_[i]; }
+  bool Compress(Compressor* compressor) {
+    ASSERT_EQ(-1, raw_size_);
+    raw_size_ = data_.length();
+    if (!compressor->Compress(data_.ToVector())) return false;
+    data_.Clear();
+    data_.AddAll(*compressor->output());
+    return true;
+  }
+  int raw_size() { return raw_size_; }
+ private:
+  i::List<char> data_;
+  int raw_size_;
+};
+
+
+class CppByteSink : public PartialSnapshotSink {
+ public:
+  explicit CppByteSink(const char* snapshot_file) {
     fp_ = i::OS::FOpen(snapshot_file, "wb");
     if (fp_ == NULL) {
       i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
@@ -114,7 +159,18 @@
   }
 
   virtual ~CppByteSink() {
-    fprintf(fp_, "const int Snapshot::size_ = %d;\n\n", bytes_written_);
+    fprintf(fp_, "const int Snapshot::size_ = %d;\n", Position());
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+    fprintf(fp_, "const byte* Snapshot::raw_data_ = NULL;\n");
+    fprintf(fp_,
+            "const int Snapshot::raw_size_ = %d;\n\n",
+            raw_size());
+#else
+    fprintf(fp_,
+            "const byte* Snapshot::raw_data_ = Snapshot::data_;\n");
+    fprintf(fp_,
+            "const int Snapshot::raw_size_ = Snapshot::size_;\n\n");
+#endif
     fprintf(fp_, "} }  // namespace v8::internal\n");
     fclose(fp_);
   }
@@ -127,7 +183,6 @@
       int map_space_used,
       int cell_space_used,
       int large_space_used) {
-    fprintf(fp_, "};\n\n");
     fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used);
     fprintf(fp_,
             "const int Snapshot::pointer_space_used_ = %d;\n",
@@ -151,59 +206,68 @@
     int length = partial_sink_.Position();
     fprintf(fp_, "};\n\n");
     fprintf(fp_, "const int Snapshot::context_size_ = %d;\n",  length);
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+    fprintf(fp_,
+            "const int Snapshot::context_raw_size_ = %d;\n",
+            partial_sink_.raw_size());
+#else
+    fprintf(fp_,
+            "const int Snapshot::context_raw_size_ = "
+            "Snapshot::context_size_;\n");
+#endif
     fprintf(fp_, "const byte Snapshot::context_data_[] = {\n");
-    for (int j = 0; j < length; j++) {
-      if ((j & 0x1f) == 0x1f) {
-        fprintf(fp_, "\n");
-      }
-      char byte = partial_sink_.at(j);
-      if (j != 0) {
-        fprintf(fp_, ",");
-      }
-      fprintf(fp_, "%d", byte);
-    }
+    partial_sink_.Print(fp_);
+    fprintf(fp_, "};\n\n");
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+    fprintf(fp_, "const byte* Snapshot::context_raw_data_ = NULL;\n");
+#else
+    fprintf(fp_, "const byte* Snapshot::context_raw_data_ ="
+            " Snapshot::context_data_;\n");
+#endif
   }
 
-  virtual void Put(int byte, const char* description) {
-    if (bytes_written_ != 0) {
-      fprintf(fp_, ",");
-    }
-    fprintf(fp_, "%d", byte);
-    bytes_written_++;
-    if ((bytes_written_ & 0x1f) == 0) {
-      fprintf(fp_, "\n");
-    }
+  void WriteSnapshot() {
+    Print(fp_);
   }
 
-  virtual int Position() {
-    return bytes_written_;
-  }
-
-  i::SnapshotByteSink* partial_sink() { return &partial_sink_; }
-
-  class PartialSnapshotSink : public i::SnapshotByteSink {
-   public:
-    explicit PartialSnapshotSink(CppByteSink* parent)
-        : parent_(parent),
-          data_() { }
-    virtual ~PartialSnapshotSink() { data_.Free(); }
-    virtual void Put(int byte, const char* description) {
-      data_.Add(byte);
-    }
-    virtual int Position() { return data_.length(); }
-    char at(int i) { return data_[i]; }
-   private:
-    CppByteSink* parent_;
-    i::List<char> data_;
-  };
+  PartialSnapshotSink* partial_sink() { return &partial_sink_; }
 
  private:
   FILE* fp_;
-  int bytes_written_;
   PartialSnapshotSink partial_sink_;
 };
 
 
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+class BZip2Compressor : public Compressor {
+ public:
+  BZip2Compressor() : output_(NULL) {}
+  virtual ~BZip2Compressor() {
+    delete output_;
+  }
+  virtual bool Compress(i::Vector<char> input) {
+    delete output_;
+    output_ = new i::ScopedVector<char>((input.length() * 101) / 100 + 1000);
+    unsigned int output_length_ = output_->length();
+    int result = BZ2_bzBuffToBuffCompress(output_->start(), &output_length_,
+                                          input.start(), input.length(),
+                                          9, 1, 0);
+    if (result == BZ_OK) {
+      output_->Truncate(output_length_);
+      return true;
+    } else {
+      fprintf(stderr, "bzlib error code: %d\n", result);
+      return false;
+    }
+  }
+  virtual i::Vector<char>* output() { return output_; }
+
+ private:
+  i::ScopedVector<char>* output_;
+};
+#endif
+
+
 int main(int argc, char** argv) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   // By default, log code create information in the snapshot.
@@ -242,6 +306,14 @@
 
   ser.SerializeWeakReferences();
 
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+  BZip2Compressor compressor;
+  if (!sink.Compress(&compressor))
+    return 1;
+  if (!sink.partial_sink()->Compress(&compressor))
+    return 1;
+#endif
+  sink.WriteSnapshot();
   sink.WritePartialSnapshot();
 
   sink.WriteSpaceUsed(
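
The mksnapshot.cc changes route the snapshot bytes through an abstract Compressor before they are emitted as C arrays. The following self-contained sketch shows that sink/compressor flow with standard-library stand-ins (std::vector for i::List, an identity "compressor" for BZip2Compressor); it is an analogue of the pattern, not code from the patch.

    #include <cstdio>
    #include <vector>

    // Analogue of the Compressor interface added in this file: implementations
    // consume the sink's bytes and expose a replacement buffer.
    class Compressor {
     public:
      virtual ~Compressor() {}
      virtual bool Compress(const std::vector<char>& input) = 0;
      virtual const std::vector<char>& output() const = 0;
    };

    // Stand-in for BZip2Compressor so the sketch runs without bzlib; a real
    // implementation would call BZ2_bzBuffToBuffCompress as the hunk above does.
    class IdentityCompressor : public Compressor {
     public:
      bool Compress(const std::vector<char>& input) override {
        output_ = input;
        return true;
      }
      const std::vector<char>& output() const override { return output_; }
     private:
      std::vector<char> output_;
    };

    // Analogue of PartialSnapshotSink: remember the raw length, then swap the
    // buffer for the compressed bytes, mirroring PartialSnapshotSink::Compress.
    class ByteSink {
     public:
      void Put(char byte) { data_.push_back(byte); }
      bool Compress(Compressor* compressor) {
        raw_size_ = static_cast<int>(data_.size());
        if (!compressor->Compress(data_)) return false;
        data_ = compressor->output();
        return true;
      }
      int raw_size() const { return raw_size_; }
      int size() const { return static_cast<int>(data_.size()); }
     private:
      std::vector<char> data_;
      int raw_size_ = -1;
    };

    int main() {
      ByteSink sink;
      const char kBytes[] = "snapshot";
      for (const char* p = kBytes; *p != '\0'; ++p) sink.Put(*p);
      IdentityCompressor compressor;
      if (!sink.Compress(&compressor)) return 1;
      std::printf("raw_size=%d size=%d\n", sink.raw_size(), sink.size());
      return 0;
    }
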
diff --git a/src/natives.h b/src/natives.h
index 1df94b0..92f0d90 100644
--- a/src/natives.h
+++ b/src/natives.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,7 +36,7 @@
                                      int index);
 
 enum NativeType {
-  CORE, D8, I18N
+  CORE, EXPERIMENTAL, D8, I18N
 };
 
 template <NativeType type>
@@ -57,6 +57,7 @@
 };
 
 typedef NativesCollection<CORE> Natives;
+typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
 
 } }  // namespace v8::internal
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index dd606dc..76c520e 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -116,6 +116,9 @@
     case EXTERNAL_FLOAT_ARRAY_TYPE:
       ExternalFloatArray::cast(this)->ExternalFloatArrayVerify();
       break;
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
+      ExternalDoubleArray::cast(this)->ExternalDoubleArrayVerify();
+      break;
     case CODE_TYPE:
       Code::cast(this)->CodeVerify();
       break;
@@ -152,8 +155,11 @@
       break;
     case FILLER_TYPE:
       break;
-    case PROXY_TYPE:
-      Proxy::cast(this)->ProxyVerify();
+    case JS_PROXY_TYPE:
+      JSProxy::cast(this)->JSProxyVerify();
+      break;
+    case FOREIGN_TYPE:
+      Foreign::cast(this)->ForeignVerify();
       break;
     case SHARED_FUNCTION_INFO_TYPE:
       SharedFunctionInfo::cast(this)->SharedFunctionInfoVerify();
@@ -232,6 +238,11 @@
 }
 
 
+void ExternalDoubleArray::ExternalDoubleArrayVerify() {
+  ASSERT(IsExternalDoubleArray());
+}
+
+
 void JSObject::JSObjectVerify() {
   VerifyHeapPointer(properties());
   VerifyHeapPointer(elements());
@@ -261,7 +272,7 @@
 void Map::SharedMapVerify() {
   MapVerify();
   ASSERT(is_shared());
-  ASSERT_EQ(GetHeap()->empty_descriptor_array(), instance_descriptors());
+  ASSERT(instance_descriptors()->IsEmpty());
   ASSERT_EQ(0, pre_allocated_property_fields());
   ASSERT_EQ(0, unused_property_fields());
   ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
@@ -433,14 +444,22 @@
 
       FixedArray* arr = FixedArray::cast(data());
       Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex);
-      // TheHole : Not compiled yet.
+      // Smi : Not compiled yet (-1) or code prepared for flushing.
       // JSObject: Compilation error.
       // Code/ByteArray: Compiled code.
-      ASSERT(ascii_data->IsTheHole() || ascii_data->IsJSObject() ||
-          (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
+      ASSERT(ascii_data->IsSmi() ||
+             (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
       Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
-      ASSERT(uc16_data->IsTheHole() || uc16_data->IsJSObject() ||
-          (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
+      ASSERT(uc16_data->IsSmi() ||
+             (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
+
+      Object* ascii_saved = arr->get(JSRegExp::kIrregexpASCIICodeSavedIndex);
+      ASSERT(ascii_saved->IsSmi() || ascii_saved->IsString() ||
+             ascii_saved->IsCode());
+      Object* uc16_saved = arr->get(JSRegExp::kIrregexpUC16CodeSavedIndex);
+      ASSERT(uc16_saved->IsSmi() || uc16_saved->IsString() ||
+             uc16_saved->IsCode());
+
       ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
       ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
       break;
@@ -453,8 +472,13 @@
 }
 
 
-void Proxy::ProxyVerify() {
-  ASSERT(IsProxy());
+void JSProxy::JSProxyVerify() {
+  ASSERT(IsJSProxy());
+  VerifyPointer(handler());
+}
+
+void Foreign::ForeignVerify() {
+  ASSERT(IsForeign());
 }
 
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 65aec5d..aa5fc86 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -406,6 +406,13 @@
 }
 
 
+bool Object::IsExternalDoubleArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_DOUBLE_ARRAY_TYPE;
+}
+
+
 bool MaybeObject::IsFailure() {
   return HAS_FAILURE_TAG(this);
 }
@@ -577,9 +584,15 @@
 }
 
 
-bool Object::IsProxy() {
+bool Object::IsJSProxy() {
   return Object::IsHeapObject()
-      && HeapObject::cast(this)->map()->instance_type() == PROXY_TYPE;
+      && HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE;
+}
+
+
+bool Object::IsForeign() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() == FOREIGN_TYPE;
 }
 
 
@@ -1661,9 +1674,21 @@
 
 
 bool DescriptorArray::IsEmpty() {
-  ASSERT(this->length() > kFirstIndex ||
+  ASSERT(this->IsSmi() ||
+         this->length() > kFirstIndex ||
          this == HEAP->empty_descriptor_array());
-  return length() <= kFirstIndex;
+  return this->IsSmi() || length() <= kFirstIndex;
+}
+
+
+int DescriptorArray::bit_field3_storage() {
+  Object* storage = READ_FIELD(this, kBitField3StorageOffset);
+  return Smi::cast(storage)->value();
+}
+
+void DescriptorArray::set_bit_field3_storage(int value) {
+  ASSERT(!IsEmpty());
+  WRITE_FIELD(this, kBitField3StorageOffset, Smi::FromInt(value));
 }
 
 
@@ -1744,8 +1769,8 @@
 
 AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
   ASSERT(GetType(descriptor_number) == CALLBACKS);
-  Proxy* p = Proxy::cast(GetCallbacksObject(descriptor_number));
-  return reinterpret_cast<AccessorDescriptor*>(p->proxy());
+  Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
+  return reinterpret_cast<AccessorDescriptor*>(p->address());
 }
 
 
@@ -1891,7 +1916,8 @@
 CAST_ACCESSOR(Code)
 CAST_ACCESSOR(JSArray)
 CAST_ACCESSOR(JSRegExp)
-CAST_ACCESSOR(Proxy)
+CAST_ACCESSOR(JSProxy)
+CAST_ACCESSOR(Foreign)
 CAST_ACCESSOR(ByteArray)
 CAST_ACCESSOR(ExternalArray)
 CAST_ACCESSOR(ExternalByteArray)
@@ -1901,6 +1927,7 @@
 CAST_ACCESSOR(ExternalIntArray)
 CAST_ACCESSOR(ExternalUnsignedIntArray)
 CAST_ACCESSOR(ExternalFloatArray)
+CAST_ACCESSOR(ExternalDoubleArray)
 CAST_ACCESSOR(ExternalPixelArray)
 CAST_ACCESSOR(Struct)
 
@@ -2315,6 +2342,20 @@
 }
 
 
+double ExternalDoubleArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  double* ptr = static_cast<double*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalDoubleArray::set(int index, double value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  double* ptr = static_cast<double*>(external_pointer());
+  ptr[index] = value;
+}
+
+
 int Map::visitor_id() {
   return READ_BYTE_FIELD(this, kVisitorIdOffset);
 }
@@ -2499,14 +2540,14 @@
 
 void Map::set_is_shared(bool value) {
   if (value) {
-    set_bit_field2(bit_field2() | (1 << kIsShared));
+    set_bit_field3(bit_field3() | (1 << kIsShared));
   } else {
-    set_bit_field2(bit_field2() & ~(1 << kIsShared));
+    set_bit_field3(bit_field3() & ~(1 << kIsShared));
   }
 }
 
 bool Map::is_shared() {
-  return ((1 << kIsShared) & bit_field2()) != 0;
+  return ((1 << kIsShared) & bit_field3()) != 0;
 }
 
 
@@ -2566,7 +2607,6 @@
 
 
 PropertyType Code::type() {
-  ASSERT(ic_state() == MONOMORPHIC);
   return ExtractTypeFromFlags(flags());
 }
 
@@ -2579,7 +2619,8 @@
 
 int Code::major_key() {
   ASSERT(kind() == STUB ||
-         kind() == TYPE_RECORDING_BINARY_OP_IC ||
+         kind() == UNARY_OP_IC ||
+         kind() == BINARY_OP_IC ||
          kind() == COMPARE_IC);
   return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
 }
@@ -2587,7 +2628,8 @@
 
 void Code::set_major_key(int major) {
   ASSERT(kind() == STUB ||
-         kind() == TYPE_RECORDING_BINARY_OP_IC ||
+         kind() == UNARY_OP_IC ||
+         kind() == BINARY_OP_IC ||
          kind() == COMPARE_IC);
   ASSERT(0 <= major && major < 256);
   WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
@@ -2683,38 +2725,50 @@
 
 
 ExternalArrayType Code::external_array_type() {
-  ASSERT(is_external_array_load_stub() || is_external_array_store_stub());
+  ASSERT(is_keyed_load_stub() || is_keyed_store_stub());
   byte type = READ_BYTE_FIELD(this, kExternalArrayTypeOffset);
   return static_cast<ExternalArrayType>(type);
 }
 
 
 void Code::set_external_array_type(ExternalArrayType value) {
-  ASSERT(is_external_array_load_stub() || is_external_array_store_stub());
+  ASSERT(is_keyed_load_stub() || is_keyed_store_stub());
   WRITE_BYTE_FIELD(this, kExternalArrayTypeOffset, value);
 }
 
 
-byte Code::type_recording_binary_op_type() {
-  ASSERT(is_type_recording_binary_op_stub());
+byte Code::unary_op_type() {
+  ASSERT(is_unary_op_stub());
+  return READ_BYTE_FIELD(this, kUnaryOpTypeOffset);
+}
+
+
+void Code::set_unary_op_type(byte value) {
+  ASSERT(is_unary_op_stub());
+  WRITE_BYTE_FIELD(this, kUnaryOpTypeOffset, value);
+}
+
+
+byte Code::binary_op_type() {
+  ASSERT(is_binary_op_stub());
   return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
 }
 
 
-void Code::set_type_recording_binary_op_type(byte value) {
-  ASSERT(is_type_recording_binary_op_stub());
+void Code::set_binary_op_type(byte value) {
+  ASSERT(is_binary_op_stub());
   WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
 }
 
 
-byte Code::type_recording_binary_op_result_type() {
-  ASSERT(is_type_recording_binary_op_stub());
+byte Code::binary_op_result_type() {
+  ASSERT(is_binary_op_stub());
   return READ_BYTE_FIELD(this, kBinaryOpReturnTypeOffset);
 }
 
 
-void Code::set_type_recording_binary_op_result_type(byte value) {
-  ASSERT(is_type_recording_binary_op_stub());
+void Code::set_binary_op_result_type(byte value) {
+  ASSERT(is_binary_op_stub());
   WRITE_BYTE_FIELD(this, kBinaryOpReturnTypeOffset, value);
 }
 
@@ -2744,11 +2798,10 @@
                                PropertyType type,
                                int argc,
                                InlineCacheHolderFlag holder) {
-  // Extra IC state is only allowed for monomorphic call IC stubs
-  // or for store IC stubs.
+  // Extra IC state is only allowed for call IC stubs or for store IC
+  // stubs.
   ASSERT(extra_ic_state == kNoExtraICState ||
-         (kind == CALL_IC && (ic_state == MONOMORPHIC ||
-                              ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)) ||
+         (kind == CALL_IC) ||
          (kind == STORE_IC) ||
          (kind == KEYED_STORE_IC));
   // Compute the bit mask.
@@ -2926,8 +2979,82 @@
 }
 
 
-ACCESSORS(Map, instance_descriptors, DescriptorArray,
-          kInstanceDescriptorsOffset)
+DescriptorArray* Map::instance_descriptors() {
+  Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
+  if (object->IsSmi()) {
+    return HEAP->empty_descriptor_array();
+  } else {
+    return DescriptorArray::cast(object);
+  }
+}
+
+
+void Map::init_instance_descriptors() {
+  WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, Smi::FromInt(0));
+}
+
+
+void Map::clear_instance_descriptors() {
+  Object* object = READ_FIELD(this,
+                              kInstanceDescriptorsOrBitField3Offset);
+  if (!object->IsSmi()) {
+    WRITE_FIELD(
+        this,
+        kInstanceDescriptorsOrBitField3Offset,
+        Smi::FromInt(DescriptorArray::cast(object)->bit_field3_storage()));
+  }
+}
+
+
+void Map::set_instance_descriptors(DescriptorArray* value,
+                                   WriteBarrierMode mode) {
+  Object* object = READ_FIELD(this,
+                              kInstanceDescriptorsOrBitField3Offset);
+  if (value == isolate()->heap()->empty_descriptor_array()) {
+    clear_instance_descriptors();
+    return;
+  } else {
+    if (object->IsSmi()) {
+      value->set_bit_field3_storage(Smi::cast(object)->value());
+    } else {
+      value->set_bit_field3_storage(
+          DescriptorArray::cast(object)->bit_field3_storage());
+    }
+  }
+  ASSERT(!is_shared());
+  WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(),
+                            this,
+                            kInstanceDescriptorsOrBitField3Offset,
+                            mode);
+}
+
+
+int Map::bit_field3() {
+  Object* object = READ_FIELD(this,
+                              kInstanceDescriptorsOrBitField3Offset);
+  if (object->IsSmi()) {
+    return Smi::cast(object)->value();
+  } else {
+    return DescriptorArray::cast(object)->bit_field3_storage();
+  }
+}
+
+
+void Map::set_bit_field3(int value) {
+  ASSERT(Smi::IsValid(value));
+  Object* object = READ_FIELD(this,
+                              kInstanceDescriptorsOrBitField3Offset);
+  if (object->IsSmi()) {
+    WRITE_FIELD(this,
+                kInstanceDescriptorsOrBitField3Offset,
+                Smi::FromInt(value));
+  } else {
+    DescriptorArray::cast(object)->set_bit_field3_storage(value);
+  }
+}
+
+
 ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
 ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
 ACCESSORS(Map, constructor, Object, kConstructorOffset)
@@ -3003,7 +3130,7 @@
 ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
 ACCESSORS(Script, data, Object, kDataOffset)
 ACCESSORS(Script, context_data, Object, kContextOffset)
-ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
+ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
 ACCESSORS(Script, type, Smi, kTypeOffset)
 ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
 ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
@@ -3182,6 +3309,18 @@
 }
 
 
+bool SharedFunctionInfo::es5_native() {
+  return BooleanBit::get(compiler_hints(), kES5Native);
+}
+
+
+void SharedFunctionInfo::set_es5_native(bool value) {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kES5Native,
+                                     value));
+}
+
+
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
 
@@ -3473,13 +3612,16 @@
 }
 
 
-Address Proxy::proxy() {
-  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kProxyOffset));
+ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+
+
+Address Foreign::address() {
+  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kAddressOffset));
 }
 
 
-void Proxy::set_proxy(Address value) {
-  WRITE_INTPTR_FIELD(this, kProxyOffset, OffsetFrom(value));
+void Foreign::set_address(Address value) {
+  WRITE_INTPTR_FIELD(this, kAddressOffset, OffsetFrom(value));
 }
 
 
@@ -3512,6 +3654,8 @@
 INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
 ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
 ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
+ACCESSORS(Code, next_code_flushing_candidate,
+          Object, kNextCodeFlushingCandidateOffset)
 
 
 byte* Code::instruction_start()  {
@@ -3575,6 +3719,12 @@
 }
 
 
+JSRegExp::Type JSRegExp::TypeTagUnchecked() {
+  Smi* smi = Smi::cast(DataAtUnchecked(kTagIndex));
+  return static_cast<JSRegExp::Type>(smi->value());
+}
+
+
 int JSRegExp::CaptureCount() {
   switch (TypeTag()) {
     case ATOM:
@@ -3610,6 +3760,13 @@
 }
 
 
+Object* JSRegExp::DataAtUnchecked(int index) {
+  FixedArray* fa = reinterpret_cast<FixedArray*>(data());
+  int offset = FixedArray::kHeaderSize + index * kPointerSize;
+  return READ_FIELD(fa, offset);
+}
+
+
 void JSRegExp::SetDataAt(int index, Object* value) {
   ASSERT(TypeTag() != NOT_COMPILED);
   ASSERT(index >= kDataIndex);  // Only implementation data can be set this way.
@@ -3617,6 +3774,17 @@
 }
 
 
+void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) {
+  ASSERT(index >= kDataIndex);  // Only implementation data can be set this way.
+  FixedArray* fa = reinterpret_cast<FixedArray*>(data());
+  if (value->IsSmi()) {
+    fa->set_unchecked(index, Smi::cast(value));
+  } else {
+    fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
+  }
+}
+
+
 JSObject::ElementsKind JSObject::GetElementsKind() {
   if (map()->has_fast_elements()) {
     ASSERT(elements()->map() == GetHeap()->fixed_array_map() ||
@@ -3645,14 +3813,18 @@
         return EXTERNAL_INT_ELEMENTS;
       case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
         return EXTERNAL_UNSIGNED_INT_ELEMENTS;
+      case EXTERNAL_FLOAT_ARRAY_TYPE:
+        return EXTERNAL_FLOAT_ELEMENTS;
+      case EXTERNAL_DOUBLE_ARRAY_TYPE:
+        return EXTERNAL_DOUBLE_ELEMENTS;
       case EXTERNAL_PIXEL_ARRAY_TYPE:
         return EXTERNAL_PIXEL_ELEMENTS;
       default:
         break;
     }
   }
-  ASSERT(array->map()->instance_type() == EXTERNAL_FLOAT_ARRAY_TYPE);
-  return EXTERNAL_FLOAT_ELEMENTS;
+  UNREACHABLE();
+  return DICTIONARY_ELEMENTS;
 }
 
 
@@ -3693,6 +3865,8 @@
                         EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
 EXTERNAL_ELEMENTS_CHECK(Float,
                         EXTERNAL_FLOAT_ARRAY_TYPE)
+EXTERNAL_ELEMENTS_CHECK(Double,
+                        EXTERNAL_DOUBLE_ARRAY_TYPE)
 EXTERNAL_ELEMENTS_CHECK(Pixel, EXTERNAL_PIXEL_ARRAY_TYPE)
 
 
@@ -4084,16 +4258,16 @@
 }
 
 
-void Proxy::ProxyIterateBody(ObjectVisitor* v) {
+void Foreign::ForeignIterateBody(ObjectVisitor* v) {
   v->VisitExternalReference(
-      reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
+      reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
 }
 
 
 template<typename StaticVisitor>
-void Proxy::ProxyIterateBody() {
+void Foreign::ForeignIterateBody() {
   StaticVisitor::VisitExternalReference(
-      reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
+      reinterpret_cast<Address *>(FIELD_ADDR(this, kAddressOffset)));
 }
 
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index b7e2fdd..60028c0 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -114,6 +114,9 @@
     case EXTERNAL_FLOAT_ARRAY_TYPE:
       ExternalFloatArray::cast(this)->ExternalFloatArrayPrint(out);
       break;
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
+      ExternalDoubleArray::cast(this)->ExternalDoubleArrayPrint(out);
+      break;
     case FILLER_TYPE:
       PrintF(out, "filler");
       break;
@@ -145,8 +148,11 @@
     case CODE_TYPE:
       Code::cast(this)->CodePrint(out);
       break;
-    case PROXY_TYPE:
-      Proxy::cast(this)->ProxyPrint(out);
+    case JS_PROXY_TYPE:
+      JSProxy::cast(this)->JSProxyPrint(out);
+      break;
+    case FOREIGN_TYPE:
+      Foreign::cast(this)->ForeignPrint(out);
       break;
     case SHARED_FUNCTION_INFO_TYPE:
       SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(out);
@@ -217,6 +223,11 @@
 }
 
 
+void ExternalDoubleArray::ExternalDoubleArrayPrint(FILE* out) {
+  PrintF(out, "external double array");
+}
+
+
 void JSObject::PrintProperties(FILE* out) {
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
@@ -330,6 +341,13 @@
       }
       break;
     }
+    case EXTERNAL_DOUBLE_ELEMENTS: {
+      ExternalDoubleArray* p = ExternalDoubleArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF(out, "  %d: %f\n", i, p->get(i));
+      }
+      break;
+    }
     case DICTIONARY_ELEMENTS:
       elements()->Print(out);
       break;
@@ -383,6 +401,7 @@
     case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
       return "EXTERNAL_UNSIGNED_INT_ARRAY";
     case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
+    case EXTERNAL_DOUBLE_ARRAY_TYPE: return "EXTERNAL_DOUBLE_ARRAY";
     case FILLER_TYPE: return "FILLER";
     case JS_OBJECT_TYPE: return "JS_OBJECT";
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
@@ -392,19 +411,19 @@
     case JS_FUNCTION_TYPE: return "JS_FUNCTION";
     case CODE_TYPE: return "CODE";
     case JS_ARRAY_TYPE: return "JS_ARRAY";
+    case JS_PROXY_TYPE: return "JS_PROXY";
     case JS_REGEXP_TYPE: return "JS_REGEXP";
     case JS_VALUE_TYPE: return "JS_VALUE";
     case JS_GLOBAL_OBJECT_TYPE: return "JS_GLOBAL_OBJECT";
     case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
     case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
-    case PROXY_TYPE: return "PROXY";
-    case LAST_STRING_TYPE: return "LAST_STRING_TYPE";
+    case FOREIGN_TYPE: return "FOREIGN";
     case JS_MESSAGE_OBJECT_TYPE: return "JS_MESSAGE_OBJECT_TYPE";
 #define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
   STRUCT_LIST(MAKE_STRUCT_CASE)
 #undef MAKE_STRUCT_CASE
+    default: return "UNKNOWN";
   }
-  return "UNKNOWN";
 }
 
 
@@ -515,6 +534,15 @@
 }
 
 
+void JSProxy::JSProxyPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "JSProxy");
+  PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - handler = ");
+  handler()->Print(out);
+  PrintF(out, "\n");
+}
+
+
 void JSFunction::JSFunctionPrint(FILE* out) {
   HeapObject::PrintHeader(out, "Function");
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
@@ -607,8 +635,8 @@
 }
 
 
-void Proxy::ProxyPrint(FILE* out) {
-  PrintF(out, "proxy to %p", proxy());
+void Foreign::ForeignPrint(FILE* out) {
+  PrintF(out, "foreign address : %p", address());
 }
 
 
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 5a23658..685e8ad 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -85,13 +85,21 @@
     case JS_GLOBAL_PROPERTY_CELL_TYPE:
       return kVisitPropertyCell;
 
+    case JS_REGEXP_TYPE:
+      return kVisitJSRegExp;
+
     case SHARED_FUNCTION_INFO_TYPE:
       return kVisitSharedFunctionInfo;
 
-    case PROXY_TYPE:
+    case JS_PROXY_TYPE:
+      return GetVisitorIdForSize(kVisitStruct,
+                                 kVisitStructGeneric,
+                                 JSProxy::kSize);
+
+    case FOREIGN_TYPE:
       return GetVisitorIdForSize(kVisitDataObject,
                                  kVisitDataObjectGeneric,
-                                 Proxy::kSize);
+                                 Foreign::kSize);
 
     case FILLER_TYPE:
       return kVisitDataObjectGeneric;
@@ -100,7 +108,6 @@
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_VALUE_TYPE:
     case JS_ARRAY_TYPE:
-    case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
     case JS_BUILTINS_OBJECT_TYPE:
@@ -121,6 +128,7 @@
     case EXTERNAL_INT_ARRAY_TYPE:
     case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
     case EXTERNAL_FLOAT_ARRAY_TYPE:
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
       return GetVisitorIdForSize(kVisitDataObject,
                                  kVisitDataObjectGeneric,
                                  instance_size);
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index da955da..dcbf2f8 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -28,6 +28,8 @@
 #ifndef V8_OBJECTS_VISITING_H_
 #define V8_OBJECTS_VISITING_H_
 
+#include "allocation.h"
+
 // This file provides base classes and auxiliary methods for defining
 // static object visitors used during GC.
 // Visiting HeapObject body with a normal ObjectVisitor requires performing
@@ -102,6 +104,7 @@
     kVisitPropertyCell,
     kVisitSharedFunctionInfo,
     kVisitJSFunction,
+    kVisitJSRegExp,
 
     kVisitorIdCount,
     kMinObjectSizeInWords = 2
@@ -295,6 +298,8 @@
                                       SharedFunctionInfo::BodyDescriptor,
                                       int>::Visit);
 
+    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
+
     table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
 
     table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
@@ -332,6 +337,10 @@
         SeqAsciiStringSize(map->instance_type());
   }
 
+  static inline int VisitJSRegExp(Map* map, HeapObject* object) {
+    return JSObjectVisitor::Visit(map, object);
+  }
+
   static inline int VisitSeqTwoByteString(Map* map, HeapObject* object) {
     return SeqTwoByteString::cast(object)->
         SeqTwoByteStringSize(map->instance_type());
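
objects-visiting.h dispatches GC body visits through a table of callbacks indexed by visitor id; the hunk above adds kVisitJSRegExp and registers a callback for it. Below is a toy, self-contained version of that register-then-dispatch pattern; all names in it are invented for illustration.

    #include <cstdio>

    // Toy visitor-id table: ids index into an array of callbacks, mirroring
    // table_.Register(kVisitJSRegExp, &VisitJSRegExp) in the hunk above.
    enum VisitorId { kVisitString, kVisitJSObject, kVisitJSRegExp, kVisitorIdCount };

    struct Object { VisitorId id; int size; };

    using Callback = int (*)(const Object&);

    static Callback table[kVisitorIdCount];

    static int VisitString(const Object& obj) { return obj.size; }
    static int VisitJSObject(const Object& obj) { return obj.size; }

    // In the hunk above, JSRegExp gets its own id but is still visited like an
    // ordinary JS object.
    static int VisitJSRegExp(const Object& obj) { return VisitJSObject(obj); }

    int main() {
      table[kVisitString] = &VisitString;
      table[kVisitJSObject] = &VisitJSObject;
      table[kVisitJSRegExp] = &VisitJSRegExp;  // the new registration
      Object regexp = { kVisitJSRegExp, 48 };
      std::printf("visited size = %d\n", table[regexp.id](regexp));
      return 0;
    }
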
diff --git a/src/objects.cc b/src/objects.cc
index fac83f1..b407c01 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -41,7 +41,6 @@
 #include "macro-assembler.h"
 #include "safepoint-table.h"
 #include "scanner-base.h"
-#include "scopeinfo.h"
 #include "string-stream.h"
 #include "utils.h"
 #include "vm-state-inl.h"
@@ -135,24 +134,22 @@
 void Object::Lookup(String* name, LookupResult* result) {
   Object* holder = NULL;
   if (IsSmi()) {
-    Heap* heap = Isolate::Current()->heap();
-    Context* global_context = heap->isolate()->context()->global_context();
+    Context* global_context = Isolate::Current()->context()->global_context();
     holder = global_context->number_function()->instance_prototype();
   } else {
     HeapObject* heap_object = HeapObject::cast(this);
     if (heap_object->IsJSObject()) {
       return JSObject::cast(this)->Lookup(name, result);
     }
-    Heap* heap = heap_object->GetHeap();
+    Context* global_context = Isolate::Current()->context()->global_context();
     if (heap_object->IsString()) {
-      Context* global_context = heap->isolate()->context()->global_context();
       holder = global_context->string_function()->instance_prototype();
     } else if (heap_object->IsHeapNumber()) {
-      Context* global_context = heap->isolate()->context()->global_context();
       holder = global_context->number_function()->instance_prototype();
     } else if (heap_object->IsBoolean()) {
-      Context* global_context = heap->isolate()->context()->global_context();
       holder = global_context->boolean_function()->instance_prototype();
+    } else if (heap_object->IsJSProxy()) {
+      return result->HandlerResult();
     }
   }
   ASSERT(holder != NULL);  // Cannot handle null or undefined.
@@ -177,11 +174,12 @@
                                              Object* holder) {
   Isolate* isolate = name->GetIsolate();
   // To accommodate both the old and the new api we switch on the
-  // data structure used to store the callbacks.  Eventually proxy
+  // data structure used to store the callbacks.  Eventually foreign
   // callbacks should be phased out.
-  if (structure->IsProxy()) {
+  if (structure->IsForeign()) {
     AccessorDescriptor* callback =
-        reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
+        reinterpret_cast<AccessorDescriptor*>(
+            Foreign::cast(structure)->address());
     MaybeObject* value = (callback->getter)(receiver, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return value;
@@ -228,6 +226,34 @@
 }
 
 
+MaybeObject* Object::GetPropertyWithHandler(Object* receiver_raw,
+                                            String* name_raw,
+                                            Object* handler_raw) {
+  Isolate* isolate = name_raw->GetIsolate();
+  HandleScope scope;
+  Handle<Object> receiver(receiver_raw);
+  Handle<Object> name(name_raw);
+  Handle<Object> handler(handler_raw);
+
+  // Extract trap function.
+  LookupResult lookup;
+  Handle<Object> trap(v8::internal::GetProperty(handler, "get", &lookup));
+  if (!lookup.IsFound()) {
+    // Get the derived `get' property.
+    trap = isolate->derived_get_trap();
+  }
+
+  // Call trap function.
+  Object** args[] = { receiver.location(), name.location() };
+  bool has_exception;
+  Handle<Object> result =
+      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+  if (has_exception) return Failure::Exception();
+
+  return *result;
+}
+
+
 MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
                                                   JSFunction* getter) {
   HandleScope scope;
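
GetPropertyWithHandler in the hunk above implements the Harmony proxy read path: fetch the handler's "get" trap, fall back to the isolate's derived trap if it is absent, then call it with the receiver and the property name. The sketch below mirrors that lookup-then-call flow with standard-library types; the names are illustrative, not V8's.

    #include <cstdio>
    #include <functional>
    #include <map>
    #include <string>

    using Value = std::string;
    using Trap = std::function<Value(const std::string& receiver,
                                     const std::string& name)>;

    // Stand-in for a proxy handler object: a bag of named traps.
    using Handler = std::map<std::string, Trap>;

    // Analogue of isolate->derived_get_trap(): the fallback used when the
    // handler does not define "get" itself.
    static Value DerivedGetTrap(const std::string& receiver,
                                const std::string& name) {
      return "default get for " + receiver + "." + name;
    }

    // Mirrors the shape of GetPropertyWithHandler: look up the trap, fall back
    // to the derived default, then call it with (receiver, name).
    static Value GetPropertyWithHandler(const std::string& receiver,
                                        const std::string& name,
                                        const Handler& handler) {
      Trap trap = DerivedGetTrap;
      auto it = handler.find("get");
      if (it != handler.end()) trap = it->second;
      return trap(receiver, name);
    }

    int main() {
      Handler handler;
      handler["get"] = [](const std::string& receiver, const std::string& name) {
        return "trapped " + receiver + "." + name;
      };
      std::printf("%s\n", GetPropertyWithHandler("obj", "x", handler).c_str());
      Handler empty;  // no "get" trap: the derived default is used
      std::printf("%s\n", GetPropertyWithHandler("obj", "x", empty).c_str());
      return 0;
    }
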
@@ -495,30 +521,34 @@
   Heap* heap = name->GetHeap();
 
   // Traverse the prototype chain from the current object (this) to
-  // the holder and check for access rights. This avoid traversing the
+  // the holder and check for access rights. This avoids traversing the
   // objects more than once in case of interceptors, because the
   // holder will always be the interceptor holder and the search may
   // only continue with a current object just after the interceptor
   // holder in the prototype chain.
-  Object* last = result->IsProperty() ? result->holder() : heap->null_value();
-  for (Object* current = this; true; current = current->GetPrototype()) {
-    if (current->IsAccessCheckNeeded()) {
-      // Check if we're allowed to read from the current object. Note
-      // that even though we may not actually end up loading the named
-      // property from the current object, we still check that we have
-      // access to it.
-      JSObject* checked = JSObject::cast(current);
-      if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
-        return checked->GetPropertyWithFailedAccessCheck(receiver,
-                                                         result,
-                                                         name,
-                                                         attributes);
+  // Proxy handlers do not use the proxy's prototype, so we can skip this.
+  if (!result->IsHandler()) {
+    Object* last = result->IsProperty() ? result->holder() : heap->null_value();
+    ASSERT(this != this->GetPrototype());
+    for (Object* current = this; true; current = current->GetPrototype()) {
+      if (current->IsAccessCheckNeeded()) {
+        // Check if we're allowed to read from the current object. Note
+        // that even though we may not actually end up loading the named
+        // property from the current object, we still check that we have
+        // access to it.
+        JSObject* checked = JSObject::cast(current);
+        if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
+          return checked->GetPropertyWithFailedAccessCheck(receiver,
+                                                           result,
+                                                           name,
+                                                           attributes);
+        }
       }
+      // Stop traversing the chain once we reach the last object in the
+      // chain; either the holder of the result or null in case of an
+      // absent property.
+      if (current == last) break;
     }
-    // Stop traversing the chain once we reach the last object in the
-    // chain; either the holder of the result or null in case of an
-    // absent property.
-    if (current == last) break;
   }
 
   if (!result->IsProperty()) {
@@ -544,14 +574,22 @@
                                      result->GetCallbackObject(),
                                      name,
                                      holder);
+    case HANDLER: {
+      JSProxy* proxy = JSProxy::cast(this);
+      return GetPropertyWithHandler(receiver, name, proxy->handler());
+    }
     case INTERCEPTOR: {
       JSObject* recvr = JSObject::cast(receiver);
       return holder->GetPropertyWithInterceptor(recvr, name, attributes);
     }
-    default:
-      UNREACHABLE();
-      return NULL;
+    case MAP_TRANSITION:
+    case EXTERNAL_ARRAY_TRANSITION:
+    case CONSTANT_TRANSITION:
+    case NULL_DESCRIPTOR:
+      break;
   }
+  UNREACHABLE();
+  return NULL;
 }
 
 
@@ -576,6 +614,8 @@
       holder = global_context->number_function()->instance_prototype();
     } else if (heap_object->IsBoolean()) {
       holder = global_context->boolean_function()->instance_prototype();
+    } else if (heap_object->IsJSProxy()) {
+      return heap->undefined_value();  // For now...
     } else {
       // Undefined and null have no indexed properties.
       ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
@@ -596,9 +636,10 @@
 
   HeapObject* heap_object = HeapObject::cast(this);
 
-  // The object is either a number, a string, a boolean, or a real JS object.
-  if (heap_object->IsJSObject()) {
-    return JSObject::cast(this)->map()->prototype();
+  // The object is either a number, a string, a boolean,
+  // a real JS object, or a Harmony proxy.
+  if (heap_object->IsJSObject() || heap_object->IsJSProxy()) {
+    return heap_object->map()->prototype();
   }
   Heap* heap = heap_object->GetHeap();
   Context* context = heap->isolate()->context()->global_context();
@@ -1045,6 +1086,10 @@
       accumulator->Add("<ExternalFloatArray[%u]>",
                        ExternalFloatArray::cast(this)->length());
       break;
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
+      accumulator->Add("<ExternalDoubleArray[%u]>",
+                       ExternalDoubleArray::cast(this)->length());
+      break;
     case SHARED_FUNCTION_INFO_TYPE:
       accumulator->Add("<SharedFunctionInfo>");
       break;
@@ -1082,8 +1127,8 @@
       HeapNumber::cast(this)->HeapNumberPrint(accumulator);
       accumulator->Put('>');
       break;
-    case PROXY_TYPE:
-      accumulator->Add("<Proxy>");
+    case FOREIGN_TYPE:
+      accumulator->Add("<Foreign>");
       break;
     case JS_GLOBAL_PROPERTY_CELL_TYPE:
       accumulator->Add("Cell for ");
@@ -1151,8 +1196,11 @@
     case ODDBALL_TYPE:
       Oddball::BodyDescriptor::IterateBody(this, v);
       break;
-    case PROXY_TYPE:
-      reinterpret_cast<Proxy*>(this)->ProxyIterateBody(v);
+    case JS_PROXY_TYPE:
+      JSProxy::BodyDescriptor::IterateBody(this, v);
+      break;
+    case FOREIGN_TYPE:
+      reinterpret_cast<Foreign*>(this)->ForeignIterateBody(v);
       break;
     case MAP_TYPE:
       Map::BodyDescriptor::IterateBody(this, v);
@@ -1174,6 +1222,7 @@
     case EXTERNAL_INT_ARRAY_TYPE:
     case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
     case EXTERNAL_FLOAT_ARRAY_TYPE:
+    case EXTERNAL_DOUBLE_ARRAY_TYPE:
       break;
     case SHARED_FUNCTION_INFO_TYPE:
       SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
@@ -1732,11 +1781,12 @@
   Handle<Object> value_handle(value, isolate);
 
   // To accommodate both the old and the new api we switch on the
-  // data structure used to store the callbacks.  Eventually proxy
+  // data structure used to store the callbacks.  Eventually foreign
   // callbacks should be phased out.
-  if (structure->IsProxy()) {
+  if (structure->IsForeign()) {
     AccessorDescriptor* callback =
-        reinterpret_cast<AccessorDescriptor*>(Proxy::cast(structure)->proxy());
+        reinterpret_cast<AccessorDescriptor*>(
+            Foreign::cast(structure)->address());
     MaybeObject* obj = (callback->setter)(this,  value, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (obj->IsFailure()) return obj;
@@ -2481,7 +2531,8 @@
                                     fast->inobject_properties()) &&
     slow->instance_type() == fast->instance_type() &&
     slow->bit_field() == fast->bit_field() &&
-    (slow->bit_field2() & ~(1<<Map::kIsShared)) == fast->bit_field2();
+    slow->bit_field2() == fast->bit_field2() &&
+    (slow->bit_field3() & ~(1<<Map::kIsShared)) == fast->bit_field3();
 }
 
 
@@ -2572,6 +2623,7 @@
       case CONSTANT_TRANSITION:
       case NULL_DESCRIPTOR:
       case INTERCEPTOR:
+      case EXTERNAL_ARRAY_TRANSITION:
         break;
       default:
         UNREACHABLE();
@@ -2602,7 +2654,7 @@
                                      instance_size_delta);
 
   set_map(new_map);
-  new_map->set_instance_descriptors(current_heap->empty_descriptor_array());
+  new_map->clear_instance_descriptors();
 
   set_properties(dictionary);
 
@@ -2844,6 +2896,7 @@
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
     case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
       // Pixel and external array elements cannot be deleted. Just
       // silently ignore here.
       break;
@@ -2858,8 +2911,9 @@
           // exception. dictionary->DeleteProperty will return false_value()
           // if a non-configurable property is being deleted.
           HandleScope scope;
+          Handle<Object> self(this);
           Handle<Object> i = isolate->factory()->NewNumberFromUint(index);
-          Handle<Object> args[2] = { i, Handle<Object>(this) };
+          Handle<Object> args[2] = { i, self };
           return isolate->Throw(*isolate->factory()->NewTypeError(
               "strict_delete_property", HandleVector(args, 2)));
         }
@@ -2963,6 +3017,7 @@
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
     case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
       // Raw pixels and external arrays do not reference other
       // objects.
       break;
@@ -3050,6 +3105,17 @@
     return JSObject::cast(proto)->PreventExtensions();
   }
 
+  // It's not possible to seal objects with external array elements.
+  if (HasExternalArrayElements()) {
+    HandleScope scope(isolate);
+    Handle<Object> object(this);
+    Handle<Object> error =
+        isolate->factory()->NewTypeError(
+            "cant_prevent_ext_external_array_elements",
+            HandleVector(&object, 1));
+    return isolate->Throw(*error);
+  }
+
   // If there are fast elements we normalize.
   if (HasFastElements()) {
     Object* ok;
@@ -3234,6 +3300,7 @@
       case EXTERNAL_INT_ELEMENTS:
       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
       case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
         // Ignore getters and setters on pixel and external array
         // elements.
         return heap->undefined_value();
@@ -3460,6 +3527,7 @@
       case EXTERNAL_INT_ELEMENTS:
       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
       case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
         // Ignore getters and setters on pixel and external array
         // elements.
         return isolate->heap()->undefined_value();
@@ -3587,8 +3655,7 @@
   // pointing to the same transition which is bad because the garbage
   // collector relies on being able to reverse pointers from transitions
   // to maps.  If properties need to be retained use CopyDropTransitions.
-  Map::cast(result)->set_instance_descriptors(
-      heap->empty_descriptor_array());
+  Map::cast(result)->clear_instance_descriptors();
   // Please note instance_type and instance_size are set when allocated.
   Map::cast(result)->set_inobject_properties(inobject_properties());
   Map::cast(result)->set_unused_property_fields(unused_property_fields());
@@ -3610,6 +3677,7 @@
   }
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
+  Map::cast(result)->set_bit_field3(bit_field3());
   Map::cast(result)->set_is_shared(false);
   Map::cast(result)->ClearCodeCache(heap);
   return result;
@@ -3638,6 +3706,7 @@
 
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
+  Map::cast(result)->set_bit_field3(bit_field3());
 
   Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
 
@@ -3716,7 +3785,7 @@
   Object** map_or_index_field = NULL;
   while (current != meta_map) {
     DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
-        *RawField(current, Map::kInstanceDescriptorsOffset));
+        *RawField(current, Map::kInstanceDescriptorsOrBitField3Offset));
     if (!d->IsEmpty()) {
       FixedArray* contents = reinterpret_cast<FixedArray*>(
           d->get(DescriptorArray::kContentArrayIndex));
@@ -3782,8 +3851,6 @@
 
 
 MaybeObject* CodeCache::Update(String* name, Code* code) {
-  ASSERT(code->ic_state() == MONOMORPHIC);
-
   // The number of monomorphic stubs for normal load/store/call IC's can grow to
   // a large number and therefore they need to go into a hash table. They are
   // used to load global properties from cells.
@@ -4218,6 +4285,7 @@
         heap->AllocateFixedArray(number_of_descriptors << 1);
     if (!maybe_array->ToObject(&array)) return maybe_array;
   }
+  result->set(kBitField3StorageIndex, Smi::FromInt(0));
   result->set(kContentArrayIndex, array);
   result->set(kEnumerationIndexIndex,
               Smi::FromInt(PropertyDetails::kInitialIndex));
@@ -4976,8 +5044,7 @@
 
 
 // Archive statics that are thread local.
-char* Relocatable::ArchiveState(char* to) {
-  Isolate* isolate = Isolate::Current();
+char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
   *reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top();
   isolate->set_relocatable_top(NULL);
   return to + ArchiveSpacePerThread();
@@ -4985,8 +5052,7 @@
 
 
 // Restore statics that are thread local.
-char* Relocatable::RestoreState(char* from) {
-  Isolate* isolate = Isolate::Current();
+char* Relocatable::RestoreState(Isolate* isolate, char* from) {
   isolate->set_relocatable_top(*reinterpret_cast<Relocatable**>(from));
   return from + ArchiveSpacePerThread();
 }
@@ -5481,8 +5547,16 @@
 bool String::IsAsciiEqualTo(Vector<const char> str) {
   int slen = length();
   if (str.length() != slen) return false;
-  for (int i = 0; i < slen; i++) {
-    if (Get(i) != static_cast<uint16_t>(str[i])) return false;
+  if (this->IsSeqAsciiString()) {
+    SeqAsciiString* seq = SeqAsciiString::cast(this);
+    char* ch = seq->GetChars();
+    for (int i = 0; i < slen; i++, ch++) {
+      if (*ch != str[i]) return false;
+    }
+  } else {
+    for (int i = 0; i < slen; i++) {
+      if (Get(i) != static_cast<uint16_t>(str[i])) return false;
+    }
   }
   return true;
 }
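
The String::IsAsciiEqualTo change above adds a fast path that walks the raw character buffer of sequential ASCII strings and only falls back to the generic Get(i) accessor for other representations. A small stand-alone analogue of that two-path comparison follows; ToyString and its methods are invented for illustration.

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy string with two representations: a flat ASCII buffer (fast path) or
    // a two-byte buffer reached through a generic accessor (slow path).
    class ToyString {
     public:
      explicit ToyString(const char* ascii) : ascii_(ascii), is_seq_ascii_(true) {}
      explicit ToyString(std::vector<uint16_t> two_byte)
          : two_byte_(std::move(two_byte)), is_seq_ascii_(false) {}

      int length() const {
        return is_seq_ascii_ ? static_cast<int>(ascii_.size())
                             : static_cast<int>(two_byte_.size());
      }
      bool IsSeqAscii() const { return is_seq_ascii_; }
      const char* GetChars() const { return ascii_.data(); }  // fast path
      uint16_t Get(int i) const {                             // slow path
        return is_seq_ascii_ ? static_cast<uint16_t>(ascii_[i]) : two_byte_[i];
      }

      // Mirrors the shape of String::IsAsciiEqualTo after the patch.
      bool IsAsciiEqualTo(const char* str, int slen) const {
        if (length() != slen) return false;
        if (IsSeqAscii()) {
          const char* ch = GetChars();
          for (int i = 0; i < slen; i++, ch++) {
            if (*ch != str[i]) return false;
          }
        } else {
          for (int i = 0; i < slen; i++) {
            if (Get(i) != static_cast<uint16_t>(str[i])) return false;
          }
        }
        return true;
      }

     private:
      std::string ascii_;
      std::vector<uint16_t> two_byte_;
      bool is_seq_ascii_;
    };

    int main() {
      ToyString a("hello");
      ToyString b(std::vector<uint16_t>{'h', 'e', 'l', 'l', 'o'});
      std::printf("%d %d\n", a.IsAsciiEqualTo("hello", 5), b.IsAsciiEqualTo("hello", 5));
      return 0;
    }
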
@@ -5676,8 +5750,8 @@
   // Live DescriptorArray objects will be marked, so we must use
   // low-level accessors to get and modify their data.
   DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
-      *RawField(this, Map::kInstanceDescriptorsOffset));
-  if (d == heap->raw_unchecked_empty_descriptor_array()) return;
+      *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
+  if (d->IsEmpty()) return;
   Smi* NullDescriptorDetails =
     PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
   FixedArray* contents = reinterpret_cast<FixedArray*>(
@@ -6071,6 +6145,29 @@
 }
 
 
+void SharedFunctionInfo::DisableOptimization(JSFunction* function) {
+  // Disable optimization for the shared function info and mark the
+  // code as non-optimizable. The marker on the shared function info
+  // is there because we flush non-optimized code thereby loosing the
+  // non-optimizable information for the code. When the code is
+  // regenerated and set on the shared function info it is marked as
+  // non-optimizable if optimization is disabled for the shared
+  // function info.
+  set_optimization_disabled(true);
+  // Code should be the lazy compilation stub or else unoptimized.  If the
+  // latter, disable optimization for the code too.
+  ASSERT(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
+  if (code()->kind() == Code::FUNCTION) {
+    code()->set_optimizable(false);
+  }
+  if (FLAG_trace_opt) {
+    PrintF("[disabled optimization for: ");
+    function->PrintName();
+    PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+  }
+}
+
+
 bool SharedFunctionInfo::VerifyBailoutId(int id) {
   // TODO(srdjan): debugging ARM crashes in hydrogen. OK to disable while
   // we are always bailing out on ARM.
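
The new SharedFunctionInfo::DisableOptimization above is intended to be
called when optimization of a function is abandoned. A minimal, hypothetical
call site (the real callers live elsewhere in the compiler and are not shown
in this diff):

    // Record that every closure sharing this SharedFunctionInfo should no
    // longer be considered for optimization.
    static void GiveUpOnOptimization(JSFunction* function) {
      function->shared()->DisableOptimization(function);
    }
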
@@ -6528,13 +6625,12 @@
     case BUILTIN: return "BUILTIN";
     case LOAD_IC: return "LOAD_IC";
     case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
-    case KEYED_EXTERNAL_ARRAY_LOAD_IC: return "KEYED_EXTERNAL_ARRAY_LOAD_IC";
     case STORE_IC: return "STORE_IC";
     case KEYED_STORE_IC: return "KEYED_STORE_IC";
-    case KEYED_EXTERNAL_ARRAY_STORE_IC: return "KEYED_EXTERNAL_ARRAY_STORE_IC";
     case CALL_IC: return "CALL_IC";
     case KEYED_CALL_IC: return "KEYED_CALL_IC";
-    case TYPE_RECORDING_BINARY_OP_IC: return "TYPE_RECORDING_BINARY_OP_IC";
+    case UNARY_OP_IC: return "UNARY_OP_IC";
+    case BINARY_OP_IC: return "BINARY_OP_IC";
     case COMPARE_IC: return "COMPARE_IC";
   }
   UNREACHABLE();
@@ -6563,6 +6659,7 @@
     case FIELD: return "FIELD";
     case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION";
     case CALLBACKS: return "CALLBACKS";
+    case HANDLER: return "HANDLER";
     case INTERCEPTOR: return "INTERCEPTOR";
     case MAP_TRANSITION: return "MAP_TRANSITION";
     case EXTERNAL_ARRAY_TRANSITION: return "EXTERNAL_ARRAY_TRANSITION";
@@ -7047,7 +7144,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       ExternalArray* array = ExternalArray::cast(elements());
       if (index < static_cast<uint32_t>(array->length())) {
         return true;
@@ -7169,7 +7267,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       ExternalArray* array = ExternalArray::cast(elements());
       if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
       break;
@@ -7228,7 +7327,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       ExternalArray* array = ExternalArray::cast(elements());
       if (index < static_cast<uint32_t>(array->length())) {
         return true;
@@ -7299,11 +7399,11 @@
                                               uint32_t index,
                                               Object* holder) {
   Isolate* isolate = GetIsolate();
-  ASSERT(!structure->IsProxy());
+  ASSERT(!structure->IsForeign());
 
   // api style callbacks.
   if (structure->IsAccessorInfo()) {
-    AccessorInfo* data = AccessorInfo::cast(structure);
+    Handle<AccessorInfo> data(AccessorInfo::cast(structure));
     Object* fun_obj = data->getter();
     v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
     HandleScope scope(isolate);
@@ -7354,20 +7454,22 @@
   Handle<Object> value_handle(value, isolate);
 
   // To accommodate both the old and the new api we switch on the
-  // data structure used to store the callbacks.  Eventually proxy
+  // data structure used to store the callbacks.  Eventually foreign
   // callbacks should be phased out.
-  ASSERT(!structure->IsProxy());
+  ASSERT(!structure->IsForeign());
 
   if (structure->IsAccessorInfo()) {
     // api style callbacks
-    AccessorInfo* data = AccessorInfo::cast(structure);
+    Handle<JSObject> self(this);
+    Handle<JSObject> holder_handle(JSObject::cast(holder));
+    Handle<AccessorInfo> data(AccessorInfo::cast(structure));
     Object* call_obj = data->setter();
     v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
     if (call_fun == NULL) return value;
     Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
     Handle<String> key(isolate->factory()->NumberToString(number));
-    LOG(isolate, ApiNamedPropertyAccess("store", this, *key));
-    CustomArguments args(isolate, data->data(), this, JSObject::cast(holder));
+    LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
+    CustomArguments args(isolate, data->data(), *self, *holder_handle);
     v8::AccessorInfo info(args.end());
     {
       // Leaving JavaScript.
@@ -7549,6 +7651,10 @@
       ExternalFloatArray* array = ExternalFloatArray::cast(elements());
       return array->SetValue(index, value);
     }
+    case EXTERNAL_DOUBLE_ELEMENTS: {
+      ExternalDoubleArray* array = ExternalDoubleArray::cast(elements());
+      return array->SetValue(index, value);
+    }
     case DICTIONARY_ELEMENTS: {
       // Insert element in the dictionary.
       FixedArray* elms = FixedArray::cast(elements());
@@ -7565,8 +7671,8 @@
           // If put fails in strict mode, throw exception.
           if (!dictionary->ValueAtPut(entry, value) &&
               strict_mode == kStrictMode) {
-            Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
             Handle<Object> holder(this);
+            Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
             Handle<Object> args[2] = { number, holder };
             return isolate->Throw(
                 *isolate->factory()->NewTypeError("strict_read_only_property",
@@ -7690,7 +7796,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       MaybeObject* maybe_value = GetExternalElement(index);
       Object* value;
       if (!maybe_value->ToObject(&value)) return maybe_value;
@@ -7792,7 +7899,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       MaybeObject* maybe_value = GetExternalElement(index);
       Object* value;
       if (!maybe_value->ToObject(&value)) return maybe_value;
@@ -7895,6 +8003,14 @@
       }
       break;
     }
+    case EXTERNAL_DOUBLE_ELEMENTS: {
+      ExternalDoubleArray* array = ExternalDoubleArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        double value = array->get(index);
+        return GetHeap()->AllocateHeapNumber(value);
+      }
+      break;
+    }
     case FAST_ELEMENTS:
     case DICTIONARY_ELEMENTS:
       UNREACHABLE();
@@ -7924,7 +8040,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       return true;
     }
     case DICTIONARY_ELEMENTS: {
@@ -8161,7 +8278,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       ExternalArray* array = ExternalArray::cast(elements());
       return index < static_cast<uint32_t>(array->length());
     }
@@ -8342,8 +8460,7 @@
     }
     ASSERT(storage->length() >= index);
   } else {
-    property_dictionary()->CopyKeysTo(storage,
-                                      index);
+    property_dictionary()->CopyKeysTo(storage, index);
   }
 }
 
@@ -8403,7 +8520,8 @@
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS: {
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
       int length = ExternalArray::cast(elements())->length();
       while (counter < length) {
         if (storage != NULL) {
@@ -8688,6 +8806,71 @@
 };
 
 
+class SubStringAsciiSymbolKey : public HashTableKey {
+ public:
+  explicit SubStringAsciiSymbolKey(Handle<SeqAsciiString> string,
+                                   int from,
+                                   int length)
+      : string_(string), from_(from), length_(length) { }
+
+  uint32_t Hash() {
+    ASSERT(length_ >= 0);
+    ASSERT(from_ + length_ <= string_->length());
+    StringHasher hasher(length_);
+
+    // Very long strings have a trivial hash that doesn't inspect the
+    // string contents.
+    if (hasher.has_trivial_hash()) {
+      hash_field_ = hasher.GetHashField();
+    } else {
+      int i = 0;
+      // Do the iterative array index computation as long as there is a
+      // chance this is an array index.
+      while (i < length_ && hasher.is_array_index()) {
+        hasher.AddCharacter(static_cast<uc32>(
+            string_->SeqAsciiStringGet(i + from_)));
+        i++;
+      }
+
+      // Process the remaining characters without updating the array
+      // index.
+      while (i < length_) {
+        hasher.AddCharacterNoIndex(static_cast<uc32>(
+            string_->SeqAsciiStringGet(i + from_)));
+        i++;
+      }
+      hash_field_ = hasher.GetHashField();
+    }
+
+    uint32_t result = hash_field_ >> String::kHashShift;
+    ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
+    return result;
+  }
+
+
+  uint32_t HashForObject(Object* other) {
+    return String::cast(other)->Hash();
+  }
+
+  bool IsMatch(Object* string) {
+    Vector<const char> chars(string_->GetChars() + from_, length_);
+    return String::cast(string)->IsAsciiEqualTo(chars);
+  }
+
+  MaybeObject* AsObject() {
+    if (hash_field_ == 0) Hash();
+    Vector<const char> chars(string_->GetChars() + from_, length_);
+    return HEAP->AllocateAsciiSymbol(chars, hash_field_);
+  }
+
+ private:
+  Handle<SeqAsciiString> string_;
+  int from_;
+  int length_;
+  uint32_t hash_field_;
+};
+
+
 class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
  public:
   explicit TwoByteSymbolKey(Vector<const uc16> str)
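
SubStringAsciiSymbolKey hashes and compares directly against a slice of the
backing SeqAsciiString, so a substring can be interned without first
allocating a temporary string. A minimal sketch of the lookup path through
the SymbolTable entry point added later in this diff; the wrapper itself is
hypothetical:

    // Interns characters [from, from + length) of 'source' as a symbol.
    // On success the symbol is written through 'symbol'.
    static MaybeObject* InternSubstring(SymbolTable* table,
                                        Handle<SeqAsciiString> source,
                                        int from, int length,
                                        Object** symbol) {
      return table->LookupSubStringAsciiSymbol(source, from, length, symbol);
    }
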
@@ -9318,6 +9501,26 @@
 }
 
 
+MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
+  double double_value = 0;
+  Heap* heap = GetHeap();
+  if (index < static_cast<uint32_t>(length())) {
+    if (value->IsSmi()) {
+      int int_value = Smi::cast(value)->value();
+      double_value = static_cast<double>(int_value);
+    } else if (value->IsHeapNumber()) {
+      double_value = HeapNumber::cast(value)->value();
+    } else {
+      // Clamp undefined to zero (default). All other types have been
+      // converted to a number type further up in the call chain.
+      ASSERT(value->IsUndefined());
+    }
+    set(index, double_value);
+  }
+  return heap->AllocateHeapNumber(double_value);
+}
+
+
 JSGlobalPropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
   ASSERT(!HasFastProperties());
   Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
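
The conversion rules in ExternalDoubleArray::SetValue above, restated as a
small standalone C++ sketch (independent of the V8 object model) to make the
behavior explicit:

    // Smis widen to double, heap numbers pass through, undefined stores
    // 0.0; all other types were converted to numbers earlier in the call
    // chain. Out-of-range indices store nothing and yield 0.0.
    double StoreDoubleElement(double* backing, unsigned length,
                              unsigned index, bool is_smi, int smi_value,
                              bool is_heap_number, double number_value) {
      double value = 0.0;
      if (index < length) {
        if (is_smi) {
          value = static_cast<double>(smi_value);
        } else if (is_heap_number) {
          value = number_value;
        }
        backing[index] = value;
      }
      return value;  // The caller wraps this in a fresh HeapNumber.
    }
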
@@ -9462,6 +9665,15 @@
 }
 
 
+MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(Handle<SeqAsciiString> str,
+                                                     int from,
+                                                     int length,
+                                                     Object** s) {
+  SubStringAsciiSymbolKey key(str, from, length);
+  return LookupKey(&key, s);
+}
+
+
 MaybeObject* SymbolTable::LookupTwoByteSymbol(Vector<const uc16> str,
                                               Object** s) {
   TwoByteSymbolKey key(str);
@@ -9984,7 +10196,8 @@
 
 template<typename Shape, typename Key>
 void Dictionary<Shape, Key>::CopyKeysTo(
-    FixedArray* storage, int index) {
+    FixedArray* storage,
+    int index) {
   ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
       static_cast<PropertyAttributes>(NONE)));
   int capacity = HashTable<Shape, Key>::Capacity();
diff --git a/src/objects.h b/src/objects.h
index 72daad9..332b2e4 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -28,7 +28,9 @@
 #ifndef V8_OBJECTS_H_
 #define V8_OBJECTS_H_
 
+#include "allocation.h"
 #include "builtins.h"
+#include "list.h"
 #include "smart-pointer.h"
 #include "unicode-inl.h"
 #if V8_TARGET_ARCH_ARM
@@ -89,7 +91,8 @@
 //       - Code
 //       - Map
 //       - Oddball
-//       - Proxy
+//       - JSProxy
+//       - Foreign
 //       - SharedFunctionInfo
 //       - Struct
 //         - AccessorInfo
@@ -286,7 +289,8 @@
   V(JS_GLOBAL_PROPERTY_CELL_TYPE)                                              \
                                                                                \
   V(HEAP_NUMBER_TYPE)                                                          \
-  V(PROXY_TYPE)                                                                \
+  V(JS_PROXY_TYPE)                                                             \
+  V(FOREIGN_TYPE)                                                              \
   V(BYTE_ARRAY_TYPE)                                                           \
   /* Note: the order of these external array */                                \
   /* types is relied upon in */                                                \
@@ -484,7 +488,6 @@
 
 enum InstanceType {
   // String types.
-  // FIRST_STRING_TYPE
   SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
   ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
   CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
@@ -514,7 +517,8 @@
   // "Data", objects that cannot contain non-map-word pointers to heap
   // objects.
   HEAP_NUMBER_TYPE,
-  PROXY_TYPE,
+  FOREIGN_TYPE,
+  JS_PROXY_TYPE,
   BYTE_ARRAY_TYPE,
   EXTERNAL_BYTE_ARRAY_TYPE,  // FIRST_EXTERNAL_ARRAY_TYPE
   EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
@@ -523,6 +527,7 @@
   EXTERNAL_INT_ARRAY_TYPE,
   EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
   EXTERNAL_FLOAT_ARRAY_TYPE,
+  EXTERNAL_DOUBLE_ARRAY_TYPE,
   EXTERNAL_PIXEL_ARRAY_TYPE,  // LAST_EXTERNAL_ARRAY_TYPE
   FILLER_TYPE,  // LAST_DATA_TYPE
 
@@ -566,8 +571,6 @@
   LAST_TYPE = JS_FUNCTION_TYPE,
   INVALID_TYPE = FIRST_TYPE - 1,
   FIRST_NONSTRING_TYPE = MAP_TYPE,
-  FIRST_STRING_TYPE = FIRST_TYPE,
-  LAST_STRING_TYPE = FIRST_NONSTRING_TYPE - 1,
   // Boundaries for testing for an external array.
   FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
   LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE,
@@ -588,7 +591,7 @@
 
 STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
 STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
-STATIC_CHECK(PROXY_TYPE == Internals::kProxyType);
+STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
 
 
 enum CompareResult {
@@ -613,7 +616,6 @@
 
 class StringStream;
 class ObjectVisitor;
-class Failure;
 
 struct ValueInfo : public Malloced {
   ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -627,6 +629,7 @@
 // A template-ized version of the IsXXX functions.
 template <class C> static inline bool Is(Object* obj);
 
+class Failure;
 
 class MaybeObject BASE_EMBEDDED {
  public:
@@ -703,6 +706,7 @@
   V(ExternalIntArray)                          \
   V(ExternalUnsignedIntArray)                  \
   V(ExternalFloatArray)                        \
+  V(ExternalDoubleArray)                       \
   V(ExternalPixelArray)                        \
   V(ByteArray)                                 \
   V(JSObject)                                  \
@@ -722,9 +726,10 @@
   V(JSValue)                                   \
   V(JSMessageObject)                           \
   V(StringWrapper)                             \
-  V(Proxy)                                     \
+  V(Foreign)                                   \
   V(Boolean)                                   \
   V(JSArray)                                   \
+  V(JSProxy)                                   \
   V(JSRegExp)                                  \
   V(HashTable)                                 \
   V(Dictionary)                                \
@@ -809,6 +814,9 @@
                                                        Object* structure,
                                                        String* name,
                                                        Object* holder);
+  MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(Object* receiver,
+                                                      String* name,
+                                                      Object* handler);
   MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
                                                             JSFunction* getter);
 
@@ -1348,6 +1356,7 @@
     EXTERNAL_INT_ELEMENTS,
     EXTERNAL_UNSIGNED_INT_ELEMENTS,
     EXTERNAL_FLOAT_ELEMENTS,
+    EXTERNAL_DOUBLE_ELEMENTS,
     EXTERNAL_PIXEL_ELEMENTS
   };
 
@@ -1389,6 +1398,7 @@
   inline bool HasExternalIntElements();
   inline bool HasExternalUnsignedIntElements();
   inline bool HasExternalFloatElements();
+  inline bool HasExternalDoubleElements();
   inline bool AllowsSetElementsLength();
   inline NumberDictionary* element_dictionary();  // Gets slow elements.
   // Requires: this->HasFastElements().
@@ -2029,16 +2039,22 @@
 
 // DescriptorArrays are fixed arrays used to hold instance descriptors.
 // The format of these objects is:
-//   [0]: point to a fixed array with (value, detail) pairs.
-//   [1]: next enumeration index (Smi), or pointer to small fixed array:
+// TODO(1399): It should be possible to make room for bit_field3 in the map
+//             without overloading the instance descriptors field in the map
+//             (and storing it in the DescriptorArray when the map has one).
+//   [0]: storage for bit_field3 for Map owning this object (Smi)
+//   [1]: pointer to a fixed array with (value, detail) pairs.
+//   [2]: next enumeration index (Smi), or pointer to small fixed array:
 //          [0]: next enumeration index (Smi)
 //          [1]: pointer to fixed array with enum cache
-//   [2]: first key
+//   [3]: first key
 //   [length() - 1]: last key
 //
 class DescriptorArray: public FixedArray {
  public:
-  // Is this the singleton empty_descriptor_array?
+  // Returns true for both the shared empty_descriptor_array and for Smis,
+  // which the map uses to encode additional bit fields when the descriptor
+  // array is not yet used.
   inline bool IsEmpty();
 
   // Returns the number of descriptors in the array.
@@ -2075,6 +2091,12 @@
     return bridge->get(kEnumCacheBridgeCacheIndex);
   }
 
+  // TODO(1399): It should be possible to make room for bit_field3 in the map
+  //             without overloading the instance descriptors field in the map
+  //             (and storing it in the DescriptorArray when the map has one).
+  inline int bit_field3_storage();
+  inline void set_bit_field3_storage(int value);
+
   // Initialize or change the enum cache,
   // using the supplied storage for the small "bridge".
   void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache);
@@ -2153,9 +2175,10 @@
   // Constant for denoting key was not found.
   static const int kNotFound = -1;
 
-  static const int kContentArrayIndex = 0;
-  static const int kEnumerationIndexIndex = 1;
-  static const int kFirstIndex = 2;
+  static const int kBitField3StorageIndex = 0;
+  static const int kContentArrayIndex = 1;
+  static const int kEnumerationIndexIndex = 2;
+  static const int kFirstIndex = 3;
 
   // The length of the "bridge" to the enum cache.
   static const int kEnumCacheBridgeLength = 2;
@@ -2163,7 +2186,8 @@
   static const int kEnumCacheBridgeCacheIndex = 1;
 
   // Layout description.
-  static const int kContentArrayOffset = FixedArray::kHeaderSize;
+  static const int kBitField3StorageOffset = FixedArray::kHeaderSize;
+  static const int kContentArrayOffset = kBitField3StorageOffset + kPointerSize;
   static const int kEnumerationIndexOffset = kContentArrayOffset + kPointerSize;
   static const int kFirstOffset = kEnumerationIndexOffset + kPointerSize;
 
@@ -2427,6 +2451,8 @@
   static const int kEntrySize = 1;
 };
 
+class SeqAsciiString;
+
 // SymbolTable.
 //
 // No special elements in the prefix and the element size is 1
@@ -2440,6 +2466,11 @@
   MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str, Object** s);
   MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str,
                                                  Object** s);
+  MUST_USE_RESULT MaybeObject* LookupSubStringAsciiSymbol(
+      Handle<SeqAsciiString> str,
+      int from,
+      int length,
+      Object** s);
   MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str,
                                                    Object** s);
   MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s);
@@ -3105,6 +3136,34 @@
 };
 
 
+class ExternalDoubleArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline double get(int index);
+  inline void set(int index, double value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  MaybeObject* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalDoubleArray* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void ExternalDoubleArrayPrint() {
+    ExternalDoubleArrayPrint(stdout);
+  }
+  void ExternalDoubleArrayPrint(FILE* out);
+#endif  // OBJECT_PRINT
+#ifdef DEBUG
+  void ExternalDoubleArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalDoubleArray);
+};
+
+
 // DeoptimizationInputData is a fixed array used to hold the deoptimization
 // data for code generated by the Hydrogen/Lithium compiler.  It also
 // contains information about functions that were inlined.  If N different
@@ -3243,13 +3302,12 @@
     BUILTIN,
     LOAD_IC,
     KEYED_LOAD_IC,
-    KEYED_EXTERNAL_ARRAY_LOAD_IC,
     CALL_IC,
     KEYED_CALL_IC,
     STORE_IC,
     KEYED_STORE_IC,
-    KEYED_EXTERNAL_ARRAY_STORE_IC,
-    TYPE_RECORDING_BINARY_OP_IC,
+    UNARY_OP_IC,
+    BINARY_OP_IC,
     COMPARE_IC,
     // No more than 16 kinds. The value currently encoded in four bits in
     // Flags.
@@ -3291,6 +3349,12 @@
   // [deoptimization_data]: Array containing data for deopt.
   DECL_ACCESSORS(deoptimization_data, FixedArray)
 
+  // [next_code_flushing_candidate]: Field only used during garbage
+  // collection to hold code flushing candidates. The contents of this
+  // field do not have to be traced during garbage collection since
+  // it is only used by the garbage collector itself.
+  DECL_ACCESSORS(next_code_flushing_candidate, Object)
+
   // Unchecked accessors to be used during GC.
   inline ByteArray* unchecked_relocation_info();
   inline FixedArray* unchecked_deoptimization_data();
@@ -3317,16 +3381,13 @@
   inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
   inline bool is_call_stub() { return kind() == CALL_IC; }
   inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
-  inline bool is_type_recording_binary_op_stub() {
-    return kind() == TYPE_RECORDING_BINARY_OP_IC;
+  inline bool is_unary_op_stub() {
+    return kind() == UNARY_OP_IC;
+  }
+  inline bool is_binary_op_stub() {
+    return kind() == BINARY_OP_IC;
   }
   inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
-  inline bool is_external_array_load_stub() {
-    return kind() == KEYED_EXTERNAL_ARRAY_LOAD_IC;
-  }
-  inline bool is_external_array_store_stub() {
-    return kind() == KEYED_EXTERNAL_ARRAY_STORE_IC;
-  }
 
   // [major_key]: For kind STUB or BINARY_OP_IC, the major key.
   inline int major_key();
@@ -3374,11 +3435,15 @@
   inline ExternalArrayType external_array_type();
   inline void set_external_array_type(ExternalArrayType value);
 
+  // [type-recording unary op type]: For all UNARY_OP_IC.
+  inline byte unary_op_type();
+  inline void set_unary_op_type(byte value);
+
   // [type-recording binary op type]: For all TYPE_RECORDING_BINARY_OP_IC.
-  inline byte type_recording_binary_op_type();
-  inline void set_type_recording_binary_op_type(byte value);
-  inline byte type_recording_binary_op_result_type();
-  inline void set_type_recording_binary_op_result_type(byte value);
+  inline byte binary_op_type();
+  inline void set_binary_op_type(byte value);
+  inline byte binary_op_result_type();
+  inline void set_binary_op_result_type(byte value);
 
   // [compare state]: For kind compare IC stubs, tells what state the
   // stub is in.
@@ -3504,9 +3569,12 @@
   static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
   static const int kDeoptimizationDataOffset =
       kRelocationInfoOffset + kPointerSize;
-  static const int kFlagsOffset = kDeoptimizationDataOffset + kPointerSize;
-  static const int kKindSpecificFlagsOffset  = kFlagsOffset + kIntSize;
+  static const int kNextCodeFlushingCandidateOffset =
+      kDeoptimizationDataOffset + kPointerSize;
+  static const int kFlagsOffset =
+      kNextCodeFlushingCandidateOffset + kPointerSize;
 
+  static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
   static const int kKindSpecificFlagsSize = 2 * kIntSize;
 
   static const int kHeaderPaddingStart = kKindSpecificFlagsOffset +
@@ -3525,6 +3593,7 @@
   static const int kExternalArrayTypeOffset = kKindSpecificFlagsOffset;
 
   static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
+  static const int kUnaryOpTypeOffset = kStubMajorKeyOffset + 1;
   static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
   static const int kHasDeoptimizationSupportOffset = kOptimizableOffset + 1;
 
@@ -3597,6 +3666,13 @@
   inline byte bit_field2();
   inline void set_bit_field2(byte value);
 
+  // Bit field 3.
+  // TODO(1399): It should be possible to make room for bit_field3 in the map
+  // without overloading the instance descriptors field (and storing it in the
+  // DescriptorArray when the map has one).
+  inline int bit_field3();
+  inline void set_bit_field3(int value);
+
   // Tells whether the object in the prototype property will be used
   // for instances created from this function.  If the prototype
   // property is set to a value that is not a JSObject, the prototype
@@ -3718,9 +3794,17 @@
 
   inline JSFunction* unchecked_constructor();
 
+  // Should only be called by code that initializes the map, to set the
+  // initial valid value of the instance descriptors member.
+  inline void init_instance_descriptors();
+
   // [instance descriptors]: describes the object.
   DECL_ACCESSORS(instance_descriptors, DescriptorArray)
 
+  // Sets the instance descriptor array for the map to be an empty descriptor
+  // array.
+  inline void clear_instance_descriptors();
+
   // [stub cache]: contains stubs compiled for this map.
   DECL_ACCESSORS(code_cache, Object)
 
@@ -3846,9 +3930,19 @@
   static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
   static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
   static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
-  static const int kInstanceDescriptorsOffset =
+  // Storage for instance descriptors is overloaded to also contain additional
+  // map flags when unused (bit_field3). When the map has instance descriptors,
+  // the flags are transferred to the instance descriptor array and accessed
+  // through an extra indirection.
+  // TODO(1399): It should be possible to make room for bit_field3 in the map
+  // without overloading the instance descriptors field, but the map is
+  // currently perfectly aligned to 32 bytes and extending it at all would
+  // double its size.  After the incremental GC work lands, this restriction
+  // could be loosened and bit_field3 moved directly back into the map.
+  static const int kInstanceDescriptorsOrBitField3Offset =
       kConstructorOffset + kPointerSize;
-  static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
+  static const int kCodeCacheOffset =
+      kInstanceDescriptorsOrBitField3Offset + kPointerSize;
   static const int kPrototypeTransitionsOffset =
       kCodeCacheOffset + kPointerSize;
   static const int kPadStart = kPrototypeTransitionsOffset + kPointerSize;
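
Combined with the DescriptorArray change earlier in this diff, the
overloaded slot can be read roughly as sketched below. The real accessors
(bit_field3(), bit_field3_storage()) are declared here but defined in the
corresponding inline header, which is not part of this diff, so the body
below is an assumption based only on the comments and constants shown:

    // The slot holds either a Smi (bit_field3 directly, no descriptors
    // yet) or a DescriptorArray whose kBitField3StorageIndex entry holds
    // bit_field3.
    static int ReadBitField3(Map* map) {
      Object* slot =
          *RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
      if (slot->IsSmi()) return Smi::cast(slot)->value();
      DescriptorArray* descriptors = DescriptorArray::cast(slot);
      Object* stored =
          descriptors->get(DescriptorArray::kBitField3StorageIndex);
      return Smi::cast(stored)->value();
    }
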
@@ -3895,8 +3989,10 @@
   static const int kHasFastElements = 2;
   static const int kStringWrapperSafeForDefaultValueOf = 3;
   static const int kAttachedToSharedFunctionInfo = 4;
-  static const int kIsShared = 5;
-  static const int kHasExternalArrayElements = 6;
+  static const int kHasExternalArrayElements = 5;
+
+  // Bit positions for bit field 3
+  static const int kIsShared = 1;
 
   // Layout of the default cache. It holds alternating name and code objects.
   static const int kCodeCacheEntrySize = 2;
@@ -3913,7 +4009,7 @@
 
 
 // An abstract superclass, a marker class really, for simple structure classes.
-// It doesn't carry much functionality but allows struct classes to me
+// It doesn't carry much functionality but allows struct classes to be
 // identified in the type system.
 class Struct: public HeapObject {
  public:
@@ -3961,7 +4057,7 @@
   DECL_ACCESSORS(context_data, Object)
 
   // [wrapper]: the wrapper cache.
-  DECL_ACCESSORS(wrapper, Proxy)
+  DECL_ACCESSORS(wrapper, Foreign)
 
   // [type]: the script type.
   DECL_ACCESSORS(type, Smi)
@@ -4307,6 +4403,13 @@
   inline bool strict_mode();
   inline void set_strict_mode(bool value);
 
+  // Indicates whether the function is a native ES5 function.
+  // These need special treatment in .call and .apply since
+  // null passed as the receiver should not be translated to the
+  // global object.
+  inline bool es5_native();
+  inline void set_es5_native(bool value);
+
   // Indicates whether or not the code in the shared function support
   // deoptimization.
   inline bool has_deoptimization_support();
@@ -4314,6 +4417,11 @@
   // Enable deoptimization support through recompiled code.
   void EnableDeoptimizationSupport(Code* recompiled);
 
+  // Disable (further) attempted optimization of all functions sharing this
+  // shared function info.  The function is the one we actually tried to
+  // optimize.
+  void DisableOptimization(JSFunction* function);
+
   // Lookup the bailout ID and ASSERT that it exists in the non-optimized
   // code, returns whether it asserted (i.e., always true if assertions are
   // disabled).
@@ -4487,6 +4595,7 @@
   static const int kCodeAgeMask = 0x7;
   static const int kOptimizationDisabled = 6;
   static const int kStrictModeFunction = 7;
+  static const int kES5Native = 8;
 
  private:
 #if V8_HOST_ARCH_32_BIT
@@ -4500,18 +4609,27 @@
 #endif
 
  public:
-  // Constants for optimizing codegen for strict mode function tests.
+  // Constants for optimizing codegen for strict mode function and
+  // es5 native tests.
   // Allows the use of byte-width instructions.
   static const int kStrictModeBitWithinByte =
       (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
 
+  static const int kES5NativeBitWithinByte =
+      (kES5Native + kCompilerHintsSmiTagSize) % kBitsPerByte;
+
 #if __BYTE_ORDER == __LITTLE_ENDIAN
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
-    (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+      (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+  static const int kES5NativeByteOffset = kCompilerHintsOffset +
+      (kES5Native + kCompilerHintsSmiTagSize) / kBitsPerByte;
 #elif __BYTE_ORDER == __BIG_ENDIAN
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
-    (kCompilerHintsSize - 1) -
-    ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+      (kCompilerHintsSize - 1) -
+      ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+  static const int kES5NativeByteOffset = kCompilerHintsOffset +
+      (kCompilerHintsSize - 1) -
+      ((kES5Native + kCompilerHintsSmiTagSize) / kBitsPerByte);
 #else
 #error Unknown byte ordering
 #endif
@@ -4685,7 +4803,7 @@
 
 class JSGlobalProxy : public JSObject {
  public:
-  // [context]: the owner global context of this proxy object.
+  // [context]: the owner global context of this global proxy object.
   // It is null value if this object is not used by any context.
   DECL_ACCESSORS(context, Object)
 
@@ -4936,8 +5054,10 @@
 // If it is an atom regexp
 // - a reference to a literal string to search for
 // If it is an irregexp regexp:
-// - a reference to code for ASCII inputs (bytecode or compiled).
-// - a reference to code for UC16 inputs (bytecode or compiled).
+// - a reference to code for ASCII inputs (bytecode or compiled), or a smi
+// used for tracking the last usage (used for code flushing).
+// - a reference to code for UC16 inputs (bytecode or compiled), or a smi
+// used for tracking the last usage (used for code flushing).
 // - max number of registers used by irregexp implementations.
 // - number of capture registers (output values) of the regexp.
 class JSRegExp: public JSObject {
@@ -4970,6 +5090,12 @@
   inline Object* DataAt(int index);
   // Set implementation data after the object has been prepared.
   inline void SetDataAt(int index, Object* value);
+
+  // Used during GC when flushing code or setting age.
+  inline Object* DataAtUnchecked(int index);
+  inline void SetDataAtUnchecked(int index, Object* value, Heap* heap);
+  inline Type TypeTagUnchecked();
+
   static int code_index(bool is_ascii) {
     if (is_ascii) {
       return kIrregexpASCIICodeIndex;
@@ -4978,6 +5104,14 @@
     }
   }
 
+  static int saved_code_index(bool is_ascii) {
+    if (is_ascii) {
+      return kIrregexpASCIICodeSavedIndex;
+    } else {
+      return kIrregexpUC16CodeSavedIndex;
+    }
+  }
+
   static inline JSRegExp* cast(Object* obj);
 
   // Dispatched behavior.
@@ -5008,11 +5142,19 @@
   // fails, this fields hold an exception object that should be
   // thrown if the regexp is used again.
   static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
+
+  // Saved instance of Irregexp compiled code or bytecode for ASCII that
+  // is a potential candidate for flushing.
+  static const int kIrregexpASCIICodeSavedIndex = kDataIndex + 2;
+  // Saved instance of Irregexp compiled code or bytecode for UC16 that is
+  // a potential candidate for flushing.
+  static const int kIrregexpUC16CodeSavedIndex = kDataIndex + 3;
+
   // Maximal number of registers used by either ASCII or UC16.
   // Only used to check that there is enough stack space
-  static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 2;
+  static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
   // Number of captures in the compiled regexp.
-  static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
+  static const int kIrregexpCaptureCountIndex = kDataIndex + 5;
 
   static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;
 
@@ -5033,6 +5175,18 @@
   static const int kMultilineFieldIndex = 3;
   static const int kLastIndexFieldIndex = 4;
   static const int kInObjectFieldCount = 5;
+
+  // The uninitialized value for a regexp code object.
+  static const int kUninitializedValue = -1;
+
+  // The compilation error value for the regexp code object. The real error
+  // object is in the saved code field.
+  static const int kCompilationErrorValue = -2;
+
+  // When we store the sweep generation at which we moved the code from the
+  // code index to the saved code index, we mask it off to keep it in the
+  // [0:255] range.
+  static const int kCodeAgeMask = 0xff;
 };
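
Based on the comments and constants above, flushing regexp code amounts to
moving the compiled code from the code slot to the saved slot and leaving a
masked sweep-generation Smi behind. The GC-side routine is not part of this
diff; the sketch below is a hypothetical illustration using the unchecked
accessors declared above:

    // Hypothetical flushing step for one (regexp, encoding) pair.
    static void SaveRegExpCodeForFlushing(JSRegExp* re, bool is_ascii,
                                          int sweep_generation) {
      Heap* heap = re->GetHeap();
      int code_index = JSRegExp::code_index(is_ascii);
      int saved_index = JSRegExp::saved_code_index(is_ascii);
      // Park the code object in the saved slot...
      re->SetDataAtUnchecked(saved_index,
                             re->DataAtUnchecked(code_index), heap);
      // ...and record when it was parked, masked into [0:255].
      re->SetDataAtUnchecked(
          code_index,
          Smi::FromInt(sweep_generation & JSRegExp::kCodeAgeMask), heap);
    }
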
 
 
@@ -5889,8 +6043,8 @@
 
   static void PostGarbageCollectionProcessing();
   static int ArchiveSpacePerThread();
-  static char* ArchiveState(char* to);
-  static char* RestoreState(char* from);
+  static char* ArchiveState(Isolate* isolate, char* to);
+  static char* RestoreState(Isolate* isolate, char* from);
   static void Iterate(ObjectVisitor* v);
   static void Iterate(ObjectVisitor* v, Relocatable* top);
   static char* Iterate(ObjectVisitor* v, char* t);
@@ -6044,44 +6198,77 @@
 };
 
 
-
-// Proxy describes objects pointing from JavaScript to C structures.
-// Since they cannot contain references to JS HeapObjects they can be
-// placed in old_data_space.
-class Proxy: public HeapObject {
+// The JSProxy describes ECMAScript Harmony proxies.
+class JSProxy: public HeapObject {
  public:
-  // [proxy]: field containing the address.
-  inline Address proxy();
-  inline void set_proxy(Address value);
+  // [handler]: The handler property.
+  DECL_ACCESSORS(handler, Object)
 
   // Casting.
-  static inline Proxy* cast(Object* obj);
+  static inline JSProxy* cast(Object* obj);
 
   // Dispatched behavior.
-  inline void ProxyIterateBody(ObjectVisitor* v);
-
-  template<typename StaticVisitor>
-  inline void ProxyIterateBody();
-
 #ifdef OBJECT_PRINT
-  inline void ProxyPrint() {
-    ProxyPrint(stdout);
+  inline void JSProxyPrint() {
+    JSProxyPrint(stdout);
   }
-  void ProxyPrint(FILE* out);
+  void JSProxyPrint(FILE* out);
 #endif
 #ifdef DEBUG
-  void ProxyVerify();
+  void JSProxyVerify();
+#endif
+
+  // Layout description.
+  static const int kHandlerOffset = HeapObject::kHeaderSize;
+  static const int kSize = kHandlerOffset + kPointerSize;
+
+  typedef FixedBodyDescriptor<kHandlerOffset,
+                              kHandlerOffset + kPointerSize,
+                              kSize> BodyDescriptor;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
+};
+
+
+
+// Foreign describes objects pointing from JavaScript to C structures.
+// Since they cannot contain references to JS HeapObjects they can be
+// placed in old_data_space.
+class Foreign: public HeapObject {
+ public:
+  // [address]: field containing the address.
+  inline Address address();
+  inline void set_address(Address value);
+
+  // Casting.
+  static inline Foreign* cast(Object* obj);
+
+  // Dispatched behavior.
+  inline void ForeignIterateBody(ObjectVisitor* v);
+
+  template<typename StaticVisitor>
+  inline void ForeignIterateBody();
+
+#ifdef OBJECT_PRINT
+  inline void ForeignPrint() {
+    ForeignPrint(stdout);
+  }
+  void ForeignPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void ForeignVerify();
 #endif
 
   // Layout description.
 
-  static const int kProxyOffset = HeapObject::kHeaderSize;
-  static const int kSize = kProxyOffset + kPointerSize;
+  static const int kAddressOffset = HeapObject::kHeaderSize;
+  static const int kSize = kAddressOffset + kPointerSize;
 
-  STATIC_CHECK(kProxyOffset == Internals::kProxyProxyOffset);
+  STATIC_CHECK(kAddressOffset == Internals::kForeignAddressOffset);
 
  private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
 };
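
The old Proxy wrapper (a raw external Address holder) is renamed to Foreign,
with proxy()/set_proxy() becoming address()/set_address(), while the new
JSProxy type is reserved for Harmony proxies. A minimal sketch of what a
call site looks like after the rename; the helper is illustrative only:

    // Before the rename: Proxy::cast(obj)->proxy();
    static Address UnwrapExternalPointer(Object* obj) {
      return Foreign::cast(obj)->address();
    }
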
 
 
diff --git a/src/parser.cc b/src/parser.cc
index ce9b7c3..7ad6440 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -28,7 +28,7 @@
 #include "v8.h"
 
 #include "api.h"
-#include "ast.h"
+#include "ast-inl.h"
 #include "bootstrapper.h"
 #include "codegen.h"
 #include "compiler.h"
@@ -41,8 +41,6 @@
 #include "scopeinfo.h"
 #include "string-stream.h"
 
-#include "ast-inl.h"
-
 namespace v8 {
 namespace internal {
 
@@ -129,7 +127,7 @@
 void RegExpBuilder::AddCharacter(uc16 c) {
   pending_empty_ = false;
   if (characters_ == NULL) {
-    characters_ = new ZoneList<uc16>(4);
+    characters_ = new(zone()) ZoneList<uc16>(4);
   }
   characters_->Add(c);
   LAST(ADD_CHAR);
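
The recurring change in parser.cc replaces plain heap allocation of parser
data structures with placement new into the compilation zone, tying their
lifetime to the zone. A minimal sketch of the idiom, assuming the zone-aware
operator new that ZoneList already provides (its definition is not shown in
this diff):

    // Allocates the list header, and later its backing store, in 'zone',
    // so everything is released when the zone is torn down.
    static ZoneList<Expression*>* NewExpressionList(Zone* zone) {
      return new(zone) ZoneList<Expression*>(4);
    }
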
@@ -594,7 +592,7 @@
 FunctionLiteral* Parser::ParseProgram(Handle<String> source,
                                       bool in_global_context,
                                       StrictModeFlag strict_mode) {
-  CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+  CompilationZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
 
   HistogramTimerScope timer(isolate()->counters()->parse());
   isolate()->counters()->total_parse_size()->Increment(source->length());
@@ -641,7 +639,7 @@
     if (strict_mode == kStrictMode) {
       top_scope_->EnableStrictMode();
     }
-    ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
+    ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
     bool ok = true;
     int beg_loc = scanner().location().beg_pos;
     ParseSourceElements(body, Token::EOS, &ok);
@@ -676,7 +674,7 @@
 }
 
 FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
-  CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
+  CompilationZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(isolate()->counters()->parse_lazy());
   Handle<String> source(String::cast(script_->source()));
   isolate()->counters()->total_parse_size()->Increment(source->length());
@@ -1051,9 +1049,10 @@
     if (names_ == NULL) {
       ASSERT(assigned_arguments_ == NULL);
       ASSERT(assigned_constants_ == NULL);
-      names_ = new ZoneStringList(4);
-      assigned_arguments_ = new ZoneList<int>(4);
-      assigned_constants_ = new ZoneObjectList(4);
+      Zone* zone = isolate_->zone();
+      names_ = new(zone) ZoneStringList(4);
+      assigned_arguments_ = new(zone) ZoneList<int>(4);
+      assigned_constants_ = new(zone) ZoneObjectList(4);
     }
   }
 
@@ -1303,7 +1302,10 @@
   // to the corresponding activation frame at runtime if necessary.
   // For instance declarations inside an eval scope need to be added
   // to the calling function context.
-  if (top_scope_->is_function_scope()) {
+  // Similarly, strict mode eval scope does not leak variable declarations to
+  // the caller's scope so we declare all locals, too.
+  if (top_scope_->is_function_scope() ||
+      top_scope_->is_strict_mode_eval_scope()) {
     // Declare the variable in the function scope.
     var = top_scope_->LocalLookup(name);
     if (var == NULL) {
@@ -1652,7 +1654,7 @@
 
     if (top_scope_->is_global_scope()) {
       // Compute the arguments for the runtime call.
-      ZoneList<Expression*>* arguments = new ZoneList<Expression*>(3);
+      ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
       // We have at least 1 parameter.
       arguments->Add(new(zone()) Literal(name));
       CallRuntime* initialize;
@@ -1769,7 +1771,7 @@
       *ok = false;
       return NULL;
     }
-    if (labels == NULL) labels = new ZoneStringList(4);
+    if (labels == NULL) labels = new(zone()) ZoneStringList(4);
     labels->Add(label);
     // Remove the "ghost" variable that turned out to be a label
     // from the top scope. This way, we don't try to resolve it
@@ -1910,7 +1912,7 @@
                           bool is_catch_block,
                           bool* ok) {
   // Parse the statement and collect escaping labels.
-  ZoneList<Label*>* target_list = new ZoneList<Label*>(0);
+  ZoneList<Label*>* target_list = new(zone()) ZoneList<Label*>(0);
   TargetCollector collector(target_list);
   Statement* stat;
   { Target target(&this->target_stack_, &collector);
@@ -1985,7 +1987,7 @@
   }
   Expect(Token::COLON, CHECK_OK);
   int pos = scanner().location().beg_pos;
-  ZoneList<Statement*>* statements = new ZoneList<Statement*>(5);
+  ZoneList<Statement*>* statements = new(zone()) ZoneList<Statement*>(5);
   while (peek() != Token::CASE &&
          peek() != Token::DEFAULT &&
          peek() != Token::RBRACE) {
@@ -2011,7 +2013,7 @@
   Expect(Token::RPAREN, CHECK_OK);
 
   bool default_seen = false;
-  ZoneList<CaseClause*>* cases = new ZoneList<CaseClause*>(4);
+  ZoneList<CaseClause*>* cases = new(zone()) ZoneList<CaseClause*>(4);
   Expect(Token::LBRACE, CHECK_OK);
   while (peek() != Token::RBRACE) {
     CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
@@ -2056,7 +2058,7 @@
 
   Expect(Token::TRY, CHECK_OK);
 
-  ZoneList<Label*>* target_list = new ZoneList<Label*>(0);
+  ZoneList<Label*>* target_list = new(zone()) ZoneList<Label*>(0);
   TargetCollector collector(target_list);
   Block* try_block;
 
@@ -2079,7 +2081,7 @@
   // then we will need to collect jump targets from the catch block. Since
   // we don't know yet if there will be a finally block, we always collect
   // the jump targets.
-  ZoneList<Label*>* catch_target_list = new ZoneList<Label*>(0);
+  ZoneList<Label*>* catch_target_list = new(zone()) ZoneList<Label*>(0);
   TargetCollector catch_collector(catch_target_list);
   bool has_catch = false;
   if (tok == Token::CATCH) {
@@ -2490,7 +2492,7 @@
         x = NewCompareNode(cmp, x, y, position);
         if (cmp != op) {
           // The comparison was negated - add a NOT.
-          x = new(zone()) UnaryOperation(Token::NOT, x);
+          x = new(zone()) UnaryOperation(Token::NOT, x, position);
         }
 
       } else {
@@ -2540,6 +2542,7 @@
   Token::Value op = peek();
   if (Token::IsUnaryOp(op)) {
     op = Next();
+    int position = scanner().location().beg_pos;
     Expression* expression = ParseUnaryExpression(CHECK_OK);
 
     // Compute some expressions involving only number literals.
@@ -2567,7 +2570,7 @@
       }
     }
 
-    return new(zone()) UnaryOperation(op, expression);
+    return new(zone()) UnaryOperation(op, expression, position);
 
   } else if (Token::IsCountOp(op)) {
     op = Next();
@@ -2724,7 +2727,9 @@
 
   if (!stack->is_empty()) {
     int last = stack->pop();
-    result = new(zone()) CallNew(result, new ZoneList<Expression*>(0), last);
+    result = new(zone()) CallNew(result,
+                                 new(zone()) ZoneList<Expression*>(0),
+                                 last);
   }
   return result;
 }
@@ -2991,7 +2996,7 @@
   // ArrayLiteral ::
   //   '[' Expression? (',' Expression?)* ']'
 
-  ZoneList<Expression*>* values = new ZoneList<Expression*>(4);
+  ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4);
   Expect(Token::LBRACK, CHECK_OK);
   while (peek() != Token::RBRACK) {
     Expression* elem;
@@ -3333,7 +3338,7 @@
   //    )*[','] '}'
 
   ZoneList<ObjectLiteral::Property*>* properties =
-      new ZoneList<ObjectLiteral::Property*>(4);
+      new(zone()) ZoneList<ObjectLiteral::Property*>(4);
   int number_of_boilerplate_properties = 0;
   bool has_function = false;
 
@@ -3495,7 +3500,7 @@
   // Arguments ::
   //   '(' (AssignmentExpression)*[','] ')'
 
-  ZoneList<Expression*>* result = new ZoneList<Expression*>(4);
+  ZoneList<Expression*>* result = new(zone()) ZoneList<Expression*>(4);
   Expect(Token::LPAREN, CHECK_OK);
   bool done = (peek() == Token::RPAREN);
   while (!done) {
@@ -3538,7 +3543,7 @@
 
   int num_parameters = 0;
   Scope* scope = NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
-  ZoneList<Statement*>* body = new ZoneList<Statement*>(8);
+  ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8);
   int materialized_literal_count;
   int expected_property_count;
   int start_pos;
@@ -3553,9 +3558,9 @@
     //    '(' (Identifier)*[','] ')'
     Expect(Token::LPAREN, CHECK_OK);
     start_pos = scanner().location().beg_pos;
-    Scanner::Location name_loc = Scanner::NoLocation();
-    Scanner::Location dupe_loc = Scanner::NoLocation();
-    Scanner::Location reserved_loc = Scanner::NoLocation();
+    Scanner::Location name_loc = Scanner::Location::invalid();
+    Scanner::Location dupe_loc = Scanner::Location::invalid();
+    Scanner::Location reserved_loc = Scanner::Location::invalid();
 
     bool done = (peek() == Token::RPAREN);
     while (!done) {
@@ -3876,12 +3881,14 @@
 }
 
 
-// Checks whether octal literal last seen is between beg_pos and end_pos.
-// If so, reports an error.
+// Checks whether an octal literal was last seen between beg_pos and end_pos.
+// If so, reports an error. Only called for strict mode.
 void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
-  int octal = scanner().octal_position();
-  if (beg_pos <= octal && octal <= end_pos) {
-    ReportMessageAt(Scanner::Location(octal, octal + 1), "strict_octal_literal",
+  Scanner::Location octal = scanner().octal_position();
+  if (octal.IsValid() &&
+      beg_pos <= octal.beg_pos &&
+      octal.end_pos <= end_pos) {
+    ReportMessageAt(octal, "strict_octal_literal",
                     Vector<const char*>::empty());
     scanner().clear_octal_position();
     *ok = false;
@@ -4009,7 +4016,7 @@
   Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(elements,
                                                                        TENURED);
 
-  ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
+  ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2);
   args->Add(new(zone()) Literal(type));
   args->Add(new(zone()) Literal(array));
   return new(zone()) Throw(new(zone()) CallRuntime(constructor, NULL, args),
@@ -4017,186 +4024,6 @@
 }
 
 // ----------------------------------------------------------------------------
-// JSON
-
-Handle<Object> JsonParser::ParseJson(Handle<String> script,
-                                     UC16CharacterStream* source) {
-  scanner_.Initialize(source);
-  stack_overflow_ = false;
-  Handle<Object> result = ParseJsonValue();
-  if (result.is_null() || scanner_.Next() != Token::EOS) {
-    if (stack_overflow_) {
-      // Scanner failed.
-      isolate()->StackOverflow();
-    } else {
-      // Parse failed. Scanner's current token is the unexpected token.
-      Token::Value token = scanner_.current_token();
-
-      const char* message;
-      const char* name_opt = NULL;
-
-      switch (token) {
-        case Token::EOS:
-          message = "unexpected_eos";
-          break;
-        case Token::NUMBER:
-          message = "unexpected_token_number";
-          break;
-        case Token::STRING:
-          message = "unexpected_token_string";
-          break;
-        case Token::IDENTIFIER:
-        case Token::FUTURE_RESERVED_WORD:
-          message = "unexpected_token_identifier";
-          break;
-        default:
-          message = "unexpected_token";
-          name_opt = Token::String(token);
-          ASSERT(name_opt != NULL);
-          break;
-      }
-
-      Scanner::Location source_location = scanner_.location();
-      Factory* factory = isolate()->factory();
-      MessageLocation location(factory->NewScript(script),
-                               source_location.beg_pos,
-                               source_location.end_pos);
-      Handle<JSArray> array;
-      if (name_opt == NULL) {
-        array = factory->NewJSArray(0);
-      } else {
-        Handle<String> name = factory->NewStringFromUtf8(CStrVector(name_opt));
-        Handle<FixedArray> element = factory->NewFixedArray(1);
-        element->set(0, *name);
-        array = factory->NewJSArrayWithElements(element);
-      }
-      Handle<Object> result = factory->NewSyntaxError(message, array);
-      isolate()->Throw(*result, &location);
-      return Handle<Object>::null();
-    }
-  }
-  return result;
-}
-
-
-Handle<String> JsonParser::GetString() {
-  int literal_length = scanner_.literal_length();
-  if (literal_length == 0) {
-    return isolate()->factory()->empty_string();
-  }
-  if (scanner_.is_literal_ascii()) {
-    return isolate()->factory()->NewStringFromAscii(
-        scanner_.literal_ascii_string());
-  } else {
-    return isolate()->factory()->NewStringFromTwoByte(
-        scanner_.literal_uc16_string());
-  }
-}
-
-
-// Parse any JSON value.
-Handle<Object> JsonParser::ParseJsonValue() {
-  Token::Value token = scanner_.Next();
-  switch (token) {
-    case Token::STRING:
-      return GetString();
-    case Token::NUMBER:
-      return isolate()->factory()->NewNumber(scanner_.number());
-    case Token::FALSE_LITERAL:
-      return isolate()->factory()->false_value();
-    case Token::TRUE_LITERAL:
-      return isolate()->factory()->true_value();
-    case Token::NULL_LITERAL:
-      return isolate()->factory()->null_value();
-    case Token::LBRACE:
-      return ParseJsonObject();
-    case Token::LBRACK:
-      return ParseJsonArray();
-    default:
-      return ReportUnexpectedToken();
-  }
-}
-
-
-// Parse a JSON object. Scanner must be right after '{' token.
-Handle<Object> JsonParser::ParseJsonObject() {
-  Handle<JSFunction> object_constructor(
-      isolate()->global_context()->object_function());
-  Handle<JSObject> json_object =
-      isolate()->factory()->NewJSObject(object_constructor);
-  if (scanner_.peek() == Token::RBRACE) {
-    scanner_.Next();
-  } else {
-    if (StackLimitCheck(isolate()).HasOverflowed()) {
-      stack_overflow_ = true;
-      return Handle<Object>::null();
-    }
-    do {
-      if (scanner_.Next() != Token::STRING) {
-        return ReportUnexpectedToken();
-      }
-      Handle<String> key = GetString();
-      if (scanner_.Next() != Token::COLON) {
-        return ReportUnexpectedToken();
-      }
-      Handle<Object> value = ParseJsonValue();
-      if (value.is_null()) return Handle<Object>::null();
-      uint32_t index;
-      if (key->AsArrayIndex(&index)) {
-        SetOwnElement(json_object, index, value, kNonStrictMode);
-      } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
-        // We can't remove the __proto__ accessor since it's hardcoded
-        // in several places. Instead go along and add the value as
-        // the prototype of the created object if possible.
-        SetPrototype(json_object, value);
-      } else {
-        SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
-      }
-    } while (scanner_.Next() == Token::COMMA);
-    if (scanner_.current_token() != Token::RBRACE) {
-      return ReportUnexpectedToken();
-    }
-  }
-  return json_object;
-}
-
-
-// Parse a JSON array. Scanner must be right after '[' token.
-Handle<Object> JsonParser::ParseJsonArray() {
-  ZoneScope zone_scope(DELETE_ON_EXIT);
-  ZoneList<Handle<Object> > elements(4);
-
-  Token::Value token = scanner_.peek();
-  if (token == Token::RBRACK) {
-    scanner_.Next();
-  } else {
-    if (StackLimitCheck(isolate()).HasOverflowed()) {
-      stack_overflow_ = true;
-      return Handle<Object>::null();
-    }
-    do {
-      Handle<Object> element = ParseJsonValue();
-      if (element.is_null()) return Handle<Object>::null();
-      elements.Add(element);
-      token = scanner_.Next();
-    } while (token == Token::COMMA);
-    if (token != Token::RBRACK) {
-      return ReportUnexpectedToken();
-    }
-  }
-
-  // Allocate a fixed array with all the elements.
-  Handle<FixedArray> fast_elements =
-      isolate()->factory()->NewFixedArray(elements.length());
-
-  for (int i = 0, n = elements.length(); i < n; i++) {
-    fast_elements->set(i, *elements[i]);
-  }
-
-  return isolate()->factory()->NewJSArrayWithElements(fast_elements);
-}
-
-// ----------------------------------------------------------------------------
 // Regular expressions
 
 
@@ -4383,7 +4210,8 @@
     case '.': {
       Advance();
       // everything except \x0a, \x0d, \u2028 and \u2029
-      ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+      ZoneList<CharacterRange>* ranges =
+          new(zone()) ZoneList<CharacterRange>(2);
       CharacterRange::AddClassEscape('.', ranges);
       RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
       builder->AddAtom(atom);
@@ -4410,7 +4238,7 @@
         Advance(2);
       } else {
         if (captures_ == NULL) {
-          captures_ = new ZoneList<RegExpCapture*>(2);
+          captures_ = new(zone()) ZoneList<RegExpCapture*>(2);
         }
         if (captures_started() >= kMaxCaptures) {
           ReportError(CStrVector("Too many captures") CHECK_FAILED);
@@ -4453,7 +4281,8 @@
       case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
         uc32 c = Next();
         Advance(2);
-        ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+        ZoneList<CharacterRange>* ranges =
+            new(zone()) ZoneList<CharacterRange>(2);
         CharacterRange::AddClassEscape(c, ranges);
         RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
         builder->AddAtom(atom);
@@ -4949,7 +4778,7 @@
     is_negated = true;
     Advance();
   }
-  ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
+  ZoneList<CharacterRange>* ranges = new(zone()) ZoneList<CharacterRange>(2);
   while (has_more() && current() != ']') {
     uc16 char_class = kNoCharClass;
     CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
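
The regexp-parser hunks above switch bare heap allocations to new(zone()) ZoneList<...>(...), i.e. a placement operator new that carves the object out of the parser's zone (arena) so everything is released in one go when the zone is torn down. A minimal standalone sketch of that allocation pattern, using an illustrative Arena type rather than V8's actual Zone API:

    #include <cstdlib>
    #include <vector>

    // Toy bump/arena allocator standing in for a zone: objects are freed all
    // at once when the arena is destroyed, never individually.
    class Arena {
     public:
      ~Arena() {
        for (size_t i = 0; i < blocks_.size(); ++i) std::free(blocks_[i]);
      }
      void* Allocate(size_t size) {
        void* p = std::malloc(size);
        blocks_.push_back(p);
        return p;
      }
     private:
      std::vector<void*> blocks_;
    };

    // Objects meant to live in an arena expose a placement operator new that
    // forwards to it, mirroring "new(zone()) ZoneList<...>(...)" in the diff.
    class ArenaObject {
     public:
      void* operator new(size_t size, Arena* arena) { return arena->Allocate(size); }
      void operator delete(void*, Arena*) {}  // only used if a constructor throws
      void operator delete(void*) {}          // the arena reclaims the memory
    };

    struct CharacterRangeList : public ArenaObject {
      explicit CharacterRangeList(int capacity) : capacity(capacity) {}
      int capacity;
    };

    int main() {
      Arena arena;
      // Analogue of: ranges = new(zone()) ZoneList<CharacterRange>(2);
      CharacterRangeList* ranges = new (&arena) CharacterRangeList(2);
      (void)ranges;  // freed wholesale when 'arena' goes out of scope
    }
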
diff --git a/src/parser.h b/src/parser.h
index 64f1303..a7132ce 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -32,6 +32,7 @@
 #include "ast.h"
 #include "scanner.h"
 #include "scopes.h"
+#include "preparse-data-format.h"
 #include "preparse-data.h"
 
 namespace v8 {
@@ -759,66 +760,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
 };
 
-
-// ----------------------------------------------------------------------------
-// JSON PARSING
-
-// JSON is a subset of JavaScript, as specified in, e.g., the ECMAScript 5
-// specification section 15.12.1 (and appendix A.8).
-// The grammar is given section 15.12.1.2 (and appendix A.8.2).
-class JsonParser BASE_EMBEDDED {
- public:
-  // Parse JSON input as a single JSON value.
-  // Returns null handle and sets exception if parsing failed.
-  static Handle<Object> Parse(Handle<String> source) {
-    if (source->IsExternalTwoByteString()) {
-      ExternalTwoByteStringUC16CharacterStream stream(
-          Handle<ExternalTwoByteString>::cast(source), 0, source->length());
-      return JsonParser().ParseJson(source, &stream);
-    } else {
-      GenericStringUC16CharacterStream stream(source, 0, source->length());
-      return JsonParser().ParseJson(source, &stream);
-    }
-  }
-
- private:
-  JsonParser()
-      : isolate_(Isolate::Current()),
-        scanner_(isolate_->unicode_cache()) { }
-  ~JsonParser() { }
-
-  Isolate* isolate() { return isolate_; }
-
-  // Parse a string containing a single JSON value.
-  Handle<Object> ParseJson(Handle<String> script, UC16CharacterStream* source);
-  // Parse a single JSON value from input (grammar production JSONValue).
-  // A JSON value is either a (double-quoted) string literal, a number literal,
-  // one of "true", "false", or "null", or an object or array literal.
-  Handle<Object> ParseJsonValue();
-  // Parse a JSON object literal (grammar production JSONObject).
-  // An object literal is a squiggly-braced and comma separated sequence
-  // (possibly empty) of key/value pairs, where the key is a JSON string
-  // literal, the value is a JSON value, and the two are separated by a colon.
-  // A JSON object doesn't allow numbers and identifiers as keys, unlike a
-  // JavaScript object literal.
-  Handle<Object> ParseJsonObject();
-  // Parses a JSON array literal (grammar production JSONArray). An array
-  // literal is a square-bracketed and comma separated sequence (possibly empty)
-  // of JSON values.
-  // A JSON array doesn't allow leaving out values from the sequence, nor does
-  // it allow a terminal comma, like a JavaScript array does.
-  Handle<Object> ParseJsonArray();
-
-  // Mark that a parsing error has happened at the current token, and
-  // return a null handle. Primarily for readability.
-  Handle<Object> ReportUnexpectedToken() { return Handle<Object>::null(); }
-  // Converts the currently parsed literal to a JavaScript String.
-  Handle<String> GetString();
-
-  Isolate* isolate_;
-  JsonScanner scanner_;
-  bool stack_overflow_;
-};
 } }  // namespace v8::internal
 
 #endif  // V8_PARSER_H_
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 8b83f2b..99264d2 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -52,6 +52,7 @@
 #undef MAP_TYPE
 
 #include "v8.h"
+#include "v8threads.h"
 
 #include "platform.h"
 #include "vm-state-inl.h"
@@ -397,31 +398,6 @@
 };
 
 
-ThreadHandle::ThreadHandle(Kind kind) {
-  data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
-  data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
-  delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
-  return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
-  return data_->thread_ != kNoThread;
-}
-
-
 Thread::Thread(Isolate* isolate, const Options& options)
     : data_(new PlatformData),
       isolate_(isolate),
@@ -448,8 +424,8 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
-  thread_->data_->thread_ = pthread_self();
-  ASSERT(thread->IsValid());
+  thread->data()->thread_ = pthread_self();
+  ASSERT(thread->data()->thread_ != kNoThread);
   Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
@@ -470,13 +446,13 @@
     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
     attr_ptr = &attr;
   }
-  pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(IsValid());
+  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  ASSERT(data_->thread_ != kNoThread);
 }
 
 
 void Thread::Join() {
-  pthread_join(thread_handle_data()->thread_, NULL);
+  pthread_join(data_->thread_, NULL);
 }
 
 
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index c60658f..d4d772c 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -87,18 +87,30 @@
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
   limit_mutex = CreateMutex();
+
+#ifdef __arm__
+  // When running on ARM hardware check that the EABI used by V8 and
+  // by the C code is the same.
+  bool hard_float = OS::ArmUsingHardFloat();
+  if (hard_float) {
+#if !USE_EABI_HARDFLOAT
+    PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
+           "-DUSE_EABI_HARDFLOAT\n");
+    exit(1);
+#endif
+  } else {
+#if USE_EABI_HARDFLOAT
+    PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
+           "-DUSE_EABI_HARDFLOAT\n");
+    exit(1);
+#endif
+  }
+#endif
 }
 
 
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
-#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
-  // Here gcc is telling us that we are on an ARM and gcc is assuming
-  // that we have VFP3 instructions.  If gcc can assume it then so can
-  // we. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
-  return 1u << VFP3 | 1u << ARMv7;
-#elif CAN_USE_ARMV7_INSTRUCTIONS
-  return 1u << ARMv7;
-#elif(defined(__mips_hard_float) && __mips_hard_float != 0)
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
     // Here gcc is telling us that we are on an MIPS and gcc is assuming that we
     // have FPU instructions.  If gcc can assume it then so can we.
     return 1u << FPU;
@@ -142,6 +154,7 @@
   return false;
 }
 
+
 bool OS::ArmCpuHasFeature(CpuFeature feature) {
   const char* search_string = NULL;
   // Simple detection of VFP at runtime for Linux.
@@ -177,6 +190,50 @@
 
   return false;
 }
+
+
+// Simple helper function to detect whether the C code is compiled with
+// option -mfloat-abi=hard. The register d0 is loaded with 1.0 and the register
+// pair r0, r1 is loaded with 0.0. If -mfloat-abi=hard is passed to GCC then
+// calling this will return 1.0 and otherwise 0.0.
+static void ArmUsingHardFloatHelper() {
+  asm("mov r0, #0");
+#if defined(__VFP_FP__) && !defined(__SOFTFP__)
+  // Load 0x3ff00000 into r1 using instructions available in both ARM
+  // and Thumb mode.
+  asm("mov r1, #3");
+  asm("mov r2, #255");
+  asm("lsl r1, r1, #8");
+  asm("orr r1, r1, r2");
+  asm("lsl r1, r1, #20");
+  // For vmov d0, r0, r1 use ARM mode.
+#ifdef __thumb__
+  asm volatile(
+    "@   Enter ARM Mode  \n\t"
+    "    adr r3, 1f      \n\t"
+    "    bx  r3          \n\t"
+    "    .ALIGN 4        \n\t"
+    "    .ARM            \n"
+    "1:  vmov d0, r0, r1 \n\t"
+    "@   Enter THUMB Mode\n\t"
+    "    adr r3, 2f+1    \n\t"
+    "    bx  r3          \n\t"
+    "    .THUMB          \n"
+    "2:                  \n\t");
+#else
+  asm("vmov d0, r0, r1");
+#endif  // __thumb__
+#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
+  asm("mov r1, #0");
+}
+
+
+bool OS::ArmUsingHardFloat() {
+  // Cast helper function from returning void to returning double.
+  typedef double (*F)();
+  F f = FUNCTION_CAST<F>(FUNCTION_ADDR(ArmUsingHardFloatHelper));
+  return f() == 1.0;
+}
 #endif  // def __arm__
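
The hard-float check added above is a calling-convention probe: the helper leaves 1.0 in d0 and 0.0 in r0/r1, and the caller invokes it through a function pointer typed as returning double. A hard-float build reads the result from d0 (1.0); a soft-float build reads it from r0/r1 (0.0). A standalone sketch of the same probe (assumes GCC, ARM mode with VFP enabled, and omits the Thumb interworking the real helper performs):

    #include <cstdio>

    // Leaves 1.0 in d0 and 0.0 in r0:r1, then "returns void".
    static void ArmAbiProbe() {
      asm("mov r0, #0");        // r0 = low word of both 1.0 and 0.0
      // Build 0x3ff00000, the high word of the IEEE-754 double 1.0, in r1.
      asm("mov r1, #3");
      asm("mov r2, #255");
      asm("lsl r1, r1, #8");
      asm("orr r1, r1, r2");
      asm("lsl r1, r1, #20");
      asm("vmov d0, r0, r1");   // hard-float return slot: d0 = 1.0
      asm("mov r1, #0");        // soft-float return slot: r0:r1 = 0.0
    }

    bool UsingHardFloatAbi() {
      // Call the helper as if it returned a double: where the caller looks
      // for that double depends on the float ABI the compiler targets.
      typedef double (*Probe)();
      Probe probe = reinterpret_cast<Probe>(&ArmAbiProbe);
      return probe() == 1.0;
    }

    int main() {
      std::printf("hard-float ABI: %s\n", UsingHardFloatAbi() ? "yes" : "no");
    }
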
 
 
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index aacad14..295482d 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -186,6 +186,11 @@
 }
 
 
+bool OS::ArmUsingHardFloat() {
+  UNIMPLEMENTED();
+}
+
+
 bool OS::IsOutsideAllocatedSpace(void* address) {
   UNIMPLEMENTED();
   return false;
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 8673f04..390d3d9 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -407,13 +407,11 @@
   }
 
   // Make standard and DST timezone names.
-  OS::SNPrintF(Vector<char>(std_tz_name_, kTzNameSize),
-               "%S",
-               tzinfo_.StandardName);
+  WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
+                      std_tz_name_, kTzNameSize, NULL, NULL);
   std_tz_name_[kTzNameSize - 1] = '\0';
-  OS::SNPrintF(Vector<char>(dst_tz_name_, kTzNameSize),
-               "%S",
-               tzinfo_.DaylightName);
+  WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
+                      dst_tz_name_, kTzNameSize, NULL, NULL);
   dst_tz_name_[kTzNameSize - 1] = '\0';
 
   // If OS returned empty string or resource id (like "@tzres.dll,-211")
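
The timezone-name hunk above replaces the SNPrintF("%S", ...) conversion with an explicit WideCharToMultiByte call using CP_UTF8, presumably so the UTF-16 names from the OS are converted to UTF-8 instead of going through the current ANSI code page. A minimal Windows-only usage sketch of that call (buffer size here is illustrative):

    #include <windows.h>
    #include <cstdio>

    int main() {
      TIME_ZONE_INFORMATION tzinfo;
      if (GetTimeZoneInformation(&tzinfo) == TIME_ZONE_ID_INVALID) return 1;

      // Convert the wide (UTF-16) standard-time name to UTF-8; a source
      // length of -1 means "convert up to and including the terminating NUL".
      char std_name[64];
      WideCharToMultiByte(CP_UTF8, 0, tzinfo.StandardName, -1,
                          std_name, static_cast<int>(sizeof(std_name)), NULL, NULL);
      std_name[sizeof(std_name) - 1] = '\0';
      std::printf("standard timezone name: %s\n", std_name);
      return 0;
    }
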
diff --git a/src/platform.h b/src/platform.h
index fc417ef..725008a 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -294,6 +294,10 @@
   // Support runtime detection of VFP3 on ARM CPUs.
   static bool ArmCpuHasFeature(CpuFeature feature);
 
+  // Support runtime detection of whether the hard float option of the
+  // EABI is used.
+  static bool ArmUsingHardFloat();
+
   // Support runtime detection of FPU on MIPS CPUs.
   static bool MipsCpuHasFeature(CpuFeature feature);
 
diff --git a/src/mips/virtual-frame-mips-inl.h b/src/preparse-data-format.h
similarity index 62%
copy from src/mips/virtual-frame-mips-inl.h
copy to src/preparse-data-format.h
index f0d2fab..64c4f18 100644
--- a/src/mips/virtual-frame-mips-inl.h
+++ b/src/preparse-data-format.h
@@ -25,34 +25,38 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_VIRTUAL_FRAME_MIPS_INL_H_
-#define V8_VIRTUAL_FRAME_MIPS_INL_H_
-
-#include "assembler-mips.h"
-#include "virtual-frame-mips.h"
+#ifndef V8_PREPARSE_DATA_FORMAT_H_
+#define V8_PREPARSE_DATA_FORMAT_H_
 
 namespace v8 {
 namespace internal {
 
+// Generic and general data used by preparse data recorders and readers.
 
-MemOperand VirtualFrame::ParameterAt(int index) {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
+struct PreparseDataConstants {
+ public:
+  // Layout and constants of the preparse data exchange format.
+  static const unsigned kMagicNumber = 0xBadDead;
+  static const unsigned kCurrentVersion = 6;
+
+  static const int kMagicOffset = 0;
+  static const int kVersionOffset = 1;
+  static const int kHasErrorOffset = 2;
+  static const int kFunctionsSizeOffset = 3;
+  static const int kSymbolCountOffset = 4;
+  static const int kSizeOffset = 5;
+  static const int kHeaderSize = 6;
+
+  // If encoding a message, the following positions are fixed.
+  static const int kMessageStartPos = 0;
+  static const int kMessageEndPos = 1;
+  static const int kMessageArgCountPos = 2;
+  static const int kMessageTextPos = 3;
+
+  static const unsigned char kNumberTerminator = 0x80u;
+};
 
 
-// The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
-  UNIMPLEMENTED_MIPS();
-  return MemOperand(zero_reg, 0);
-}
+} }  // namespace v8::internal.
 
-
-void VirtualFrame::Forget(int count) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_VIRTUAL_FRAME_MIPS_INL_H_
+#endif  // V8_PREPARSE_DATA_FORMAT_H_
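
The constants moved into src/preparse-data-format.h above describe a flat header of unsigned words at fixed offsets: magic number, version, error flag, function-data size, symbol count, and size. A minimal sketch of a consumer that checks such a header against those offsets (the helper name is illustrative, not V8's):

    #include <cstddef>

    // Field offsets (in unsigned words), as listed in PreparseDataConstants.
    const unsigned kMagicNumber    = 0xBadDead;
    const unsigned kCurrentVersion = 6;
    const int kMagicOffset         = 0;
    const int kVersionOffset       = 1;
    const int kHasErrorOffset      = 2;
    const int kFunctionsSizeOffset = 3;
    const int kSymbolCountOffset   = 4;
    const int kSizeOffset          = 5;
    const int kHeaderSize          = 6;

    // Returns true if 'data' (an array of 'length' unsigned words) starts
    // with a header carrying the expected magic number and version; the
    // error flag, function-data size, symbol count and size fields follow
    // at the offsets above.
    bool HeaderLooksValid(const unsigned* data, size_t length) {
      if (length < static_cast<size_t>(kHeaderSize)) return false;
      return data[kMagicOffset] == kMagicNumber &&
             data[kVersionOffset] == kCurrentVersion;
    }
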
diff --git a/src/preparse-data.cc b/src/preparse-data.cc
index 92a0338..98c343e 100644
--- a/src/preparse-data.cc
+++ b/src/preparse-data.cc
@@ -26,15 +26,13 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list-inl.h"
-#include "hashmap.h"
 
+#include "preparse-data-format.h"
 #include "preparse-data.h"
 
+#include "checks.h"
+#include "globals.h"
+#include "hashmap.h"
 
 namespace v8 {
 namespace internal {
@@ -76,7 +74,7 @@
   function_store_.Add((arg_opt == NULL) ? 0 : 1);
   STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 3);
   WriteString(CStrVector(message));
-  if (arg_opt) WriteString(CStrVector(arg_opt));
+  if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
   is_recording_ = false;
 }
 
diff --git a/src/preparse-data.h b/src/preparse-data.h
index bb5707b..551d2e8 100644
--- a/src/preparse-data.h
+++ b/src/preparse-data.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,40 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_PREPARSER_DATA_H_
-#define V8_PREPARSER_DATA_H_
+#ifndef V8_PREPARSE_DATA_H_
+#define V8_PREPARSE_DATA_H_
 
+#include "allocation.h"
 #include "hashmap.h"
+#include "utils-inl.h"
 
 namespace v8 {
 namespace internal {
 
-// Generic and general data used by preparse data recorders and readers.
-
-class PreparseDataConstants : public AllStatic {
- public:
-  // Layout and constants of the preparse data exchange format.
-  static const unsigned kMagicNumber = 0xBadDead;
-  static const unsigned kCurrentVersion = 6;
-
-  static const int kMagicOffset = 0;
-  static const int kVersionOffset = 1;
-  static const int kHasErrorOffset = 2;
-  static const int kFunctionsSizeOffset = 3;
-  static const int kSymbolCountOffset = 4;
-  static const int kSizeOffset = 5;
-  static const int kHeaderSize = 6;
-
-  // If encoding a message, the following positions are fixed.
-  static const int kMessageStartPos = 0;
-  static const int kMessageEndPos = 1;
-  static const int kMessageArgCountPos = 2;
-  static const int kMessageTextPos = 3;
-
-  static const byte kNumberTerminator = 0x80u;
-};
-
-
 // ----------------------------------------------------------------------------
 // ParserRecorder - Logging of preparser data.
 
@@ -246,4 +222,4 @@
 
 } }  // namespace v8::internal.
 
-#endif  // V8_PREPARSER_DATA_H_
+#endif  // V8_PREPARSE_DATA_H_
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index 9646eb6..a0d13ed 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -33,6 +33,7 @@
 #include "utils.h"
 #include "list.h"
 #include "scanner-base.h"
+#include "preparse-data-format.h"
 #include "preparse-data.h"
 #include "preparser.h"
 
diff --git a/src/preparser.cc b/src/preparser.cc
index fec1567..86db379 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,6 +34,7 @@
 #include "list.h"
 
 #include "scanner-base.h"
+#include "preparse-data-format.h"
 #include "preparse-data.h"
 #include "preparser.h"
 
@@ -55,13 +56,6 @@
 
 namespace i = ::v8::internal;
 
-#define CHECK_OK  ok);  \
-  if (!*ok) return -1;  \
-  ((void)0
-#define DUMMY )  // to make indentation work
-#undef DUMMY
-
-
 void PreParser::ReportUnexpectedToken(i::Token::Value token) {
   // We don't report stack overflows here, to avoid increasing the
   // stack depth even further.  Instead we report it after parsing is
@@ -94,18 +88,53 @@
 }
 
 
+// Checks whether the last octal literal seen is between beg_pos and end_pos.
+// If so, reports an error.
+void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+  i::Scanner::Location octal = scanner_->octal_position();
+  if (beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) {
+    ReportMessageAt(octal.beg_pos, octal.end_pos, "strict_octal_literal", NULL);
+    scanner_->clear_octal_position();
+    *ok = false;
+  }
+}
+
+
+#define CHECK_OK  ok);                      \
+  if (!*ok) return kUnknownSourceElements;  \
+  ((void)0
+#define DUMMY )  // to make indentation work
+#undef DUMMY
+
+
 PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
                                                          bool* ok) {
   // SourceElements ::
   //   (Statement)* <end_token>
 
+  bool allow_directive_prologue = true;
   while (peek() != end_token) {
-    ParseStatement(CHECK_OK);
+    Statement statement = ParseStatement(CHECK_OK);
+    if (allow_directive_prologue) {
+      if (statement.IsUseStrictLiteral()) {
+        set_strict_mode();
+      } else if (!statement.IsStringLiteral()) {
+        allow_directive_prologue = false;
+      }
+    }
   }
   return kUnknownSourceElements;
 }
 
 
+#undef CHECK_OK
+#define CHECK_OK  ok);                   \
+  if (!*ok) return Statement::Default();  \
+  ((void)0
+#define DUMMY )  // to make indentation work
+#undef DUMMY
+
+
 PreParser::Statement PreParser::ParseStatement(bool* ok) {
   // Statement ::
   //   Block
@@ -142,10 +171,10 @@
 
     case i::Token::SEMICOLON:
       Next();
-      return kUnknownStatement;
+      return Statement::Default();
 
     case i::Token::IF:
-      return  ParseIfStatement(ok);
+      return ParseIfStatement(ok);
 
     case i::Token::DO:
       return ParseDoWhileStatement(ok);
@@ -196,9 +225,24 @@
   // FunctionDeclaration ::
   //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
   Expect(i::Token::FUNCTION, CHECK_OK);
-  ParseIdentifier(CHECK_OK);
-  ParseFunctionLiteral(CHECK_OK);
-  return kUnknownStatement;
+
+  Identifier identifier = ParseIdentifier(CHECK_OK);
+  i::Scanner::Location location = scanner_->location();
+
+  Expression function_value = ParseFunctionLiteral(CHECK_OK);
+
+  if (function_value.IsStrictFunction() &&
+      !identifier.IsValidStrictVariable()) {
+    // Strict mode violation, using either reserved word or eval/arguments
+    // as name of strict function.
+    const char* type = "strict_function_name";
+    if (identifier.IsFutureReserved()) {
+      type = "strict_reserved_word";
+    }
+    ReportMessageAt(location.beg_pos, location.end_pos, type, NULL);
+    *ok = false;
+  }
+  return Statement::FunctionDeclaration();
 }
 
 
@@ -221,7 +265,7 @@
   }
   Expect(i::Token::RPAREN, CHECK_OK);
   Expect(i::Token::SEMICOLON, CHECK_OK);
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -234,10 +278,18 @@
   //
   Expect(i::Token::LBRACE, CHECK_OK);
   while (peek() != i::Token::RBRACE) {
-    ParseStatement(CHECK_OK);
+    i::Scanner::Location start_location = scanner_->peek_location();
+    Statement statement = ParseStatement(CHECK_OK);
+    i::Scanner::Location end_location = scanner_->location();
+    if (strict_mode() && statement.IsFunctionDeclaration()) {
+      ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                      "strict_function", NULL);
+      *ok = false;
+      return Statement::Default();
+    }
   }
-  Expect(i::Token::RBRACE, CHECK_OK);
-  return kUnknownStatement;
+  Expect(i::Token::RBRACE, ok);
+  return Statement::Default();
 }
 
 
@@ -265,10 +317,17 @@
   if (peek() == i::Token::VAR) {
     Consume(i::Token::VAR);
   } else if (peek() == i::Token::CONST) {
+    if (strict_mode()) {
+      i::Scanner::Location location = scanner_->peek_location();
+      ReportMessageAt(location.beg_pos, location.end_pos,
+                      "strict_const", NULL);
+      *ok = false;
+      return Statement::Default();
+    }
     Consume(i::Token::CONST);
   } else {
     *ok = false;
-    return 0;
+    return Statement::Default();
   }
 
   // The scope of a variable/const declared anywhere inside a function
@@ -277,7 +336,14 @@
   do {
     // Parse variable name.
     if (nvars > 0) Consume(i::Token::COMMA);
-    ParseIdentifier(CHECK_OK);
+    Identifier identifier = ParseIdentifier(CHECK_OK);
+    if (strict_mode() && !identifier.IsValidStrictVariable()) {
+      StrictModeIdentifierViolation(scanner_->location(),
+                                    "strict_var_name",
+                                    identifier,
+                                    ok);
+      return Statement::Default();
+    }
     nvars++;
     if (peek() == i::Token::ASSIGN) {
       Expect(i::Token::ASSIGN, CHECK_OK);
@@ -286,24 +352,33 @@
   } while (peek() == i::Token::COMMA);
 
   if (num_decl != NULL) *num_decl = nvars;
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
-PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
-    bool* ok) {
+PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
   // ExpressionStatement | LabelledStatement ::
   //   Expression ';'
   //   Identifier ':' Statement
 
   Expression expr = ParseExpression(true, CHECK_OK);
-  if (peek() == i::Token::COLON && expr == kIdentifierExpression) {
-    Consume(i::Token::COLON);
-    return ParseStatement(ok);
+  if (peek() == i::Token::COLON && expr.IsRawIdentifier()) {
+    if (!strict_mode() || !expr.AsIdentifier().IsFutureReserved()) {
+      Consume(i::Token::COLON);
+      i::Scanner::Location start_location = scanner_->peek_location();
+      Statement statement = ParseStatement(CHECK_OK);
+      if (strict_mode() && statement.IsFunctionDeclaration()) {
+        i::Scanner::Location end_location = scanner_->location();
+        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                        "strict_function", NULL);
+        *ok = false;
+      }
+      return Statement::Default();
+    }
   }
   // Parsed expression statement.
   ExpectSemicolon(CHECK_OK);
-  return kUnknownStatement;
+  return Statement::ExpressionStatement(expr);
 }
 
 
@@ -320,7 +395,7 @@
     Next();
     ParseStatement(CHECK_OK);
   }
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -337,7 +412,7 @@
     ParseIdentifier(CHECK_OK);
   }
   ExpectSemicolon(CHECK_OK);
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -354,7 +429,7 @@
     ParseIdentifier(CHECK_OK);
   }
   ExpectSemicolon(CHECK_OK);
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -380,7 +455,7 @@
     ParseExpression(true, CHECK_OK);
   }
   ExpectSemicolon(CHECK_OK);
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -388,6 +463,13 @@
   // WithStatement ::
   //   'with' '(' Expression ')' Statement
   Expect(i::Token::WITH, CHECK_OK);
+  if (strict_mode()) {
+    i::Scanner::Location location = scanner_->location();
+    ReportMessageAt(location.beg_pos, location.end_pos,
+                    "strict_mode_with", NULL);
+    *ok = false;
+    return Statement::Default();
+  }
   Expect(i::Token::LPAREN, CHECK_OK);
   ParseExpression(true, CHECK_OK);
   Expect(i::Token::RPAREN, CHECK_OK);
@@ -395,7 +477,7 @@
   scope_->EnterWith();
   ParseStatement(CHECK_OK);
   scope_->LeaveWith();
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -419,13 +501,20 @@
       Expect(i::Token::DEFAULT, CHECK_OK);
       Expect(i::Token::COLON, CHECK_OK);
     } else {
-      ParseStatement(CHECK_OK);
+      i::Scanner::Location start_location = scanner_->peek_location();
+      Statement statement = ParseStatement(CHECK_OK);
+      if (strict_mode() && statement.IsFunctionDeclaration()) {
+        i::Scanner::Location end_location = scanner_->location();
+        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                        "strict_function", NULL);
+        *ok = false;
+        return Statement::Default();
+      }
     }
     token = peek();
   }
-  Expect(i::Token::RBRACE, CHECK_OK);
-
-  return kUnknownStatement;
+  Expect(i::Token::RBRACE, ok);
+  return Statement::Default();
 }
 
 
@@ -438,8 +527,8 @@
   Expect(i::Token::WHILE, CHECK_OK);
   Expect(i::Token::LPAREN, CHECK_OK);
   ParseExpression(true, CHECK_OK);
-  Expect(i::Token::RPAREN, CHECK_OK);
-  return kUnknownStatement;
+  Expect(i::Token::RPAREN, ok);
+  return Statement::Default();
 }
 
 
@@ -451,8 +540,8 @@
   Expect(i::Token::LPAREN, CHECK_OK);
   ParseExpression(true, CHECK_OK);
   Expect(i::Token::RPAREN, CHECK_OK);
-  ParseStatement(CHECK_OK);
-  return kUnknownStatement;
+  ParseStatement(ok);
+  return Statement::Default();
 }
 
 
@@ -472,7 +561,7 @@
         Expect(i::Token::RPAREN, CHECK_OK);
 
         ParseStatement(CHECK_OK);
-        return kUnknownStatement;
+        return Statement::Default();
       }
     } else {
       ParseExpression(false, CHECK_OK);
@@ -482,7 +571,7 @@
         Expect(i::Token::RPAREN, CHECK_OK);
 
         ParseStatement(CHECK_OK);
-        return kUnknownStatement;
+        return Statement::Default();
       }
     }
   }
@@ -500,8 +589,8 @@
   }
   Expect(i::Token::RPAREN, CHECK_OK);
 
-  ParseStatement(CHECK_OK);
-  return kUnknownStatement;
+  ParseStatement(ok);
+  return Statement::Default();
 }
 
 
@@ -515,12 +604,11 @@
     ReportMessageAt(pos.beg_pos, pos.end_pos,
                     "newline_after_throw", NULL);
     *ok = false;
-    return kUnknownStatement;
+    return Statement::Default();
   }
   ParseExpression(true, CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
-
-  return kUnknownStatement;
+  ExpectSemicolon(ok);
+  return Statement::Default();
 }
 
 
@@ -547,12 +635,19 @@
   if (peek() == i::Token::CATCH) {
     Consume(i::Token::CATCH);
     Expect(i::Token::LPAREN, CHECK_OK);
-    ParseIdentifier(CHECK_OK);
+    Identifier id = ParseIdentifier(CHECK_OK);
+    if (strict_mode() && !id.IsValidStrictVariable()) {
+      StrictModeIdentifierViolation(scanner_->location(),
+                                    "strict_catch_variable",
+                                    id,
+                                    ok);
+      return Statement::Default();
+    }
     Expect(i::Token::RPAREN, CHECK_OK);
     scope_->EnterWith();
     ParseBlock(ok);
     scope_->LeaveWith();
-    if (!*ok) return kUnknownStatement;
+    if (!*ok) return Statement::Default();
     catch_or_finally_seen = true;
   }
   if (peek() == i::Token::FINALLY) {
@@ -563,7 +658,7 @@
   if (!catch_or_finally_seen) {
     *ok = false;
   }
-  return kUnknownStatement;
+  return Statement::Default();
 }
 
 
@@ -575,11 +670,19 @@
   //   'debugger' ';'
 
   Expect(i::Token::DEBUGGER, CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
-  return kUnknownStatement;
+  ExpectSemicolon(ok);
+  return Statement::Default();
 }
 
 
+#undef CHECK_OK
+#define CHECK_OK  ok);                     \
+  if (!*ok) return Expression::Default();  \
+  ((void)0
+#define DUMMY )  // to make indentation work
+#undef DUMMY
+
+
 // Precedence = 1
 PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
   // Expression ::
@@ -590,7 +693,7 @@
   while (peek() == i::Token::COMMA) {
     Expect(i::Token::COMMA, CHECK_OK);
     ParseAssignmentExpression(accept_IN, CHECK_OK);
-    result = kUnknownExpression;
+    result = Expression::Default();
   }
   return result;
 }
@@ -603,6 +706,7 @@
   //   ConditionalExpression
   //   LeftHandSideExpression AssignmentOperator AssignmentExpression
 
+  i::Scanner::Location before = scanner_->peek_location();
   Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK);
 
   if (!i::Token::IsAssignmentOp(peek())) {
@@ -610,14 +714,23 @@
     return expression;
   }
 
+  if (strict_mode() && expression.IsIdentifier() &&
+      expression.AsIdentifier().IsEvalOrArguments()) {
+    i::Scanner::Location after = scanner_->location();
+    ReportMessageAt(before.beg_pos, after.end_pos,
+                    "strict_lhs_assignment", NULL);
+    *ok = false;
+    return Expression::Default();
+  }
+
   i::Token::Value op = Next();  // Get assignment operator.
   ParseAssignmentExpression(accept_IN, CHECK_OK);
 
-  if ((op == i::Token::ASSIGN) && (expression == kThisPropertyExpression)) {
+  if ((op == i::Token::ASSIGN) && expression.IsThisProperty()) {
     scope_->AddProperty();
   }
 
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
 
@@ -638,7 +751,7 @@
   ParseAssignmentExpression(true, CHECK_OK);
   Expect(i::Token::COLON, CHECK_OK);
   ParseAssignmentExpression(accept_IN, CHECK_OK);
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
 
@@ -660,7 +773,7 @@
     while (Precedence(peek(), accept_IN) == prec1) {
       Next();
       ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK);
-      result = kUnknownExpression;
+      result = Expression::Default();
     }
   }
   return result;
@@ -681,10 +794,22 @@
   //   '!' UnaryExpression
 
   i::Token::Value op = peek();
-  if (i::Token::IsUnaryOp(op) || i::Token::IsCountOp(op)) {
+  if (i::Token::IsUnaryOp(op)) {
     op = Next();
     ParseUnaryExpression(ok);
-    return kUnknownExpression;
+    return Expression::Default();
+  } else if (i::Token::IsCountOp(op)) {
+    op = Next();
+    i::Scanner::Location before = scanner_->peek_location();
+    Expression expression = ParseUnaryExpression(CHECK_OK);
+    if (strict_mode() && expression.IsIdentifier() &&
+        expression.AsIdentifier().IsEvalOrArguments()) {
+      i::Scanner::Location after = scanner_->location();
+      ReportMessageAt(before.beg_pos, after.end_pos,
+                      "strict_lhs_prefix", NULL);
+      *ok = false;
+    }
+    return Expression::Default();
   } else {
     return ParsePostfixExpression(ok);
   }
@@ -695,11 +820,20 @@
   // PostfixExpression ::
   //   LeftHandSideExpression ('++' | '--')?
 
+  i::Scanner::Location before = scanner_->peek_location();
   Expression expression = ParseLeftHandSideExpression(CHECK_OK);
   if (!scanner_->has_line_terminator_before_next() &&
       i::Token::IsCountOp(peek())) {
+    if (strict_mode() && expression.IsIdentifier() &&
+        expression.AsIdentifier().IsEvalOrArguments()) {
+      i::Scanner::Location after = scanner_->location();
+      ReportMessageAt(before.beg_pos, after.end_pos,
+                      "strict_lhs_postfix", NULL);
+      *ok = false;
+      return Expression::Default();
+    }
     Next();
-    return kUnknownExpression;
+    return Expression::Default();
   }
   return expression;
 }
@@ -709,7 +843,7 @@
   // LeftHandSideExpression ::
   //   (NewExpression | MemberExpression) ...
 
-  Expression result;
+  Expression result = Expression::Default();
   if (peek() == i::Token::NEW) {
     result = ParseNewExpression(CHECK_OK);
   } else {
@@ -722,27 +856,27 @@
         Consume(i::Token::LBRACK);
         ParseExpression(true, CHECK_OK);
         Expect(i::Token::RBRACK, CHECK_OK);
-        if (result == kThisExpression) {
-          result = kThisPropertyExpression;
+        if (result.IsThis()) {
+          result = Expression::ThisProperty();
         } else {
-          result = kUnknownExpression;
+          result = Expression::Default();
         }
         break;
       }
 
       case i::Token::LPAREN: {
         ParseArguments(CHECK_OK);
-        result = kUnknownExpression;
+        result = Expression::Default();
         break;
       }
 
       case i::Token::PERIOD: {
         Consume(i::Token::PERIOD);
         ParseIdentifierName(CHECK_OK);
-        if (result == kThisExpression) {
-          result = kThisPropertyExpression;
+        if (result.IsThis()) {
+          result = Expression::ThisProperty();
         } else {
-          result = kUnknownExpression;
+          result = Expression::Default();
         }
         break;
       }
@@ -788,13 +922,21 @@
   //     ('[' Expression ']' | '.' Identifier | Arguments)*
 
   // Parse the initial primary or function expression.
-  Expression result = kUnknownExpression;
+  Expression result = Expression::Default();
   if (peek() == i::Token::FUNCTION) {
     Consume(i::Token::FUNCTION);
+    Identifier identifier = Identifier::Default();
     if (peek_any_identifier()) {
-      ParseIdentifier(CHECK_OK);
+      identifier = ParseIdentifier(CHECK_OK);
     }
     result = ParseFunctionLiteral(CHECK_OK);
+    if (result.IsStrictFunction() && !identifier.IsValidStrictVariable()) {
+      StrictModeIdentifierViolation(scanner_->location(),
+                                    "strict_function_name",
+                                    identifier,
+                                    ok);
+      return Expression::Default();
+    }
   } else {
     result = ParsePrimaryExpression(CHECK_OK);
   }
@@ -805,20 +947,20 @@
         Consume(i::Token::LBRACK);
         ParseExpression(true, CHECK_OK);
         Expect(i::Token::RBRACK, CHECK_OK);
-        if (result == kThisExpression) {
-          result = kThisPropertyExpression;
+        if (result.IsThis()) {
+          result = Expression::ThisProperty();
         } else {
-          result = kUnknownExpression;
+          result = Expression::Default();
         }
         break;
       }
       case i::Token::PERIOD: {
         Consume(i::Token::PERIOD);
         ParseIdentifierName(CHECK_OK);
-        if (result == kThisExpression) {
-          result = kThisPropertyExpression;
+        if (result.IsThis()) {
+          result = Expression::ThisProperty();
         } else {
-          result = kUnknownExpression;
+          result = Expression::Default();
         }
         break;
       }
@@ -827,7 +969,7 @@
         // Consume one of the new prefixes (already parsed).
         ParseArguments(CHECK_OK);
         new_count--;
-        result = kUnknownExpression;
+        result = Expression::Default();
         break;
       }
       default:
@@ -851,18 +993,27 @@
   //   RegExpLiteral
   //   '(' Expression ')'
 
-  Expression result = kUnknownExpression;
+  Expression result = Expression::Default();
   switch (peek()) {
     case i::Token::THIS: {
       Next();
-      result = kThisExpression;
+      result = Expression::This();
       break;
     }
 
-    case i::Token::IDENTIFIER:
-    case i::Token::FUTURE_RESERVED_WORD: {
-      ParseIdentifier(CHECK_OK);
-      result = kIdentifierExpression;
+    case i::Token::FUTURE_RESERVED_WORD:
+      if (strict_mode()) {
+        Next();
+        i::Scanner::Location location = scanner_->location();
+        ReportMessageAt(location.beg_pos, location.end_pos,
+                        "strict_reserved_word", NULL);
+        *ok = false;
+        return Expression::Default();
+      }
+      // FALLTHROUGH
+    case i::Token::IDENTIFIER: {
+      Identifier id = ParseIdentifier(CHECK_OK);
+      result = Expression::FromIdentifier(id);
       break;
     }
 
@@ -900,7 +1051,7 @@
       parenthesized_function_ = (peek() == i::Token::FUNCTION);
       result = ParseExpression(true, CHECK_OK);
       Expect(i::Token::RPAREN, CHECK_OK);
-      if (result == kIdentifierExpression) result = kUnknownExpression;
+      result = result.Parenthesize();
       break;
 
     case i::Token::MOD:
@@ -910,7 +1061,7 @@
     default: {
       Next();
       *ok = false;
-      return kUnknownExpression;
+      return Expression::Default();
     }
   }
 
@@ -933,7 +1084,7 @@
   Expect(i::Token::RBRACK, CHECK_OK);
 
   scope_->NextMaterializedLiteralIndex();
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
 
@@ -962,7 +1113,7 @@
                 name != i::Token::STRING &&
                 !is_keyword) {
               *ok = false;
-              return kUnknownExpression;
+              return Expression::Default();
             }
             if (!is_keyword) {
               LogSymbol();
@@ -988,7 +1139,7 @@
         } else {
           // Unexpected token.
           *ok = false;
-          return kUnknownExpression;
+          return Expression::Default();
         }
     }
 
@@ -1001,7 +1152,7 @@
   Expect(i::Token::RBRACE, CHECK_OK);
 
   scope_->NextMaterializedLiteralIndex();
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
 
@@ -1013,7 +1164,7 @@
     ReportMessageAt(location.beg_pos, location.end_pos,
                     "unterminated_regexp", NULL);
     *ok = false;
-    return kUnknownExpression;
+    return Expression::Default();
   }
 
   scope_->NextMaterializedLiteralIndex();
@@ -1024,10 +1175,10 @@
     ReportMessageAt(location.beg_pos, location.end_pos,
                     "invalid_regexp_flags", NULL);
     *ok = false;
-    return kUnknownExpression;
+    return Expression::Default();
   }
   Next();
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
 
@@ -1035,16 +1186,21 @@
   // Arguments ::
   //   '(' (AssignmentExpression)*[','] ')'
 
-  Expect(i::Token::LPAREN, CHECK_OK);
+  Expect(i::Token::LPAREN, ok);
+  if (!*ok) return -1;
   bool done = (peek() == i::Token::RPAREN);
   int argc = 0;
   while (!done) {
-    ParseAssignmentExpression(true, CHECK_OK);
+    ParseAssignmentExpression(true, ok);
+    if (!*ok) return -1;
     argc++;
     done = (peek() == i::Token::RPAREN);
-    if (!done) Expect(i::Token::COMMA, CHECK_OK);
+    if (!done) {
+      Expect(i::Token::COMMA, ok);
+      if (!*ok) return -1;
+    }
   }
-  Expect(i::Token::RPAREN, CHECK_OK);
+  Expect(i::Token::RPAREN, ok);
   return argc;
 }
 
@@ -1057,13 +1213,19 @@
   ScopeType outer_scope_type = scope_->type();
   bool inside_with = scope_->IsInsideWith();
   Scope function_scope(&scope_, kFunctionScope);
-
   //  FormalParameterList ::
   //    '(' (Identifier)*[','] ')'
   Expect(i::Token::LPAREN, CHECK_OK);
+  int start_position = scanner_->location().beg_pos;
   bool done = (peek() == i::Token::RPAREN);
   while (!done) {
-    ParseIdentifier(CHECK_OK);
+    Identifier id = ParseIdentifier(CHECK_OK);
+    if (!id.IsValidStrictVariable()) {
+      StrictModeIdentifierViolation(scanner_->location(),
+                                    "strict_param_name",
+                                    id,
+                                    CHECK_OK);
+    }
     done = (peek() == i::Token::RPAREN);
     if (!done) {
       Expect(i::Token::COMMA, CHECK_OK);
@@ -1086,7 +1248,7 @@
     log_->PauseRecording();
     ParseSourceElements(i::Token::RBRACE, ok);
     log_->ResumeRecording();
-    if (!*ok) return kUnknownExpression;
+    if (!*ok) return Expression::Default();
 
     Expect(i::Token::RBRACE, CHECK_OK);
 
@@ -1099,7 +1261,15 @@
     ParseSourceElements(i::Token::RBRACE, CHECK_OK);
     Expect(i::Token::RBRACE, CHECK_OK);
   }
-  return kUnknownExpression;
+
+  if (strict_mode()) {
+    int end_position = scanner_->location().end_pos;
+    CheckOctalLiteral(start_position, end_position, CHECK_OK);
+    CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
+    return Expression::StrictFunction();
+  }
+
+  return Expression::Default();
 }
 
 
@@ -1109,11 +1279,13 @@
 
   Expect(i::Token::MOD, CHECK_OK);
   ParseIdentifier(CHECK_OK);
-  ParseArguments(CHECK_OK);
+  ParseArguments(ok);
 
-  return kUnknownExpression;
+  return Expression::Default();
 }
 
+#undef CHECK_OK
+
 
 void PreParser::ExpectSemicolon(bool* ok) {
   // Check for automatic semicolon insertion according to
@@ -1142,27 +1314,98 @@
 }
 
 
-PreParser::Identifier PreParser::GetIdentifierSymbol() {
+PreParser::Expression PreParser::GetStringSymbol() {
+  const int kUseStrictLength = 10;
+  const char* kUseStrictChars = "use strict";
   LogSymbol();
-  return kUnknownIdentifier;
+  if (scanner_->is_literal_ascii() &&
+      scanner_->literal_length() == kUseStrictLength &&
+      !scanner_->literal_contains_escapes() &&
+      !strncmp(scanner_->literal_ascii_string().start(), kUseStrictChars,
+               kUseStrictLength)) {
+    return Expression::UseStrictStringLiteral();
+  }
+  return Expression::StringLiteral();
 }
 
 
-PreParser::Expression PreParser::GetStringSymbol() {
+PreParser::Identifier PreParser::GetIdentifierSymbol() {
   LogSymbol();
-  return kUnknownExpression;
+  if (scanner_->current_token() == i::Token::FUTURE_RESERVED_WORD) {
+    return Identifier::FutureReserved();
+  }
+  if (scanner_->is_literal_ascii()) {
+    // Detect strict-mode poison words.
+    if (scanner_->literal_length() == 4 &&
+        !strncmp(scanner_->literal_ascii_string().start(), "eval", 4)) {
+      return Identifier::Eval();
+    }
+    if (scanner_->literal_length() == 9 &&
+        !strncmp(scanner_->literal_ascii_string().start(), "arguments", 9)) {
+      return Identifier::Arguments();
+    }
+  }
+  return Identifier::Default();
 }
 
 
 PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
   if (!Check(i::Token::FUTURE_RESERVED_WORD)) {
     Expect(i::Token::IDENTIFIER, ok);
+    if (!*ok) return Identifier::Default();
   }
-  if (!*ok) return kUnknownIdentifier;
   return GetIdentifierSymbol();
 }
 
 
+void PreParser::SetStrictModeViolation(i::Scanner::Location location,
+                                       const char* type,
+                                       bool* ok) {
+  if (strict_mode()) {
+    ReportMessageAt(location.beg_pos, location.end_pos, type, NULL);
+    *ok = false;
+    return;
+  }
+  // Delay report in case this later turns out to be strict code
+  // (i.e., for function names and parameters prior to a "use strict"
+  // directive).
+  strict_mode_violation_location_ = location;
+  strict_mode_violation_type_ = type;
+}
+
+
+void PreParser::CheckDelayedStrictModeViolation(int beg_pos,
+                                                int end_pos,
+                                                bool* ok) {
+  i::Scanner::Location location = strict_mode_violation_location_;
+  if (location.IsValid() &&
+      location.beg_pos > beg_pos && location.end_pos < end_pos) {
+    ReportMessageAt(location.beg_pos, location.end_pos,
+                    strict_mode_violation_type_, NULL);
+    *ok = false;
+  }
+  strict_mode_violation_location_ = i::Scanner::Location::invalid();
+}
+
+
+void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location,
+                                              const char* eval_args_type,
+                                              Identifier identifier,
+                                              bool* ok) {
+  const char* type = eval_args_type;
+  if (identifier.IsFutureReserved()) {
+    type = "strict_reserved_word";
+  }
+  if (strict_mode()) {
+    ReportMessageAt(location.beg_pos, location.end_pos, type, NULL);
+    *ok = false;
+    return;
+  }
+  strict_mode_violation_location_ = location;
+  strict_mode_violation_type_ = type;
+}
+
+
 PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
   i::Token::Value next = Next();
   if (i::Token::IsKeyword(next)) {
@@ -1170,24 +1413,28 @@
     const char* keyword = i::Token::String(next);
     log_->LogAsciiSymbol(pos, i::Vector<const char>(keyword,
                                                     i::StrLength(keyword)));
-    return kUnknownExpression;
+    return Identifier::Default();
   }
   if (next == i::Token::IDENTIFIER ||
       next == i::Token::FUTURE_RESERVED_WORD) {
     return GetIdentifierSymbol();
   }
   *ok = false;
-  return kUnknownIdentifier;
+  return Identifier::Default();
 }
 
+#undef CHECK_OK
+
 
 // This function reads an identifier and determines whether or not it
 // is 'get' or 'set'.
 PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
                                                            bool* is_set,
                                                            bool* ok) {
-  PreParser::Identifier result = ParseIdentifier(CHECK_OK);
-  if (scanner_->is_literal_ascii() && scanner_->literal_length() == 3) {
+  Identifier result = ParseIdentifier(ok);
+  if (!*ok) return Identifier::Default();
+  if (scanner_->is_literal_ascii() &&
+      scanner_->literal_length() == 3) {
     const char* token = scanner_->literal_ascii_string().start();
     *is_get = strncmp(token, "get", 3) == 0;
     *is_set = !*is_get && strncmp(token, "set", 3) == 0;
@@ -1200,6 +1447,4 @@
   return next == i::Token::IDENTIFIER ||
          next == i::Token::FUTURE_RESERVED_WORD;
 }
-
-#undef CHECK_OK
 } }  // v8::preparser
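
The SetStrictModeViolation / CheckDelayedStrictModeViolation pair above implements a two-phase check: a questionable name (eval, arguments, a future reserved word) seen before the function body is parsed is only recorded, and the error is raised afterwards if the body turned out to begin with a "use strict" directive. A standalone sketch of that record-then-confirm pattern (the class and names here are illustrative, not the preparser's):

    #include <cstdio>
    #include <string>

    // Records a possible strict-mode violation seen before any "use strict"
    // directive, and reports it once strictness is actually known.
    class StrictViolationTracker {
     public:
      StrictViolationTracker() : has_pending_(false), pending_pos_(-1) {}

      // Report immediately if we already know we are strict, otherwise
      // remember the location and message for later.
      void Flag(bool currently_strict, int pos, const std::string& message, bool* ok) {
        if (currently_strict) {
          Report(pos, message);
          *ok = false;
          return;
        }
        has_pending_ = true;
        pending_pos_ = pos;
        pending_message_ = message;
      }

      // Called after the body (and any directive prologue) has been seen.
      void CheckDelayed(bool became_strict, int beg_pos, int end_pos, bool* ok) {
        if (became_strict && has_pending_ &&
            pending_pos_ > beg_pos && pending_pos_ < end_pos) {
          Report(pending_pos_, pending_message_);
          *ok = false;
        }
        has_pending_ = false;
      }

     private:
      static void Report(int pos, const std::string& message) {
        std::fprintf(stderr, "error at %d: %s\n", pos, message.c_str());
      }
      bool has_pending_;
      int pending_pos_;
      std::string pending_message_;
    };

    int main() {
      // E.g. "function arguments() { 'use strict'; }": the name is flagged
      // before the directive is seen, and reported only once strictness is
      // confirmed for the enclosing function.
      bool ok = true;
      StrictViolationTracker tracker;
      tracker.Flag(/*currently_strict=*/false, /*pos=*/9, "strict_function_name", &ok);
      tracker.CheckDelayed(/*became_strict=*/true, /*beg_pos=*/0, /*end_pos=*/40, &ok);
      return ok ? 0 : 1;
    }
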
diff --git a/src/preparser.h b/src/preparser.h
index b7fa6c7..2efd53e 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,7 +33,7 @@
 
 // Preparsing checks a JavaScript program and emits preparse-data that helps
 // a later parsing to be faster.
-// See preparse-data.h for the data.
+// See preparse-data-format.h for the data format.
 
 // The PreParser checks that the syntax follows the grammar for JavaScript,
 // and collects some information about the program along the way.
@@ -67,41 +67,226 @@
   }
 
  private:
+  // These types form an algebra over syntactic categories that is just
+  // rich enough to let us recognize and propagate the constructs that
+  // are either being counted in the preparser data, or are important
+  // for throwing the correct syntax errors.
+
   enum ScopeType {
     kTopLevelScope,
     kFunctionScope
   };
 
-  // Types that allow us to recognize simple this-property assignments.
-  // A simple this-property assignment is a statement on the form
-  // "this.propertyName = {primitive constant or function parameter name);"
-  // where propertyName isn't "__proto__".
-  // The result is only relevant if the function body contains only
-  // simple this-property assignments.
+  class Expression;
 
-  enum StatementType {
-    kUnknownStatement
+  class Identifier {
+   public:
+    static Identifier Default() {
+      return Identifier(kUnknownIdentifier);
+    }
+    static Identifier Eval()  {
+      return Identifier(kEvalIdentifier);
+    }
+    static Identifier Arguments()  {
+      return Identifier(kArgumentsIdentifier);
+    }
+    static Identifier FutureReserved()  {
+      return Identifier(kFutureReservedIdentifier);
+    }
+    bool IsEval() { return type_ == kEvalIdentifier; }
+    bool IsArguments() { return type_ == kArgumentsIdentifier; }
+    bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
+    bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
+    bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
+   private:
+    enum Type {
+      kUnknownIdentifier,
+      kFutureReservedIdentifier,
+      kEvalIdentifier,
+      kArgumentsIdentifier
+    };
+    explicit Identifier(Type type) : type_(type) { }
+    Type type_;
+
+    friend class Expression;
   };
 
-  enum ExpressionType {
-    kUnknownExpression,
-    kIdentifierExpression,  // Used to detect labels.
-    kThisExpression,
-    kThisPropertyExpression
+  // Bits 0 and 1 are used to identify the type of expression:
+  // If bit 0 is set, it's an identifier.
+  // If bit 1 is set, it's a string literal.
+  // If neither is set, it's of no particular type; having both set is not
+  // used yet.
+  // Bit 2 is used to mark the expression as being parenthesized,
+  // so "(foo)" isn't recognized as a pure identifier (and possible label).
+  class Expression {
+   public:
+    static Expression Default() {
+      return Expression(kUnknownExpression);
+    }
+
+    static Expression FromIdentifier(Identifier id) {
+      return Expression(kIdentifierFlag | (id.type_ << kIdentifierShift));
+    }
+
+    static Expression StringLiteral() {
+      return Expression(kUnknownStringLiteral);
+    }
+
+    static Expression UseStrictStringLiteral() {
+      return Expression(kUseStrictString);
+    }
+
+    static Expression This() {
+      return Expression(kThisExpression);
+    }
+
+    static Expression ThisProperty() {
+      return Expression(kThisPropertyExpression);
+    }
+
+    static Expression StrictFunction() {
+      return Expression(kStrictFunctionExpression);
+    }
+
+    bool IsIdentifier() {
+      return (code_ & kIdentifierFlag) != 0;
+    }
+
+    // Only works correctly if it is actually an identifier expression.
+    PreParser::Identifier AsIdentifier() {
+      return PreParser::Identifier(
+          static_cast<PreParser::Identifier::Type>(code_ >> kIdentifierShift));
+    }
+
+    bool IsParenthesized() {
+      // If bit 0 or 1 is set, we interpret bit 2 as meaning parenthesized.
+      return (code_ & 7) > 4;
+    }
+
+    bool IsRawIdentifier() {
+      return !IsParenthesized() && IsIdentifier();
+    }
+
+    bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
+
+    bool IsRawStringLiteral() {
+      return !IsParenthesized() && IsStringLiteral();
+    }
+
+    bool IsUseStrictLiteral() {
+      return (code_ & kStringLiteralMask) == kUseStrictString;
+    }
+
+    bool IsThis() {
+      return code_ == kThisExpression;
+    }
+
+    bool IsThisProperty() {
+      return code_ == kThisPropertyExpression;
+    }
+
+    bool IsStrictFunction() {
+      return code_ == kStrictFunctionExpression;
+    }
+
+    Expression Parenthesize() {
+      int type = code_ & 3;
+      if (type != 0) {
+        // Identifiers and string literals can be parenthesized.
+        // They no longer work as labels or directive prologues,
+        // but are still recognized in other contexts.
+        return Expression(code_ | kParentesizedExpressionFlag);
+      }
+      // For other types of expressions, it's not important to remember
+      // the parentheses.
+      return *this;
+    }
+
+   private:
+    // First two/three bits are used as flags.
+    // Bits 0 and 1 represent identifiers or string literals, and are
+    // mutually exclusive, but can both be absent.
+    // If bit 0 or 1 are set, bit 2 marks that the expression has
+    // been wrapped in parentheses (a string literal can no longer
+    // be a directive prologue, and an identifier can no longer be
+    // a label).
+    enum  {
+      kUnknownExpression = 0,
+      // Identifiers
+      kIdentifierFlag = 1,  // Used to detect labels.
+      kIdentifierShift = 3,
+
+      kStringLiteralFlag = 2,  // Used to detect directive prologue.
+      kUnknownStringLiteral = kStringLiteralFlag,
+      kUseStrictString = kStringLiteralFlag | 8,
+      kStringLiteralMask = kUseStrictString,
+
+      kParentesizedExpressionFlag = 4,  // Only if identifier or string literal.
+
+      // Below here applies if neither identifier nor string literal.
+      kThisExpression = 4,
+      kThisPropertyExpression = 8,
+      kStrictFunctionExpression = 12
+    };
+
+    explicit Expression(int expression_code) : code_(expression_code) { }
+
+    int code_;
   };
 
-  enum IdentifierType {
-    kUnknownIdentifier
+  class Statement {
+   public:
+    static Statement Default() {
+      return Statement(kUnknownStatement);
+    }
+
+    static Statement FunctionDeclaration() {
+      return Statement(kFunctionDeclaration);
+    }
+
+    // Creates expression statement from expression.
+    // Preserves being an unparenthesized string literal, possibly
+    // "use strict".
+    static Statement ExpressionStatement(Expression expression) {
+      if (!expression.IsParenthesized()) {
+        if (expression.IsUseStrictLiteral()) {
+          return Statement(kUseStrictExpressionStatement);
+        }
+        if (expression.IsStringLiteral()) {
+          return Statement(kStringLiteralExpressionStatement);
+        }
+      }
+      return Default();
+    }
+
+    bool IsStringLiteral() {
+      return code_ != kUnknownStatement;
+    }
+
+    bool IsUseStrictLiteral() {
+      return code_ == kUseStrictExpressionStatement;
+    }
+
+    bool IsFunctionDeclaration() {
+      return code_ == kFunctionDeclaration;
+    }
+
+   private:
+    enum Type {
+      kUnknownStatement,
+      kStringLiteralExpressionStatement,
+      kUseStrictExpressionStatement,
+      kFunctionDeclaration
+    };
+
+    explicit Statement(Type code) : code_(code) {}
+    Type code_;
   };
 
-  enum SourceElementTypes {
+  enum SourceElements {
     kUnknownSourceElements
   };
 
-  typedef int SourceElements;
-  typedef int Expression;
-  typedef int Statement;
-  typedef int Identifier;
   typedef int Arguments;
 
   class Scope {
@@ -112,7 +297,8 @@
           type_(type),
           materialized_literal_count_(0),
           expected_properties_(0),
-          with_nesting_count_(0) {
+          with_nesting_count_(0),
+          strict_((prev_ != NULL) && prev_->is_strict()) {
       *variable = this;
     }
     ~Scope() { *variable_ = prev_; }
@@ -122,6 +308,8 @@
     int expected_properties() { return expected_properties_; }
     int materialized_literal_count() { return materialized_literal_count_; }
     bool IsInsideWith() { return with_nesting_count_ != 0; }
+    bool is_strict() { return strict_; }
+    void set_strict() { strict_ = true; }
     void EnterWith() { with_nesting_count_++; }
     void LeaveWith() { with_nesting_count_--; }
 
@@ -132,6 +320,7 @@
     int materialized_literal_count_;
     int expected_properties_;
     int with_nesting_count_;
+    bool strict_;
   };
 
   // Private constructor only used in PreParseProgram.
@@ -143,6 +332,8 @@
         log_(log),
         scope_(NULL),
         stack_limit_(stack_limit),
+        strict_mode_violation_location_(i::Scanner::Location::invalid()),
+        strict_mode_violation_type_(NULL),
         stack_overflow_(false),
         allow_lazy_(true),
         parenthesized_function_(false) { }
@@ -152,10 +343,13 @@
   PreParseResult PreParse() {
     Scope top_scope(&scope_, kTopLevelScope);
     bool ok = true;
+    int start_position = scanner_->peek_location().beg_pos;
     ParseSourceElements(i::Token::EOS, &ok);
     if (stack_overflow_) return kPreParseStackOverflow;
     if (!ok) {
       ReportUnexpectedToken(scanner_->current_token());
+    } else if (scope_->is_strict()) {
+      CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
     }
     return kPreParseSuccess;
   }
@@ -169,6 +363,8 @@
     log_->LogMessage(start_pos, end_pos, type, name_opt);
   }
 
+  void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+
   // All ParseXXX functions take as the last argument an *ok parameter
   // which is set to false if parsing failed; it is unchanged otherwise.
   // By making the 'exception handling' explicit, we are forced to check
@@ -245,6 +441,12 @@
 
   bool peek_any_identifier();
 
+  void set_strict_mode() {
+    scope_->set_strict();
+  }
+
+  bool strict_mode() { return scope_->is_strict(); }
+
   void Consume(i::Token::Value token) { Next(); }
 
   void Expect(i::Token::Value token, bool* ok) {
@@ -265,10 +467,23 @@
 
   static int Precedence(i::Token::Value tok, bool accept_IN);
 
+  void SetStrictModeViolation(i::Scanner::Location,
+                              const char* type,
+                              bool *ok);
+
+  void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
+
+  void StrictModeIdentifierViolation(i::Scanner::Location,
+                                     const char* eval_args_type,
+                                     Identifier identifier,
+                                     bool* ok);
+
   i::JavaScriptScanner* scanner_;
   i::ParserRecorder* log_;
   Scope* scope_;
   uintptr_t stack_limit_;
+  i::Scanner::Location strict_mode_violation_location_;
+  const char* strict_mode_violation_type_;
   bool stack_overflow_;
   bool allow_lazy_;
   bool parenthesized_function_;
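
The new Expression class above packs what the preparser needs to remember about an expression into a small bit code: bit 0 marks an identifier, bit 1 a string literal, bit 2 (when bit 0 or 1 is set) a parenthesized form, and the higher bits carry the identifier kind or the "use strict" flag. A reduced standalone sketch of the same encoding, covering only the string-literal/parenthesis part and using illustrative names rather than the preparser's:

    #include <cassert>

    // Reduced flag encoding: a string literal is tracked so a directive
    // prologue can be detected, and wrapping it in parentheses removes its
    // "directive" status without forgetting that it is still a string.
    class ExprCode {
     public:
      static ExprCode Default()          { return ExprCode(0); }
      static ExprCode StringLiteral()    { return ExprCode(kStringLiteralFlag); }
      static ExprCode UseStrictLiteral() { return ExprCode(kStringLiteralFlag | kUseStrictBit); }

      bool IsStringLiteral() const { return (code_ & kStringLiteralFlag) != 0; }
      bool IsParenthesized() const { return (code_ & kParenthesizedFlag) != 0; }
      bool IsUseStrictDirective() const {
        // Only an unparenthesized "use strict" string literal counts.
        return IsStringLiteral() && (code_ & kUseStrictBit) != 0 && !IsParenthesized();
      }

      ExprCode Parenthesize() const {
        if (IsStringLiteral()) return ExprCode(code_ | kParenthesizedFlag);
        return *this;  // for other expressions the parentheses don't matter
      }

     private:
      enum {
        kStringLiteralFlag = 1 << 1,
        kParenthesizedFlag = 1 << 2,
        kUseStrictBit      = 1 << 3
      };
      explicit ExprCode(int code) : code_(code) {}
      int code_;
    };

    int main() {
      assert(ExprCode::UseStrictLiteral().IsUseStrictDirective());                   // "use strict";
      assert(!ExprCode::UseStrictLiteral().Parenthesize().IsUseStrictDirective());   // ("use strict");
      assert(!ExprCode::StringLiteral().IsUseStrictDirective());                     // any other string
    }
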
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index c777ab4..60288a9 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -370,7 +370,10 @@
 
 
 void PrettyPrinter::VisitUnaryOperation(UnaryOperation* node) {
-  Print("(%s", Token::String(node->op()));
+  Token::Value op = node->op();
+  bool needsSpace =
+      op == Token::DELETE || op == Token::TYPEOF || op == Token::VOID;
+  Print("(%s%s", Token::String(op), needsSpace ? " " : "");
   Visit(node->expression());
   Print(")");
 }
@@ -388,7 +391,7 @@
 void PrettyPrinter::VisitBinaryOperation(BinaryOperation* node) {
   Print("(");
   Visit(node->left());
-  Print("%s", Token::String(node->op()));
+  Print(" %s ", Token::String(node->op()));
   Visit(node->right());
   Print(")");
 }
@@ -397,7 +400,7 @@
 void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
   Print("(");
   Visit(node->left());
-  Print("%s", Token::String(node->op()));
+  Print(" %s ", Token::String(node->op()));
   Visit(node->right());
   Print(")");
 }
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 451b17e..080081d 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -28,6 +28,7 @@
 #ifndef V8_PRETTYPRINTER_H_
 #define V8_PRETTYPRINTER_H_
 
+#include "allocation.h"
 #include "ast.h"
 
 namespace v8 {
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 4cf62e2..c954c4f 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,14 +28,15 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 #include "v8.h"
+
+#include "profile-generator-inl.h"
+
 #include "global-handles.h"
 #include "heap-profiler.h"
 #include "scopeinfo.h"
 #include "unicode.h"
 #include "zone-inl.h"
 
-#include "profile-generator-inl.h"
-
 namespace v8 {
 namespace internal {
 
@@ -1735,7 +1736,7 @@
   switch (object->map()->instance_type()) {
     case MAP_TYPE: return "system / Map";
     case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
-    case PROXY_TYPE: return "system / Proxy";
+    case FOREIGN_TYPE: return "system / Foreign";
     case ODDBALL_TYPE: return "system / Oddball";
 #define MAKE_STRUCT_CASE(NAME, Name, name) \
     case NAME##_TYPE: return "system / "#Name;
@@ -1864,9 +1865,11 @@
     SetInternalReference(obj, entry,
                          "constructor", map->constructor(),
                          Map::kConstructorOffset);
-    SetInternalReference(obj, entry,
-                         "descriptors", map->instance_descriptors(),
-                         Map::kInstanceDescriptorsOffset);
+    if (!map->instance_descriptors()->IsEmpty()) {
+      SetInternalReference(obj, entry,
+                           "descriptors", map->instance_descriptors(),
+                           Map::kInstanceDescriptorsOrBitField3Offset);
+    }
     SetInternalReference(obj, entry,
                          "code_cache", map->code_cache(),
                          Map::kCodeCacheOffset);
@@ -1904,7 +1907,7 @@
     HandleScope hs;
     JSFunction* func = JSFunction::cast(js_obj);
     Context* context = func->context();
-    ZoneScope zscope(DELETE_ON_EXIT);
+    ZoneScope zscope(Isolate::Current(), DELETE_ON_EXIT);
     SerializedScopeInfo* serialized_scope_info =
         context->closure()->shared()->scope_info();
     ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
diff --git a/src/profile-generator.h b/src/profile-generator.h
index bbc9efc..5b789ac 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -30,6 +30,7 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
+#include "allocation.h"
 #include "hashmap.h"
 #include "../include/v8-profiler.h"
 
diff --git a/src/property.cc b/src/property.cc
index c35fb83..dd23209 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -74,6 +74,9 @@
       PrintF(out, " -callback object:\n");
       GetCallbackObject()->Print(out);
       break;
+    case HANDLER:
+      PrintF(out, " -type = lookup proxy\n");
+      break;
     case INTERCEPTOR:
       PrintF(out, " -type = lookup interceptor\n");
       break;
diff --git a/src/property.h b/src/property.h
index ee95ca2..87f9ea3 100644
--- a/src/property.h
+++ b/src/property.h
@@ -28,6 +28,8 @@
 #ifndef V8_PROPERTY_H_
 #define V8_PROPERTY_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -155,24 +157,15 @@
 class CallbacksDescriptor:  public Descriptor {
  public:
   CallbacksDescriptor(String* key,
-                      Object* proxy,
+                      Object* foreign,
                       PropertyAttributes attributes,
                       int index = 0)
-      : Descriptor(key, proxy, attributes, CALLBACKS, index) {}
+      : Descriptor(key, foreign, attributes, CALLBACKS, index) {}
 };
 
 
 class LookupResult BASE_EMBEDDED {
  public:
-  // Where did we find the result;
-  enum {
-    NOT_FOUND,
-    DESCRIPTOR_TYPE,
-    DICTIONARY_TYPE,
-    INTERCEPTOR_TYPE,
-    CONSTANT_TYPE
-  } lookup_type_;
-
   LookupResult()
       : lookup_type_(NOT_FOUND),
         cacheable_(true),
@@ -209,6 +202,12 @@
     number_ = entry;
   }
 
+  void HandlerResult() {
+    lookup_type_ = HANDLER_TYPE;
+    holder_ = NULL;
+    details_ = PropertyDetails(NONE, HANDLER);
+  }
+
   void InterceptorResult(JSObject* holder) {
     lookup_type_ = INTERCEPTOR_TYPE;
     holder_ = holder;
@@ -243,6 +242,7 @@
   bool IsDontEnum() { return details_.IsDontEnum(); }
   bool IsDeleted() { return details_.IsDeleted(); }
   bool IsFound() { return lookup_type_ != NOT_FOUND; }
+  bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
 
   // Is the result is a property excluding transitions and the null
   // descriptor?
@@ -343,6 +343,16 @@
   }
 
  private:
+  // Where did we find the result;
+  enum {
+    NOT_FOUND,
+    DESCRIPTOR_TYPE,
+    DICTIONARY_TYPE,
+    HANDLER_TYPE,
+    INTERCEPTOR_TYPE,
+    CONSTANT_TYPE
+  } lookup_type_;
+
   JSObject* holder_;
   int number_;
   bool cacheable_;
diff --git a/src/proxy.js b/src/proxy.js
new file mode 100644
index 0000000..c11852b
--- /dev/null
+++ b/src/proxy.js
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+global.Proxy = new $Object();
+
+var $Proxy = global.Proxy
+
+var fundamentalTraps = [
+  "getOwnPropertyDescriptor",
+  "getPropertyDescriptor",
+  "getOwnPropertyNames",
+  "getPropertyNames",
+  "defineProperty",
+  "delete",
+  "fix",
+]
+
+var derivedTraps = [
+  "has",
+  "hasOwn",
+  "get",
+  "set",
+  "enumerate",
+  "keys",
+]
+
+var functionTraps = [
+  "callTrap",
+  "constructTrap",
+]
+
+$Proxy.createFunction = function(handler, callTrap, constructTrap) {
+  handler.callTrap = callTrap
+  handler.constructTrap = constructTrap
+  $Proxy.create(handler)
+}
+
+$Proxy.create = function(handler, proto) {
+  if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
+  return %CreateJSProxy(handler, proto)
+}
+
+
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Builtins
+////////////////////////////////////////////////////////////////////////////////
+
+function DerivedGetTrap(receiver, name) {
+  var desc = this.getPropertyDescriptor(name)
+  if (IS_UNDEFINED(desc)) { return desc; }
+  if ('value' in desc) {
+    return desc.value
+  } else {
+    if (IS_UNDEFINED(desc.get)) { return desc.get; }
+    return desc.get.call(receiver)  // The proposal says so...
+  }
+}
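Note on the new proxy.js above: it only installs Proxy.create and Proxy.createFunction on a fresh Proxy global and routes creation through %CreateJSProxy; the trap name lists are declared but not yet consumed. A minimal usage sketch, assuming a shell built with the experimental library enabled (the shell invocation and the print builtin are assumptions, not part of this change):

    // Hypothetical script exercising the entry points defined in proxy.js.
    var handler = {
      getPropertyDescriptor: function(name) {  // fundamental trap
        return {value: 'got ' + name, writable: true,
                enumerable: true, configurable: true};
      }
    };
    var p = Proxy.create(handler, Object.prototype);  // allocates a JSProxy
    // How far property access on p is actually routed through the handler's
    // traps depends on runtime support beyond what this change adds.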
diff --git a/src/regexp.js b/src/regexp.js
index f68dee6..7b851a3 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -235,7 +235,7 @@
   // Conversion is required by the ES5 specification (RegExp.prototype.exec
   // algorithm, step 5) even if the value is discarded for non-global RegExps.
   var i = TO_INTEGER(lastIndex);
-  
+
   if (this.global) {
     if (i < 0 || i > string.length) {
       this.lastIndex = 0;
@@ -250,11 +250,11 @@
     }
     lastMatchInfoOverride = null;
     this.lastIndex = lastMatchInfo[CAPTURE1];
-    return true;    
+    return true;
   } else {
     // Non-global regexp.
-    // Remove irrelevant preceeding '.*' in a non-global test regexp. 
-    // The expression checks whether this.source starts with '.*' and 
+    // Remove irrelevant preceding '.*' in a non-global test regexp.
+    // The expression checks whether this.source starts with '.*' and
     // that the third char is not a '?'.
     if (%_StringCharCodeAt(this.source, 0) == 46 &&  // '.'
         %_StringCharCodeAt(this.source, 1) == 42 &&  // '*'
@@ -262,14 +262,14 @@
       if (!%_ObjectEquals(regexp_key, this)) {
         regexp_key = this;
         regexp_val = new $RegExp(SubString(this.source, 2, this.source.length),
-                                 (!this.ignoreCase 
+                                 (!this.ignoreCase
                                   ? !this.multiline ? "" : "m"
                                   : !this.multiline ? "i" : "im"));
       }
       if (%_RegExpExec(regexp_val, string, 0, lastMatchInfo) === null) {
         return false;
       }
-    }    
+    }
     %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
     // matchIndices is either null or the lastMatchInfo array.
     var matchIndices = %_RegExpExec(this, string, 0, lastMatchInfo);
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 97f0341..ce9a308 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -153,6 +153,7 @@
   if (FLAG_trace_opt) {
     PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
     function->PrintName();
+    PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
     PrintF(" for recompilation");
     if (delay > 0) {
       PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
@@ -170,7 +171,7 @@
   // Debug::has_break_points().
   ASSERT(function->IsMarkedForLazyRecompilation());
   if (!FLAG_use_osr ||
-      isolate_->debug()->has_break_points() ||
+      isolate_->DebuggerHasBreakPoints() ||
       function->IsBuiltin()) {
     return;
   }
diff --git a/src/runtime.cc b/src/runtime.cc
index 855bd41..77b3f66 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -42,17 +42,19 @@
 #include "execution.h"
 #include "global-handles.h"
 #include "jsregexp.h"
+#include "json-parser.h"
 #include "liveedit.h"
 #include "liveobjectlist-inl.h"
 #include "parser.h"
 #include "platform.h"
-#include "runtime.h"
 #include "runtime-profiler.h"
+#include "runtime.h"
 #include "scopeinfo.h"
 #include "smart-pointer.h"
+#include "string-search.h"
 #include "stub-cache.h"
 #include "v8threads.h"
-#include "string-search.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -587,10 +589,22 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
+  ASSERT(args.length() == 2);
+  Object* handler = args[0];
+  Object* prototype = args[1];
+  Object* used_prototype =
+      (prototype->IsJSObject() || prototype->IsJSProxy()) ? prototype
+          : isolate->heap()->null_value();
+  return isolate->heap()->AllocateJSProxy(handler, used_prototype);
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCatchExtensionObject) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, key, args[0]);
   Object* value = args[1];
+  ASSERT(!value->IsFailure());
   // Create a catch context extension object.
   JSFunction* constructor =
       isolate->context()->global_context()->
@@ -2600,7 +2614,7 @@
   int capture_count = regexp_handle->CaptureCount();
 
   // CompiledReplacement uses zone allocation.
-  CompilationZoneScope zone(DELETE_ON_EXIT);
+  CompilationZoneScope zone(isolate, DELETE_ON_EXIT);
   CompiledReplacement compiled_replacement;
   compiled_replacement.Compile(replacement_handle,
                                capture_count,
@@ -3114,7 +3128,7 @@
   }
   int length = subject->length();
 
-  CompilationZoneScope zone_space(DELETE_ON_EXIT);
+  CompilationZoneScope zone_space(isolate, DELETE_ON_EXIT);
   ZoneList<int> offsets(8);
   do {
     int start;
@@ -3871,6 +3885,17 @@
       if (proto->IsNull()) return *obj_value;
       js_object = Handle<JSObject>::cast(proto);
     }
+
+    // Don't allow element properties to be redefined on objects with external
+    // array elements.
+    if (js_object->HasExternalArrayElements()) {
+      Handle<Object> args[2] = { js_object, name };
+      Handle<Object> error =
+          isolate->factory()->NewTypeError("redef_external_array_element",
+                                           HandleVector(args, 2));
+      return isolate->Throw(*error);
+    }
+
     NormalizeElements(js_object);
     Handle<NumberDictionary> dictionary(js_object->element_dictionary());
     // Make sure that we never go back to fast case.
@@ -4106,6 +4131,23 @@
 }
 
 
+// Set the ES5 native flag on the function.
+// This is used to decide if we should transform null and undefined
+// into the global object when doing call and apply.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetES5Flag) {
+  NoHandleAllocation ha;
+  RUNTIME_ASSERT(args.length() == 1);
+
+  Handle<Object> object = args.at<Object>(0);
+
+  if (object->IsJSFunction()) {
+    JSFunction* func = JSFunction::cast(*object);
+    func->shared()->set_es5_native(true);
+  }
+  return isolate->heap()->undefined_value();
+}
+
+
 // Set a local property, even if it is READ_ONLY.  If the property does not
 // exist, it will be added with attributes NONE.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
@@ -4165,25 +4207,33 @@
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, key, args[1]);
 
+  uint32_t index;
+  const bool key_is_array_index = key->AsArrayIndex(&index);
+
   Object* obj = args[0];
   // Only JS objects can have properties.
   if (obj->IsJSObject()) {
     JSObject* object = JSObject::cast(obj);
-    // Fast case - no interceptors.
+    // Fast case: either the key is a real named property or it is not
+    // an array index and there are no interceptors or hidden
+    // prototypes.
     if (object->HasRealNamedProperty(key)) return isolate->heap()->true_value();
-    // Slow case.  Either it's not there or we have an interceptor.  We should
-    // have handles for this kind of deal.
+    Map* map = object->map();
+    if (!key_is_array_index &&
+        !map->has_named_interceptor() &&
+        !HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
+      return isolate->heap()->false_value();
+    }
+    // Slow case.
     HandleScope scope(isolate);
     return HasLocalPropertyImplementation(isolate,
                                           Handle<JSObject>(object),
                                           Handle<String>(key));
-  } else if (obj->IsString()) {
+  } else if (obj->IsString() && key_is_array_index) {
     // Well, there is one exception:  Handle [] on strings.
-    uint32_t index;
-    if (key->AsArrayIndex(&index)) {
-      String* string = String::cast(obj);
-      if (index < static_cast<uint32_t>(string->length()))
-        return isolate->heap()->true_value();
+    String* string = String::cast(obj);
+    if (index < static_cast<uint32_t>(string->length())) {
+      return isolate->heap()->true_value();
     }
   }
   return isolate->heap()->false_value();
@@ -5564,7 +5614,7 @@
 
   static const int kMaxInitialListCapacity = 16;
 
-  ZoneScope scope(DELETE_ON_EXIT);
+  ZoneScope scope(isolate, DELETE_ON_EXIT);
 
   // Find (up to limit) indices of separator and end-of-string in subject
   int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
@@ -6160,6 +6210,135 @@
   return answer;
 }
 
+template <typename Char>
+static void JoinSparseArrayWithSeparator(FixedArray* elements,
+                                         int elements_length,
+                                         uint32_t array_length,
+                                         String* separator,
+                                         Vector<Char> buffer) {
+  int previous_separator_position = 0;
+  int separator_length = separator->length();
+  int cursor = 0;
+  for (int i = 0; i < elements_length; i += 2) {
+    int position = NumberToInt32(elements->get(i));
+    String* string = String::cast(elements->get(i + 1));
+    int string_length = string->length();
+    if (string->length() > 0) {
+      while (previous_separator_position < position) {
+        String::WriteToFlat<Char>(separator, &buffer[cursor],
+                                  0, separator_length);
+        cursor += separator_length;
+        previous_separator_position++;
+      }
+      String::WriteToFlat<Char>(string, &buffer[cursor],
+                                0, string_length);
+      cursor += string->length();
+    }
+  }
+  if (separator_length > 0) {
+    // Array length must be representable as a signed 32-bit number,
+    // otherwise the total string length would have been too large.
+    ASSERT(array_length <= 0x7fffffff);  // Is int32_t.
+    int last_array_index = static_cast<int>(array_length - 1);
+    while (previous_separator_position < last_array_index) {
+      String::WriteToFlat<Char>(separator, &buffer[cursor],
+                                0, separator_length);
+      cursor += separator_length;
+      previous_separator_position++;
+    }
+  }
+  ASSERT(cursor <= buffer.length());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+  CONVERT_CHECKED(JSArray, elements_array, args[0]);
+  RUNTIME_ASSERT(elements_array->HasFastElements());
+  CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
+  CONVERT_CHECKED(String, separator, args[2]);
+  // elements_array is a fast-mode JSArray of alternating positions
+  // (increasing order) and strings.
+  // array_length is the length of the original array (used to add separators);
+  // separator is the string to put between elements. Assumed to be non-empty.
+
+  // Find total length of join result.
+  int string_length = 0;
+  bool is_ascii = true;
+  int max_string_length = SeqAsciiString::kMaxLength;
+  bool overflow = false;
+  CONVERT_NUMBER_CHECKED(int, elements_length,
+                         Int32, elements_array->length());
+  RUNTIME_ASSERT((elements_length & 1) == 0);  // Even length.
+  FixedArray* elements = FixedArray::cast(elements_array->elements());
+  for (int i = 0; i < elements_length; i += 2) {
+    RUNTIME_ASSERT(elements->get(i)->IsNumber());
+    CONVERT_CHECKED(String, string, elements->get(i + 1));
+    int length = string->length();
+    if (is_ascii && !string->IsAsciiRepresentation()) {
+      is_ascii = false;
+      max_string_length = SeqTwoByteString::kMaxLength;
+    }
+    if (length > max_string_length ||
+        max_string_length - length < string_length) {
+      overflow = true;
+      break;
+    }
+    string_length += length;
+  }
+  int separator_length = separator->length();
+  if (!overflow && separator_length > 0) {
+    if (array_length <= 0x7fffffffu) {
+      int separator_count = static_cast<int>(array_length) - 1;
+      int remaining_length = max_string_length - string_length;
+      if ((remaining_length / separator_length) >= separator_count) {
+        string_length += separator_length * (array_length - 1);
+      } else {
+        // No room for the separators within the maximal string length.
+        overflow = true;
+      }
+    } else {
+      // Nonempty separator and at least 2^31-1 separators necessary
+      // means that the string is too large to create.
+      STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
+      overflow = true;
+    }
+  }
+  if (overflow) {
+    // Throw OutOfMemory exception for creating too large a string.
+    V8::FatalProcessOutOfMemory("Array join result too large.");
+  }
+
+  if (is_ascii) {
+    MaybeObject* result_allocation =
+        isolate->heap()->AllocateRawAsciiString(string_length);
+    if (result_allocation->IsFailure()) return result_allocation;
+    SeqAsciiString* result_string =
+        SeqAsciiString::cast(result_allocation->ToObjectUnchecked());
+    JoinSparseArrayWithSeparator<char>(elements,
+                                       elements_length,
+                                       array_length,
+                                       separator,
+                                       Vector<char>(result_string->GetChars(),
+                                                    string_length));
+    return result_string;
+  } else {
+    MaybeObject* result_allocation =
+        isolate->heap()->AllocateRawTwoByteString(string_length);
+    if (result_allocation->IsFailure()) return result_allocation;
+    SeqTwoByteString* result_string =
+        SeqTwoByteString::cast(result_allocation->ToObjectUnchecked());
+    JoinSparseArrayWithSeparator<uc16>(elements,
+                                       elements_length,
+                                       array_length,
+                                       separator,
+                                       Vector<uc16>(result_string->GetChars(),
+                                                    string_length));
+    return result_string;
+  }
+}
+
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
   NoHandleAllocation ha;
@@ -6598,9 +6777,16 @@
   int exponent = number->get_exponent();
   int sign = number->get_sign();
 
-  // We compare with kSmiValueSize - 3 because (2^30 - 0.1) has exponent 29 and
-  // should be rounded to 2^30, which is not smi.
-  if (!sign && exponent <= kSmiValueSize - 3) {
+  if (exponent < -1) {
+    // Number in range ]-0.5..0.5[. These always round to +/-zero.
+    if (sign) return isolate->heap()->minus_zero_value();
+    return Smi::FromInt(0);
+  }
+
+  // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
+  // should be rounded to 2^30, which is not a smi (for 31-bit smis, a similar
+  // argument holds for 32-bit smis).
+  if (!sign && exponent < kSmiValueSize - 2) {
     return Smi::FromInt(static_cast<int>(value + 0.5));
   }
 
@@ -7292,13 +7478,13 @@
   // If the function is not optimizable or debugger is active continue using the
   // code from the full compiler.
   if (!function->shared()->code()->optimizable() ||
-      isolate->debug()->has_break_points()) {
+      isolate->DebuggerHasBreakPoints()) {
     if (FLAG_trace_opt) {
       PrintF("[failed to optimize ");
       function->PrintName();
       PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
           function->shared()->code()->optimizable() ? "T" : "F",
-          isolate->debug()->has_break_points() ? "T" : "F");
+          isolate->DebuggerHasBreakPoints() ? "T" : "F");
     }
     function->ReplaceCode(function->shared()->code());
     return function->code();
@@ -7418,6 +7604,29 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  if (!V8::UseCrankshaft()) {
+    return Smi::FromInt(4);  // 4 == "never".
+  }
+  if (FLAG_always_opt) {
+    return Smi::FromInt(3);  // 3 == "always".
+  }
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  return function->IsOptimized() ? Smi::FromInt(1)   // 1 == "yes".
+                                 : Smi::FromInt(2);  // 2 == "no".
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  return Smi::FromInt(function->shared()->opt_count());
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -7689,8 +7898,8 @@
 }
 
 
-static JSObject* ComputeReceiverForNonGlobal(Isolate* isolate,
-                                             JSObject* holder) {
+static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
+                                           JSObject* holder) {
   ASSERT(!holder->IsGlobalObject());
   Context* top = isolate->context();
   // Get the context extension function.
@@ -7702,10 +7911,11 @@
   // explicitly via a with-statement.
   Object* constructor = holder->map()->constructor();
   if (constructor != context_extension_function) return holder;
-  // Fall back to using the global object as the receiver if the
-  // property turns out to be a local variable allocated in a context
-  // extension object - introduced via eval.
-  return top->global()->global_receiver();
+  // Fall back to using the global object as the implicit receiver if
+  // the property turns out to be a local variable allocated in a
+  // context extension object - introduced via eval. Implicit global
+  // receivers are indicated with the hole value.
+  return isolate->heap()->the_hole_value();
 }
 
 
@@ -7733,30 +7943,38 @@
     // If the "property" we were looking for is a local variable or an
     // argument in a context, the receiver is the global object; see
     // ECMA-262, 3rd., 10.1.6 and 10.2.3.
-    JSObject* receiver =
-        isolate->context()->global()->global_receiver();
+    //
+    // Use the hole as the receiver to signal that the receiver is
+    // implicit and that the global receiver should be used.
+    Handle<Object> receiver = isolate->factory()->the_hole_value();
     MaybeObject* value = (holder->IsContext())
         ? Context::cast(*holder)->get(index)
         : JSObject::cast(*holder)->GetElement(index);
-    return MakePair(Unhole(isolate->heap(), value, attributes), receiver);
+    return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
   }
 
   // If the holder is found, we read the property from it.
   if (!holder.is_null() && holder->IsJSObject()) {
     ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
     JSObject* object = JSObject::cast(*holder);
-    JSObject* receiver;
+    Object* receiver;
     if (object->IsGlobalObject()) {
       receiver = GlobalObject::cast(object)->global_receiver();
     } else if (context->is_exception_holder(*holder)) {
-      receiver = isolate->context()->global()->global_receiver();
+      // Use the hole as the receiver to signal that the receiver is
+      // implicit and that the global receiver should be used.
+      receiver = isolate->heap()->the_hole_value();
     } else {
       receiver = ComputeReceiverForNonGlobal(isolate, object);
     }
+
+    // GetProperty below can cause GC.
+    Handle<Object> receiver_handle(receiver);
+
     // No need to unhole the value here. This is taken care of by the
     // GetProperty function.
     MaybeObject* value = object->GetProperty(*name);
-    return MakePair(value, receiver);
+    return MakePair(value, *receiver_handle);
   }
 
   if (throw_error) {
@@ -7766,7 +7984,7 @@
                                               HandleVector(&name, 1));
     return MakePair(isolate->Throw(*reference_error), NULL);
   } else {
-    // The property doesn't exist - return undefined
+    // The property doesn't exist - return undefined.
     return MakePair(isolate->heap()->undefined_value(),
                     isolate->heap()->undefined_value());
   }
@@ -8150,13 +8368,41 @@
 }
 
 
+bool CodeGenerationFromStringsAllowed(Isolate* isolate,
+                                      Handle<Context> context) {
+  if (context->allow_code_gen_from_strings()->IsFalse()) {
+    // Check with callback if set.
+    AllowCodeGenerationFromStringsCallback callback =
+        isolate->allow_code_gen_callback();
+    if (callback == NULL) {
+      // No callback set and code generation disallowed.
+      return false;
+    } else {
+      // Callback set. Let it decide if code generation is allowed.
+      VMState state(isolate, EXTERNAL);
+      return callback(v8::Utils::ToLocal(context));
+    }
+  }
+  return true;
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
   CONVERT_ARG_CHECKED(String, source, 0);
 
-  // Compile source string in the global context.
+  // Extract global context.
   Handle<Context> context(isolate->context()->global_context());
+
+  // Check if global context allows code generation from
+  // strings. Throw an exception if it doesn't.
+  if (!CodeGenerationFromStringsAllowed(isolate, context)) {
+    return isolate->Throw(*isolate->factory()->NewError(
+        "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
+  }
+
+  // Compile source string in the global context.
   Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
                                                             context,
                                                             true,
@@ -8174,17 +8420,28 @@
                                     Handle<String> source,
                                     Handle<Object> receiver,
                                     StrictModeFlag strict_mode) {
+  Handle<Context> context = Handle<Context>(isolate->context());
+  Handle<Context> global_context = Handle<Context>(context->global_context());
+
+  // Check if global context allows code generation from
+  // strings. Throw an exception if it doesn't.
+  if (!CodeGenerationFromStringsAllowed(isolate, global_context)) {
+    isolate->Throw(*isolate->factory()->NewError(
+        "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
+    return MakePair(Failure::Exception(), NULL);
+  }
+
   // Deal with a normal eval call with a string argument. Compile it
   // and return the compiled function bound in the local context.
   Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
       source,
       Handle<Context>(isolate->context()),
-      isolate->context()->IsGlobalContext(),
+      context->IsGlobalContext(),
       strict_mode);
   if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
   Handle<JSFunction> compiled =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(
-          shared, Handle<Context>(isolate->context()), NOT_TENURED);
+          shared, context, NOT_TENURED);
   return MakePair(*compiled, *receiver);
 }
 
@@ -8238,12 +8495,8 @@
   if (!context->IsGlobalContext()) {
     // 'eval' is not bound in the global context. Just call the function
     // with the given arguments. This is not necessarily the global eval.
-    if (receiver->IsContext()) {
-      context = Handle<Context>::cast(receiver);
-      receiver = Handle<Object>(context->get(index), isolate);
-    } else if (receiver->IsJSContextExtensionObject()) {
-      receiver = Handle<JSObject>(
-          isolate->context()->global()->global_receiver(), isolate);
+    if (receiver->IsContext() || receiver->IsJSContextExtensionObject()) {
+      receiver = isolate->factory()->the_hole_value();
     }
     return MakePair(*callee, *receiver);
   }
@@ -8252,8 +8505,7 @@
   // Compare it to the builtin 'GlobalEval' function to make sure.
   if (*callee != isolate->global_context()->global_eval_fun() ||
       !args[1]->IsString()) {
-    return MakePair(*callee,
-                    isolate->context()->global()->global_receiver());
+    return MakePair(*callee, isolate->heap()->the_hole_value());
   }
 
   ASSERT(args[3]->IsSmi());
@@ -8275,8 +8527,7 @@
   // Compare it to the builtin 'GlobalEval' function to make sure.
   if (*callee != isolate->global_context()->global_eval_fun() ||
       !args[1]->IsString()) {
-    return MakePair(*callee,
-                    isolate->context()->global()->global_receiver());
+    return MakePair(*callee, isolate->heap()->the_hole_value());
   }
 
   ASSERT(args[3]->IsSmi());
@@ -8597,43 +8848,48 @@
       int dense_elements_length;
       switch (kind) {
         case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
-        dense_elements_length =
-            ExternalPixelArray::cast(object->elements())->length();
+          dense_elements_length =
+              ExternalPixelArray::cast(object->elements())->length();
           break;
         }
         case JSObject::EXTERNAL_BYTE_ELEMENTS: {
-        dense_elements_length =
-            ExternalByteArray::cast(object->elements())->length();
+          dense_elements_length =
+              ExternalByteArray::cast(object->elements())->length();
           break;
         }
         case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
-        dense_elements_length =
-            ExternalUnsignedByteArray::cast(object->elements())->length();
+          dense_elements_length =
+              ExternalUnsignedByteArray::cast(object->elements())->length();
           break;
         }
         case JSObject::EXTERNAL_SHORT_ELEMENTS: {
-        dense_elements_length =
-            ExternalShortArray::cast(object->elements())->length();
+          dense_elements_length =
+              ExternalShortArray::cast(object->elements())->length();
           break;
         }
         case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
-        dense_elements_length =
-            ExternalUnsignedShortArray::cast(object->elements())->length();
+          dense_elements_length =
+              ExternalUnsignedShortArray::cast(object->elements())->length();
           break;
         }
         case JSObject::EXTERNAL_INT_ELEMENTS: {
-        dense_elements_length =
-            ExternalIntArray::cast(object->elements())->length();
+          dense_elements_length =
+              ExternalIntArray::cast(object->elements())->length();
           break;
         }
         case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
-        dense_elements_length =
-            ExternalUnsignedIntArray::cast(object->elements())->length();
+          dense_elements_length =
+              ExternalUnsignedIntArray::cast(object->elements())->length();
           break;
         }
         case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
-        dense_elements_length =
-            ExternalFloatArray::cast(object->elements())->length();
+          dense_elements_length =
+              ExternalFloatArray::cast(object->elements())->length();
+          break;
+        }
+        case JSObject::EXTERNAL_DOUBLE_ELEMENTS: {
+          dense_elements_length =
+              ExternalDoubleArray::cast(object->elements())->length();
           break;
         }
         default:
@@ -8767,6 +9023,11 @@
           isolate, receiver, false, false, visitor);
       break;
     }
+    case JSObject::EXTERNAL_DOUBLE_ELEMENTS: {
+      IterateExternalArrayElements<ExternalDoubleArray, double>(
+          isolate, receiver, false, false, visitor);
+      break;
+    }
     default:
       UNREACHABLE();
       break;
@@ -9110,7 +9371,7 @@
       return result->GetConstantFunction();
     case CALLBACKS: {
       Object* structure = result->GetCallbackObject();
-      if (structure->IsProxy() || structure->IsAccessorInfo()) {
+      if (structure->IsForeign() || structure->IsAccessorInfo()) {
         MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
             receiver, structure, name, result->holder());
         if (!maybe_value->ToObject(&value)) {
@@ -10479,7 +10740,7 @@
   // Recursively copy the with contexts.
   Handle<Context> previous(context_chain->previous());
   Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
-  Handle<Context> context = CopyWithContextChain(function_context, previous);
+  Handle<Context> context = CopyWithContextChain(previous, function_context);
   return context->GetIsolate()->factory()->NewWithContext(
       context, extension, context_chain->IsCatchContext());
 }
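The GetOptimizationStatus and GetOptimizationCount runtime entries added above are callable through the natives syntax. A minimal sketch of how a test script might exercise them, assuming the shell is run with --allow-natives-syntax and provides print (both are assumptions about the shell, not part of this change):

    function f(x) { return x + 1; }   // small function to warm up and optimize
    f(1); f(2);
    %OptimizeFunctionOnNextCall(f);   // pre-existing runtime entry used above
    f(3);
    // Status codes match the Smi values returned by Runtime_GetOptimizationStatus:
    // 1 == "yes", 2 == "no", 3 == "always", 4 == "never".
    print(%GetOptimizationStatus(f));
    print(%GetOptimizationCount(f));  // shared()->opt_count() for f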
diff --git a/src/runtime.h b/src/runtime.h
index bf1ba68..d3223d1 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -28,6 +28,7 @@
 #ifndef V8_RUNTIME_H_
 #define V8_RUNTIME_H_
 
+#include "allocation.h"
 #include "zone.h"
 
 namespace v8 {
@@ -86,9 +87,12 @@
   F(NotifyOSR, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \
   F(OptimizeFunctionOnNextCall, 1, 1) \
+  F(GetOptimizationStatus, 1, 1) \
+  F(GetOptimizationCount, 1, 1) \
   F(CompileForOnStackReplacement, 1, 1) \
   F(SetNewFunctionAttributes, 1, 1) \
   F(AllocateInNewSpace, 1, 1) \
+  F(SetES5Flag, 1, 1) \
   \
   /* Array join support */ \
   F(PushIfAbsent, 2, 1) \
@@ -132,6 +136,7 @@
   F(StringAdd, 2, 1) \
   F(StringBuilderConcat, 3, 1) \
   F(StringBuilderJoin, 3, 1) \
+  F(SparseJoinWithSeparator, 3, 1)            \
   \
   /* Bit operations */ \
   F(NumberOr, 2, 1) \
@@ -270,6 +275,9 @@
   F(CreateArrayLiteral, 3, 1) \
   F(CreateArrayLiteralShallow, 3, 1) \
   \
+  /* Harmony proxies */ \
+  F(CreateJSProxy, 2, 1) \
+  \
   /* Catch context extension objects */ \
   F(CreateCatchExtensionObject, 2, 1) \
   \
diff --git a/src/runtime.js b/src/runtime.js
index 66d839b..77b97ae 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -49,41 +49,47 @@
 const $Boolean = global.Boolean;
 const $NaN = 0/0;
 
-
-// ECMA-262, section 11.9.1, page 55.
+// ECMA-262 Section 11.9.3.
 function EQUALS(y) {
   if (IS_STRING(this) && IS_STRING(y)) return %StringEquals(this, y);
   var x = this;
 
-  // NOTE: We use iteration instead of recursion, because it is
-  // difficult to call EQUALS with the correct setting of 'this' in
-  // an efficient way.
   while (true) {
     if (IS_NUMBER(x)) {
-      if (y == null) return 1;  // not equal
-      return %NumberEquals(x, %ToNumber(y));
-    } else if (IS_STRING(x)) {
-      if (IS_STRING(y)) return %StringEquals(x, y);
-      if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
-      if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
-      if (y == null) return 1;  // not equal
-      y = %ToPrimitive(y, NO_HINT);
-    } else if (IS_BOOLEAN(x)) {
-      if (IS_BOOLEAN(y)) {
-        return %_ObjectEquals(x, y) ? 0 : 1;
+      while (true) {
+        if (IS_NUMBER(y)) return %NumberEquals(x, y);
+        if (IS_NULL_OR_UNDEFINED(y)) return 1;  // not equal
+        if (!IS_SPEC_OBJECT(y)) {
+          // String or boolean.
+          return %NumberEquals(x, %ToNumber(y));
+        }
+        y = %ToPrimitive(y, NO_HINT);
       }
-      if (y == null) return 1;  // not equal
-      return %NumberEquals(%ToNumber(x), %ToNumber(y));
-    } else if (x == null) {
-      // NOTE: This checks for both null and undefined.
-      return (y == null) ? 0 : 1;
+    } else if (IS_STRING(x)) {
+      while (true) {
+        if (IS_STRING(y)) return %StringEquals(x, y);
+        if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
+        if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+        if (IS_NULL_OR_UNDEFINED(y)) return 1;  // not equal
+        y = %ToPrimitive(y, NO_HINT);
+      }
+    } else if (IS_BOOLEAN(x)) {
+      if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
+      if (IS_NULL_OR_UNDEFINED(y)) return 1;
+      if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
+      if (IS_STRING(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
+      // y is an object.
+      x = %ToNumber(x);
+      y = %ToPrimitive(y, NO_HINT);
+    } else if (IS_NULL_OR_UNDEFINED(x)) {
+      return IS_NULL_OR_UNDEFINED(y) ? 0 : 1;
     } else {
-      // x is not a number, boolean, null or undefined.
-      if (y == null) return 1;  // not equal
+      // x is an object.
       if (IS_SPEC_OBJECT(y)) {
         return %_ObjectEquals(x, y) ? 0 : 1;
       }
-
+      if (IS_NULL_OR_UNDEFINED(y)) return 1;  // not equal
+      if (IS_BOOLEAN(y)) y = %ToNumber(y);
       x = %ToPrimitive(x, NO_HINT);
     }
   }
@@ -638,6 +644,6 @@
 // NOTE: Setting the prototype for Array must take place as early as
 // possible due to code generation for array literals.  When
 // generating code for a array literal a boilerplate array is created
-// that is cloned when running the code.  It is essiential that the
+// that is cloned when running the code.  It is essential that the
 // boilerplate gets the right prototype.
 %FunctionSetPrototype($Array, new $Array(0));
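For reference, a few plain JavaScript comparisons the rewritten EQUALS loop above must handle (illustration only; the expected results follow ECMA-262 Section 11.9.3):

    1 == "1"                                     // true: string converted via ToNumber
    null == undefined                            // true: the IS_NULL_OR_UNDEFINED case
    true == 1                                    // true: boolean converted to number
    ({valueOf: function() { return 2; }}) == 2   // true: object goes through ToPrimitive
    null == 0                                    // false: null only equals undefined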
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
index 084a0b4..de537f9 100644
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -28,6 +28,7 @@
 #ifndef V8_SAFEPOINT_TABLE_H_
 #define V8_SAFEPOINT_TABLE_H_
 
+#include "allocation.h"
 #include "heap.h"
 #include "v8memory.h"
 #include "zone.h"
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
index 9715ca9..e15ef41 100644
--- a/src/scanner-base.cc
+++ b/src/scanner-base.cc
@@ -38,8 +38,7 @@
 // Scanner
 
 Scanner::Scanner(UnicodeCache* unicode_cache)
-    : unicode_cache_(unicode_cache),
-      octal_pos_(kNoOctalLocation) { }
+    : unicode_cache_(unicode_cache) { }
 
 
 uc32 Scanner::ScanHexEscape(uc32 c, int length) {
@@ -70,34 +69,12 @@
 }
 
 
-// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
-// ECMA-262. Other JS VMs support them.
-uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
-  uc32 x = c - '0';
-  int i = 0;
-  for (; i < length; i++) {
-    int d = c0_ - '0';
-    if (d < 0 || d > 7) break;
-    int nx = x * 8 + d;
-    if (nx >= 256) break;
-    x = nx;
-    Advance();
-  }
-  // Anything excelt '\0' is an octal escape sequence, illegal in strict mode.
-  // Remember the position of octal escape sequences so that better error
-  // can be reported later (in strict mode).
-  if (c != '0' || i > 0) {
-    octal_pos_ = source_pos() - i - 1;     // Already advanced
-  }
-  return x;
-}
-
 
 // ----------------------------------------------------------------------------
 // JavaScriptScanner
 
 JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_contants)
-    : Scanner(scanner_contants) { }
+  : Scanner(scanner_contants), octal_pos_(Location::invalid()) { }
 
 
 Token::Value JavaScriptScanner::Next() {
@@ -518,6 +495,31 @@
 }
 
 
+// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
+// ECMA-262. Other JS VMs support them.
+uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
+  uc32 x = c - '0';
+  int i = 0;
+  for (; i < length; i++) {
+    int d = c0_ - '0';
+    if (d < 0 || d > 7) break;
+    int nx = x * 8 + d;
+    if (nx >= 256) break;
+    x = nx;
+    Advance();
+  }
+  // Anything except '\0' is an octal escape sequence, illegal in strict mode.
+  // Remember the position of octal escape sequences so that an error
+  // can be reported later (in strict mode).
+  // We don't report the error immediately, because the octal escape can
+  // occur before the "use strict" directive.
+  if (c != '0' || i > 0) {
+    octal_pos_ = Location(source_pos() - i - 1, source_pos() - 1);
+  }
+  return x;
+}
+
+
 Token::Value JavaScriptScanner::ScanString() {
   uc32 quote = c0_;
   Advance();  // consume quote
@@ -562,6 +564,7 @@
   } else {
     // if the first character is '0' we must check for octals and hex
     if (c0_ == '0') {
+      int start_pos = source_pos();  // For reporting octal positions.
       AddLiteralCharAdvance();
 
       // either 0, 0exxx, 0Exxx, 0.xxx, an octal number, or a hex number
@@ -586,7 +589,7 @@
           }
           if (c0_  < '0' || '7'  < c0_) {
             // Octal literal finished.
-            octal_pos_ = next_.location.beg_pos;
+            octal_pos_ = Location(start_pos, source_pos());
             break;
           }
           AddLiteralCharAdvance();
@@ -729,6 +732,9 @@
       // worrying whether the following characters are part of the escape
       // or not, since any '/', '\\' or '[' is guaranteed to not be part
       // of the escape sequence.
+
+      // TODO(896): At some point, parse RegExps more thoroughly to capture
+      // octal escapes in strict mode.
     } else {  // Unescaped character.
       if (c0_ == '[') in_character_class = true;
       if (c0_ == ']') in_character_class = false;
diff --git a/src/scanner-base.h b/src/scanner-base.h
index 60b97d2..02566dd 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -30,14 +30,13 @@
 #ifndef V8_SCANNER_BASE_H_
 #define V8_SCANNER_BASE_H_
 
-#include "globals.h"
-#include "checks.h"
 #include "allocation.h"
+#include "char-predicates.h"
+#include "checks.h"
+#include "globals.h"
 #include "token.h"
 #include "unicode-inl.h"
-#include "char-predicates.h"
 #include "utils.h"
-#include "list-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -287,23 +286,17 @@
       return beg_pos >= 0 && end_pos >= beg_pos;
     }
 
+    static Location invalid() { return Location(-1, -1); }
+
     int beg_pos;
     int end_pos;
   };
 
-  static Location NoLocation() {
-    return Location(-1, -1);
-  }
-
   // Returns the location information for the current token
   // (the token returned by Next()).
   Location location() const { return current_.location; }
   Location peek_location() const { return next_.location; }
 
-  // Returns the location of the last seen octal literal
-  int octal_position() const { return octal_pos_; }
-  void clear_octal_position() { octal_pos_ = -1; }
-
   // Returns the literal string, if any, for the current token (the
   // token returned by Next()). The string is 0-terminated and in
   // UTF-8 format; they may contain 0-characters. Literal strings are
@@ -327,6 +320,16 @@
     return current_.literal_chars->length();
   }
 
+  bool literal_contains_escapes() const {
+    Location location = current_.location;
+    int source_length = (location.end_pos - location.beg_pos);
+    if (current_.token == Token::STRING) {
+      // Subtract delimiters.
+      source_length -= 2;
+    }
+    return current_.literal_chars->length() != source_length;
+  }
+
   // Returns the literal string for the next token (the token that
   // would be returned if Next() were called).
   bool is_next_literal_ascii() {
@@ -418,9 +421,6 @@
 
   uc32 ScanHexEscape(uc32 c, int length);
 
-  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
-  uc32 ScanOctalEscape(uc32 c, int length);
-
   // Return the current source position.
   int source_pos() {
     return source_->pos() - kCharacterLookaheadBufferSize;
@@ -438,9 +438,6 @@
   // Input stream. Must be initialized to an UC16CharacterStream.
   UC16CharacterStream* source_;
 
-  // Start position of the octal literal last scanned.
-  int octal_pos_;
-
   // One Unicode character look-ahead; c0_ < 0 at the end of the input.
   uc32 c0_;
 };
@@ -493,6 +490,13 @@
   // Used for checking if a property name is an identifier.
   static bool IsIdentifier(unibrow::CharacterStream* buffer);
 
+  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+  uc32 ScanOctalEscape(uc32 c, int length);
+
+  // Returns the location of the last seen octal literal
+  Location octal_position() const { return octal_pos_; }
+  void clear_octal_position() { octal_pos_ = Location::invalid(); }
+
   // Seek forward to the given position.  This operation does not
   // work in general, for instance when there are pushed back
   // characters, but works for seeking forward until simple delimiter
@@ -522,6 +526,9 @@
   // If the escape sequence cannot be decoded the result is kBadChar.
   uc32 ScanIdentifierUnicodeEscape();
 
+  // Start position of the octal literal last scanned.
+  Location octal_pos_;
+
   bool has_line_terminator_before_next_;
 };
 
diff --git a/src/scanner.cc b/src/scanner.cc
index 666818e..21a0c2d 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -342,244 +342,4 @@
 }
 
 
-// ----------------------------------------------------------------------------
-// JsonScanner
-
-JsonScanner::JsonScanner(UnicodeCache* unicode_cache)
-    : Scanner(unicode_cache) { }
-
-
-void JsonScanner::Initialize(UC16CharacterStream* source) {
-  source_ = source;
-  Init();
-  // Skip initial whitespace.
-  SkipJsonWhiteSpace();
-  // Preload first token as look-ahead.
-  ScanJson();
-}
-
-
-Token::Value JsonScanner::Next() {
-  // BUG 1215673: Find a thread safe way to set a stack limit in
-  // pre-parse mode. Otherwise, we cannot safely pre-parse from other
-  // threads.
-  current_ = next_;
-  // Check for stack-overflow before returning any tokens.
-  ScanJson();
-  return current_.token;
-}
-
-
-bool JsonScanner::SkipJsonWhiteSpace() {
-  int start_position = source_pos();
-  // JSON WhiteSpace is tab, carrige-return, newline and space.
-  while (c0_ == ' ' || c0_ == '\n' || c0_ == '\r' || c0_ == '\t') {
-    Advance();
-  }
-  return source_pos() != start_position;
-}
-
-
-void JsonScanner::ScanJson() {
-  next_.literal_chars = NULL;
-  Token::Value token;
-  do {
-    // Remember the position of the next token
-    next_.location.beg_pos = source_pos();
-    switch (c0_) {
-      case '\t':
-      case '\r':
-      case '\n':
-      case ' ':
-        Advance();
-        token = Token::WHITESPACE;
-        break;
-      case '{':
-        Advance();
-        token = Token::LBRACE;
-        break;
-      case '}':
-        Advance();
-        token = Token::RBRACE;
-        break;
-      case '[':
-        Advance();
-        token = Token::LBRACK;
-        break;
-      case ']':
-        Advance();
-        token = Token::RBRACK;
-        break;
-      case ':':
-        Advance();
-        token = Token::COLON;
-        break;
-      case ',':
-        Advance();
-        token = Token::COMMA;
-        break;
-      case '"':
-        token = ScanJsonString();
-        break;
-      case '-':
-      case '0':
-      case '1':
-      case '2':
-      case '3':
-      case '4':
-      case '5':
-      case '6':
-      case '7':
-      case '8':
-      case '9':
-        token = ScanJsonNumber();
-        break;
-      case 't':
-        token = ScanJsonIdentifier("true", Token::TRUE_LITERAL);
-        break;
-      case 'f':
-        token = ScanJsonIdentifier("false", Token::FALSE_LITERAL);
-        break;
-      case 'n':
-        token = ScanJsonIdentifier("null", Token::NULL_LITERAL);
-        break;
-      default:
-        if (c0_ < 0) {
-          Advance();
-          token = Token::EOS;
-        } else {
-          Advance();
-          token = Select(Token::ILLEGAL);
-        }
-    }
-  } while (token == Token::WHITESPACE);
-
-  next_.location.end_pos = source_pos();
-  next_.token = token;
-}
-
-
-Token::Value JsonScanner::ScanJsonString() {
-  ASSERT_EQ('"', c0_);
-  Advance();
-  LiteralScope literal(this);
-  while (c0_ != '"') {
-    // Check for control character (0x00-0x1f) or unterminated string (<0).
-    if (c0_ < 0x20) return Token::ILLEGAL;
-    if (c0_ != '\\') {
-      AddLiteralCharAdvance();
-    } else {
-      Advance();
-      switch (c0_) {
-        case '"':
-        case '\\':
-        case '/':
-          AddLiteralChar(c0_);
-          break;
-        case 'b':
-          AddLiteralChar('\x08');
-          break;
-        case 'f':
-          AddLiteralChar('\x0c');
-          break;
-        case 'n':
-          AddLiteralChar('\x0a');
-          break;
-        case 'r':
-          AddLiteralChar('\x0d');
-          break;
-        case 't':
-          AddLiteralChar('\x09');
-          break;
-        case 'u': {
-          uc32 value = 0;
-          for (int i = 0; i < 4; i++) {
-            Advance();
-            int digit = HexValue(c0_);
-            if (digit < 0) {
-              return Token::ILLEGAL;
-            }
-            value = value * 16 + digit;
-          }
-          AddLiteralChar(value);
-          break;
-        }
-        default:
-          return Token::ILLEGAL;
-      }
-      Advance();
-    }
-  }
-  literal.Complete();
-  Advance();
-  return Token::STRING;
-}
-
-
-Token::Value JsonScanner::ScanJsonNumber() {
-  LiteralScope literal(this);
-  bool negative = false;
-
-  if (c0_ == '-') {
-    AddLiteralCharAdvance();
-    negative = true;
-  }
-  if (c0_ == '0') {
-    AddLiteralCharAdvance();
-    // Prefix zero is only allowed if it's the only digit before
-    // a decimal point or exponent.
-    if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
-  } else {
-    int i = 0;
-    int digits = 0;
-    if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
-    do {
-      i = i * 10 + c0_ - '0';
-      digits++;
-      AddLiteralCharAdvance();
-    } while (c0_ >= '0' && c0_ <= '9');
-    if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
-      number_ = (negative ? -i : i);
-      return Token::NUMBER;
-    }
-  }
-  if (c0_ == '.') {
-    AddLiteralCharAdvance();
-    if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
-    do {
-      AddLiteralCharAdvance();
-    } while (c0_ >= '0' && c0_ <= '9');
-  }
-  if (AsciiAlphaToLower(c0_) == 'e') {
-    AddLiteralCharAdvance();
-    if (c0_ == '-' || c0_ == '+') AddLiteralCharAdvance();
-    if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
-    do {
-      AddLiteralCharAdvance();
-    } while (c0_ >= '0' && c0_ <= '9');
-  }
-  literal.Complete();
-  ASSERT_NOT_NULL(next_.literal_chars);
-  number_ = StringToDouble(unicode_cache_,
-                           next_.literal_chars->ascii_literal(),
-                           NO_FLAGS,  // Hex, octal or trailing junk.
-                           OS::nan_value());
-  return Token::NUMBER;
-}
-
-
-Token::Value JsonScanner::ScanJsonIdentifier(const char* text,
-                                             Token::Value token) {
-  LiteralScope literal(this);
-  while (*text != '\0') {
-    if (c0_ != *text) return Token::ILLEGAL;
-    Advance();
-    text++;
-  }
-  if (unicode_cache_->IsIdentifierPart(c0_)) return Token::ILLEGAL;
-  literal.Complete();
-  return token;
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/scanner.h b/src/scanner.h
index 871c69b..804fac8 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -141,56 +141,6 @@
 };
 
 
-class JsonScanner : public Scanner {
- public:
-  explicit JsonScanner(UnicodeCache* unicode_cache);
-
-  void Initialize(UC16CharacterStream* source);
-
-  // Returns the next token.
-  Token::Value Next();
-
-  // Returns the value of a number token.
-  double number() {
-    return number_;
-  }
-
-
- protected:
-  // Skip past JSON whitespace (only space, tab, newline and carrige-return).
-  bool SkipJsonWhiteSpace();
-
-  // Scan a single JSON token. The JSON lexical grammar is specified in the
-  // ECMAScript 5 standard, section 15.12.1.1.
-  // Recognizes all of the single-character tokens directly, or calls a function
-  // to scan a number, string or identifier literal.
-  // The only allowed whitespace characters between tokens are tab,
-  // carriage-return, newline and space.
-  void ScanJson();
-
-  // A JSON number (production JSONNumber) is a subset of the valid JavaScript
-  // decimal number literals.
-  // It includes an optional minus sign, must have at least one
-  // digit before and after a decimal point, may not have prefixed zeros (unless
-  // the integer part is zero), and may include an exponent part (e.g., "e-10").
-  // Hexadecimal and octal numbers are not allowed.
-  Token::Value ScanJsonNumber();
-
-  // A JSON string (production JSONString) is subset of valid JavaScript string
-  // literals. The string must only be double-quoted (not single-quoted), and
-  // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
-  // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
-  Token::Value ScanJsonString();
-
-  // Used to recognizes one of the literals "true", "false", or "null". These
-  // are the only valid JSON identifiers (productions JSONBooleanLiteral,
-  // JSONNullLiteral).
-  Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
-
-  // Holds the value of a scanned number token.
-  double number_;
-};
-
 } }  // namespace v8::internal
 
 #endif  // V8_SCANNER_H_
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 58e2ad2..ccc2cc8 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,6 +52,7 @@
 ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
     : function_name_(FACTORY->empty_symbol()),
       calls_eval_(scope->calls_eval()),
+      is_strict_mode_(scope->is_strict_mode()),
       parameters_(scope->num_parameters()),
       stack_slots_(scope->num_stack_slots()),
       context_slots_(scope->num_heap_slots()),
@@ -248,6 +249,7 @@
     Object** p = p0;
     p = ReadSymbol(p, &function_name_);
     p = ReadBool(p, &calls_eval_);
+    p = ReadBool(p, &is_strict_mode_);
     p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
     p = ReadList<Allocator>(p, &parameters_);
     p = ReadList<Allocator>(p, &stack_slots_);
@@ -301,8 +303,8 @@
 
 template<class Allocator>
 Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
-  // function name, calls eval, length for 3 tables:
-  const int extra_slots = 1 + 1 + 3;
+  // function name, calls eval, is_strict_mode, length for 3 tables:
+  const int extra_slots = 1 + 1 + 1 + 3;
   int length = extra_slots +
                context_slots_.length() * 2 +
                parameters_.length() +
@@ -316,6 +318,7 @@
   Object** p = p0;
   p = WriteSymbol(p, function_name_);
   p = WriteBool(p, calls_eval_);
+  p = WriteBool(p, is_strict_mode_);
   p = WriteList(p, &context_slots_, &context_modes_);
   p = WriteList(p, &parameters_);
   p = WriteList(p, &stack_slots_);
@@ -363,7 +366,8 @@
 
 Object** SerializedScopeInfo::ContextEntriesAddr() {
   ASSERT(length() > 0);
-  return data_start() + 2;  // +2 for function name and calls eval.
+  // +3 for function name, calls eval, strict mode.
+  return data_start() + 3;
 }
 
 
@@ -392,7 +396,18 @@
     p = ReadBool(p, &calls_eval);
     return calls_eval;
   }
-  return true;
+  return false;
+}
+
+
+bool SerializedScopeInfo::IsStrictMode() {
+  if (length() > 0) {
+    Object** p = data_start() + 2;  // +2 for function name, calls eval.
+    bool strict_mode;
+    p = ReadBool(p, &strict_mode);
+    return strict_mode;
+  }
+  return false;
 }
 
 
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 2552af2..ff72013 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_SCOPEINFO_H_
 #define V8_SCOPEINFO_H_
 
+#include "allocation.h"
 #include "variables.h"
 #include "zone-inl.h"
 
@@ -92,6 +93,7 @@
  private:
   Handle<String> function_name_;
   bool calls_eval_;
+  bool is_strict_mode_;
   List<Handle<String>, Allocator > parameters_;
   List<Handle<String>, Allocator > stack_slots_;
   List<Handle<String>, Allocator > context_slots_;
@@ -112,6 +114,9 @@
   // Does this scope call eval?
   bool CallsEval();
 
+  // Is this scope a strict mode scope?
+  bool IsStrictMode();
+
   // Does this scope have an arguments shadow?
   bool HasArgumentsShadow() {
     return StackSlotIndex(GetHeap()->arguments_shadow_symbol()) >= 0;
diff --git a/src/scopes.cc b/src/scopes.cc
index 8df93c5..6102442 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -199,6 +199,7 @@
   // Inherit the strict mode from the parent scope.
   strict_mode_ = (outer_scope != NULL) && outer_scope->strict_mode_;
   outer_scope_calls_eval_ = false;
+  outer_scope_calls_non_strict_eval_ = false;
   inner_scope_calls_eval_ = false;
   outer_scope_is_eval_scope_ = false;
   force_eager_compilation_ = false;
@@ -489,8 +490,17 @@
   // and assume they may invoke eval themselves. Eventually we could capture
   // this information in the ScopeInfo and then use it here (by traversing
   // the call chain stack, at compile time).
+
   bool eval_scope = is_eval_scope();
-  PropagateScopeInfo(eval_scope, eval_scope);
+  bool outer_scope_calls_eval = false;
+  bool outer_scope_calls_non_strict_eval = false;
+  if (!is_global_scope()) {
+    context->ComputeEvalScopeInfo(&outer_scope_calls_eval,
+                                  &outer_scope_calls_non_strict_eval);
+  }
+  PropagateScopeInfo(outer_scope_calls_eval,
+                     outer_scope_calls_non_strict_eval,
+                     eval_scope);
 
   // 2) Resolve variables.
   Scope* global_scope = NULL;
@@ -622,10 +632,14 @@
   if (HasTrivialOuterContext()) {
     Indent(n1, "// scope has trivial outer context\n");
   }
+  if (is_strict_mode()) Indent(n1, "// strict mode scope\n");
   if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
   if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
   if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
   if (outer_scope_calls_eval_) Indent(n1, "// outer scope calls 'eval'\n");
+  if (outer_scope_calls_non_strict_eval_) {
+    Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
+  }
   if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
   if (outer_scope_is_eval_scope_) {
     Indent(n1, "// outer scope is 'eval' scope\n");
@@ -852,20 +866,30 @@
 
 
 bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval,
+                               bool outer_scope_calls_non_strict_eval,
                                bool outer_scope_is_eval_scope) {
   if (outer_scope_calls_eval) {
     outer_scope_calls_eval_ = true;
   }
 
+  if (outer_scope_calls_non_strict_eval) {
+    outer_scope_calls_non_strict_eval_ = true;
+  }
+
   if (outer_scope_is_eval_scope) {
     outer_scope_is_eval_scope_ = true;
   }
 
   bool calls_eval = scope_calls_eval_ || outer_scope_calls_eval_;
   bool is_eval = is_eval_scope() || outer_scope_is_eval_scope_;
+  bool calls_non_strict_eval =
+      (scope_calls_eval_ && !is_strict_mode()) ||
+      outer_scope_calls_non_strict_eval_;
   for (int i = 0; i < inner_scopes_.length(); i++) {
     Scope* inner_scope = inner_scopes_[i];
-    if (inner_scope->PropagateScopeInfo(calls_eval, is_eval)) {
+    if (inner_scope->PropagateScopeInfo(calls_eval,
+                                        calls_non_strict_eval,
+                                        is_eval)) {
       inner_scope_calls_eval_ = true;
     }
     if (inner_scope->force_eager_compilation_) {
diff --git a/src/scopes.h b/src/scopes.h
index a0e56a4..faa6fd9 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -218,10 +218,16 @@
   bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
   bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
   bool is_strict_mode() const { return strict_mode_; }
+  bool is_strict_mode_eval_scope() const {
+    return is_eval_scope() && is_strict_mode();
+  }
 
   // Information about which scopes calls eval.
   bool calls_eval() const { return scope_calls_eval_; }
   bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
+  bool outer_scope_calls_non_strict_eval() const {
+    return outer_scope_calls_non_strict_eval_;
+  }
 
   // Is this scope inside a with statement.
   bool inside_with() const { return scope_inside_with_; }
@@ -379,6 +385,7 @@
 
   // Computed via PropagateScopeInfo.
   bool outer_scope_calls_eval_;
+  bool outer_scope_calls_non_strict_eval_;
   bool inner_scope_calls_eval_;
   bool outer_scope_is_eval_scope_;
   bool force_eager_compilation_;
@@ -410,6 +417,7 @@
 
   // Scope analysis.
   bool PropagateScopeInfo(bool outer_scope_calls_eval,
+                          bool outer_scope_calls_non_strict_eval,
                           bool outer_scope_is_eval_scope);
   bool HasTrivialContext() const;
 
diff --git a/src/serialize.cc b/src/serialize.cc
index 12e9613..a64fba3 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 
 #include "accessors.h"
 #include "api.h"
+#include "bootstrapper.h"
 #include "execution.h"
 #include "global-handles.h"
 #include "ic-inl.h"
@@ -38,7 +39,6 @@
 #include "serialize.h"
 #include "stub-cache.h"
 #include "v8threads.h"
-#include "bootstrapper.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/serialize.h b/src/serialize.h
index 07c0a25..d83722d 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -148,7 +148,7 @@
 // This only works for objects in the first page of a space.  Don't use this for
 // things in newspace since it bypasses the write barrier.
 
-RLYSTC const int k64 = (sizeof(uintptr_t) - 4) / 4;
+static const int k64 = (sizeof(uintptr_t) - 4) / 4;
 
 #define COMMON_REFERENCE_PATTERNS(f)                               \
   f(kNumberOfSpaces, 2, (11 - k64))                                \
@@ -181,8 +181,8 @@
 // both.
 class SerializerDeserializer: public ObjectVisitor {
  public:
-  RLYSTC void Iterate(ObjectVisitor* visitor);
-  RLYSTC void SetSnapshotCacheSize(int size);
+  static void Iterate(ObjectVisitor* visitor);
+  static void SetSnapshotCacheSize(int size);
 
  protected:
   // Where the pointed-to object can be found:
@@ -220,34 +220,34 @@
 
   // Misc.
   // Raw data to be copied from the snapshot.
-  RLYSTC const int kRawData = 0x30;
+  static const int kRawData = 0x30;
   // Some common raw lengths: 0x31-0x3f
   // A tag emitted at strategic points in the snapshot to delineate sections.
   // If the deserializer does not find these at the expected moments then it
   // is an indication that the snapshot and the VM do not fit together.
   // Examine the build process for architecture, version or configuration
   // mismatches.
-  RLYSTC const int kSynchronize = 0x70;
+  static const int kSynchronize = 0x70;
   // Used for the source code of the natives, which is in the executable, but
   // is referred to from external strings in the snapshot.
-  RLYSTC const int kNativesStringResource = 0x71;
-  RLYSTC const int kNewPage = 0x72;
+  static const int kNativesStringResource = 0x71;
+  static const int kNewPage = 0x72;
   // 0x73-0x7f                            Free.
   // 0xb0-0xbf                            Free.
   // 0xf0-0xff                            Free.
 
 
-  RLYSTC const int kLargeData = LAST_SPACE;
-  RLYSTC const int kLargeCode = kLargeData + 1;
-  RLYSTC const int kLargeFixedArray = kLargeCode + 1;
-  RLYSTC const int kNumberOfSpaces = kLargeFixedArray + 1;
-  RLYSTC const int kAnyOldSpace = -1;
+  static const int kLargeData = LAST_SPACE;
+  static const int kLargeCode = kLargeData + 1;
+  static const int kLargeFixedArray = kLargeCode + 1;
+  static const int kNumberOfSpaces = kLargeFixedArray + 1;
+  static const int kAnyOldSpace = -1;
 
   // A bitmask for getting the space out of an instruction.
-  RLYSTC const int kSpaceMask = 15;
+  static const int kSpaceMask = 15;
 
-  RLYSTC inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
-  RLYSTC inline bool SpaceIsPaged(int space) {
+  static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
+  static inline bool SpaceIsPaged(int space) {
     return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
   }
 };
@@ -380,19 +380,19 @@
   }
 
  private:
-  RLYSTC bool SerializationMatchFun(void* key1, void* key2) {
+  static bool SerializationMatchFun(void* key1, void* key2) {
     return key1 == key2;
   }
 
-  RLYSTC uint32_t Hash(HeapObject* obj) {
+  static uint32_t Hash(HeapObject* obj) {
     return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
   }
 
-  RLYSTC void* Key(HeapObject* obj) {
+  static void* Key(HeapObject* obj) {
     return reinterpret_cast<void*>(obj->address());
   }
 
-  RLYSTC void* Value(int v) {
+  static void* Value(int v) {
     return reinterpret_cast<void*>(v);
   }
 
@@ -403,7 +403,7 @@
 
 
 // There can be only one serializer per V8 process.
-STATIC_CLASS Serializer : public SerializerDeserializer {
+class Serializer : public SerializerDeserializer {
  public:
   explicit Serializer(SnapshotByteSink* sink);
   ~Serializer();
@@ -415,25 +415,25 @@
     return fullness_[space];
   }
 
-  RLYSTC void Enable() {
+  static void Enable() {
     if (!serialization_enabled_) {
       ASSERT(!too_late_to_enable_now_);
     }
     serialization_enabled_ = true;
   }
 
-  RLYSTC void Disable() { serialization_enabled_ = false; }
+  static void Disable() { serialization_enabled_ = false; }
   // Call this when you have made use of the fact that there is no serialization
   // going on.
-  RLYSTC void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
-  RLYSTC bool enabled() { return serialization_enabled_; }
+  static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
+  static bool enabled() { return serialization_enabled_; }
   SerializationAddressMapper* address_mapper() { return &address_mapper_; }
 #ifdef DEBUG
   virtual void Synchronize(const char* tag);
 #endif
 
  protected:
-  RLYSTC const int kInvalidRootIndex = -1;
+  static const int kInvalidRootIndex = -1;
   virtual int RootIndex(HeapObject* heap_object) = 0;
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
 
@@ -488,11 +488,11 @@
   // object space it may return kLargeCode or kLargeFixedArray in order
   // to indicate to the deserializer what kind of large object allocation
   // to make.
-  RLYSTC int SpaceOfObject(HeapObject* object);
+  static int SpaceOfObject(HeapObject* object);
   // This just returns the space of the object.  It will return LO_SPACE
   // for all large objects since you can't check the type of the object
   // once the map has been used for the serialization address.
-  RLYSTC int SpaceOfAlreadySerializedObject(HeapObject* object);
+  static int SpaceOfAlreadySerializedObject(HeapObject* object);
   int Allocate(int space, int size, bool* new_page_started);
   int EncodeExternalReference(Address addr) {
     return external_reference_encoder_->Encode(addr);
@@ -506,9 +506,9 @@
   SnapshotByteSink* sink_;
   int current_root_index_;
   ExternalReferenceEncoder* external_reference_encoder_;
-  RLYSTC bool serialization_enabled_;
+  static bool serialization_enabled_;
   // Did we already make use of the fact that serialization was not enabled?
-  RLYSTC bool too_late_to_enable_now_;
+  static bool too_late_to_enable_now_;
   int large_object_total_;
   SerializationAddressMapper address_mapper_;
 
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 7f82895..ef89a5e 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -53,7 +53,7 @@
     DeleteArray(str);
     return true;
   } else if (size_ > 0) {
-    Deserialize(data_, size_);
+    Deserialize(raw_data_, raw_size_);
     return true;
   }
   return false;
@@ -71,7 +71,8 @@
                      map_space_used_,
                      cell_space_used_,
                      large_space_used_);
-  SnapshotByteSource source(context_data_, context_size_);
+  SnapshotByteSource source(context_raw_data_,
+                            context_raw_size_);
   Deserializer deserializer(&source);
   Object* root;
   deserializer.DeserializePartial(&root);
diff --git a/src/snapshot-empty.cc b/src/snapshot-empty.cc
index cb26eb8..0b35720 100644
--- a/src/snapshot-empty.cc
+++ b/src/snapshot-empty.cc
@@ -35,9 +35,13 @@
 namespace internal {
 
 const byte Snapshot::data_[] = { 0 };
+const byte* Snapshot::raw_data_ = NULL;
 const int Snapshot::size_ = 0;
+const int Snapshot::raw_size_ = 0;
 const byte Snapshot::context_data_[] = { 0 };
+const byte* Snapshot::context_raw_data_ = NULL;
 const int Snapshot::context_size_ = 0;
+const int Snapshot::context_raw_size_ = 0;
 
 const int Snapshot::new_space_used_ = 0;
 const int Snapshot::pointer_space_used_ = 0;
diff --git a/src/snapshot.h b/src/snapshot.h
index bedd186..4f01a2d 100644
--- a/src/snapshot.h
+++ b/src/snapshot.h
@@ -33,7 +33,7 @@
 namespace v8 {
 namespace internal {
 
-STATIC_CLASS Snapshot {
+class Snapshot {
  public:
   // Initialize the VM from the given snapshot file. If snapshot_file is
   // NULL, use the internal snapshot instead. Returns false if no snapshot
@@ -50,9 +50,25 @@
   // successfully.
   static bool WriteToFile(const char* snapshot_file);
 
+  static const byte* data() { return data_; }
+  static int size() { return size_; }
+  static int raw_size() { return raw_size_; }
+  static void set_raw_data(const byte* raw_data) {
+    raw_data_ = raw_data;
+  }
+  static const byte* context_data() { return context_data_; }
+  static int context_size() { return context_size_; }
+  static int context_raw_size() { return context_raw_size_; }
+  static void set_context_raw_data(
+      const byte* context_raw_data) {
+    context_raw_data_ = context_raw_data;
+  }
+
  private:
   static const byte data_[];
+  static const byte* raw_data_;
   static const byte context_data_[];
+  static const byte* context_raw_data_;
   static const int new_space_used_;
   static const int pointer_space_used_;
   static const int data_space_used_;
@@ -61,7 +77,9 @@
   static const int cell_space_used_;
   static const int large_space_used_;
   static const int size_;
+  static const int raw_size_;
   static const int context_size_;
+  static const int context_raw_size_;
 
   static bool Deserialize(const byte* content, int len);
 
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 070f970..f5f6654 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -155,7 +155,8 @@
 
 uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
   uint32_t result = 0;
-  if (length_in_bytes >= kPageSize) {
+  static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
+  if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
     result = kAllRegionsDirtyMarks;
   } else if (length_in_bytes > 0) {
     int start_region = GetRegionNumberForAddress(start);
diff --git a/src/spaces.cc b/src/spaces.cc
index 3db9306..b494d24 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -148,12 +148,12 @@
 // CodeRange
 
 
-CodeRange::CodeRange(Isolate* isolate)
-    : isolate_(isolate),
-      code_range_(NULL),
+CodeRange::CodeRange()
+    : code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0) {
+      current_allocation_block_index_(0),
+      isolate_(NULL) {
 }
 
 
@@ -279,9 +279,8 @@
 const int kEstimatedNumberOfChunks = 270;
 
 
-MemoryAllocator::MemoryAllocator(Isolate* isolate)
-    : isolate_(isolate),
-      capacity_(0),
+MemoryAllocator::MemoryAllocator()
+    : capacity_(0),
       capacity_executable_(0),
       size_(0),
       size_executable_(0),
@@ -289,7 +288,8 @@
       chunks_(kEstimatedNumberOfChunks),
       free_chunk_ids_(kEstimatedNumberOfChunks),
       max_nof_chunks_(0),
-      top_(0) {
+      top_(0),
+      isolate_(NULL) {
 }
 
 
@@ -1564,13 +1564,12 @@
       CASE(BUILTIN);
       CASE(LOAD_IC);
       CASE(KEYED_LOAD_IC);
-      CASE(KEYED_EXTERNAL_ARRAY_LOAD_IC);
       CASE(STORE_IC);
       CASE(KEYED_STORE_IC);
-      CASE(KEYED_EXTERNAL_ARRAY_STORE_IC);
       CASE(CALL_IC);
       CASE(KEYED_CALL_IC);
-      CASE(TYPE_RECORDING_BINARY_OP_IC);
+      CASE(UNARY_OP_IC);
+      CASE(BINARY_OP_IC);
       CASE(COMPARE_IC);
     }
   }
diff --git a/src/spaces.h b/src/spaces.h
index f323f85..4024387 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,8 @@
 #ifndef V8_SPACES_H_
 #define V8_SPACES_H_
 
-#include "list-inl.h"
+#include "allocation.h"
+#include "list.h"
 #include "log.h"
 
 namespace v8 {
@@ -413,8 +414,6 @@
 // manages a range of virtual memory.
 class CodeRange {
  public:
-  explicit CodeRange(Isolate* isolate);
-
   // Reserves a range of virtual memory, but does not commit any of it.
   // Can only be called once, at heap initialization time.
   // Returns false on failure.
@@ -424,9 +423,9 @@
   // manage it.
   void TearDown();
 
-  bool exists() { return this != NULL && code_range_ != NULL; }
+  bool exists() { return code_range_ != NULL; }
   bool contains(Address address) {
-    if (this == NULL || code_range_ == NULL) return false;
+    if (code_range_ == NULL) return false;
     Address start = static_cast<Address>(code_range_->address());
     return start <= address && address < start + code_range_->size();
   }
@@ -439,7 +438,7 @@
   void FreeRawMemory(void* buf, size_t length);
 
  private:
-  Isolate* isolate_;
+  CodeRange();
 
   // The reserved range of virtual memory that all code objects are put in.
   VirtualMemory* code_range_;
@@ -473,6 +472,10 @@
   static int CompareFreeBlockAddress(const FreeBlock* left,
                                      const FreeBlock* right);
 
+  friend class Isolate;
+
+  Isolate* isolate_;
+
   DISALLOW_COPY_AND_ASSIGN(CodeRange);
 };
 
@@ -503,8 +506,6 @@
 
 class MemoryAllocator {
  public:
-  explicit MemoryAllocator(Isolate* isolate);
-
   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
   bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
@@ -675,11 +676,11 @@
 #endif
 
  private:
+  MemoryAllocator();
+
   static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
   static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
 
-  Isolate* isolate_;
-
   // Maximum space size in bytes.
   intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
@@ -772,6 +773,10 @@
                            Page* prev,
                            Page** last_page_in_use);
 
+  friend class Isolate;
+
+  Isolate* isolate_;
+
   DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
 };
 
diff --git a/src/splay-tree.h b/src/splay-tree.h
index c265276..0cb9ea8 100644
--- a/src/splay-tree.h
+++ b/src/splay-tree.h
@@ -28,6 +28,8 @@
 #ifndef V8_SPLAY_TREE_H_
 #define V8_SPLAY_TREE_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/string.js b/src/string.js
index d8d402c..f24862c 100644
--- a/src/string.js
+++ b/src/string.js
@@ -62,6 +62,10 @@
 
 // ECMA-262, section 15.5.4.4
 function StringCharAt(pos) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.charAt"]);
+  }
   var result = %_StringCharAt(this, pos);
   if (%_IsSmi(result)) {
     result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
@@ -72,6 +76,10 @@
 
 // ECMA-262 section 15.5.4.5
 function StringCharCodeAt(pos) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.charCodeAt"]);
+  }
   var result = %_StringCharCodeAt(this, pos);
   if (!%_IsSmi(result)) {
     result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
@@ -82,6 +90,9 @@
 
 // ECMA-262, section 15.5.4.6
 function StringConcat() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined", ["String.prototype.concat"]);
+  }
   var len = %_ArgumentsLength();
   var this_as_string = TO_STRING_INLINE(this);
   if (len === 1) {
@@ -102,6 +113,10 @@
 
 // ECMA-262 section 15.5.4.7
 function StringIndexOf(pattern /* position */) {  // length == 1
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.indexOf"]);
+  }
   var subject = TO_STRING_INLINE(this);
   pattern = TO_STRING_INLINE(pattern);
   var index = 0;
@@ -117,6 +132,10 @@
 
 // ECMA-262 section 15.5.4.8
 function StringLastIndexOf(pat /* position */) {  // length == 1
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.lastIndexOf"]);
+  }
   var sub = TO_STRING_INLINE(this);
   var subLength = sub.length;
   var pat = TO_STRING_INLINE(pat);
@@ -146,6 +165,10 @@
 // This function is implementation specific.  For now, we do not
 // do anything locale specific.
 function StringLocaleCompare(other) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.localeCompare"]);
+  }
   if (%_ArgumentsLength() === 0) return 0;
   return %StringLocaleCompare(TO_STRING_INLINE(this), 
                               TO_STRING_INLINE(other));
@@ -154,6 +177,10 @@
 
 // ECMA-262 section 15.5.4.10
 function StringMatch(regexp) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.match"]);
+  }
   var subject = TO_STRING_INLINE(this);
   if (IS_REGEXP(regexp)) {
     if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
@@ -187,6 +214,10 @@
 
 // ECMA-262, section 15.5.4.11
 function StringReplace(search, replace) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.replace"]);
+  }
   var subject = TO_STRING_INLINE(this);
 
   // Delegate to one of the regular expression variants if necessary.
@@ -243,7 +274,7 @@
 // the result.
 function ExpandReplacement(string, subject, matchInfo, builder) {
   var length = string.length;
-  var builder_elements = builder.elements; 
+  var builder_elements = builder.elements;
   var next = %StringIndexOf(string, '$', 0);
   if (next < 0) {
     if (length > 0) builder_elements.push(string);
@@ -467,6 +498,10 @@
 
 // ECMA-262 section 15.5.4.12
 function StringSearch(re) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.search"]);
+  }
   var regexp;
   if (IS_STRING(re)) {
     regexp = %_GetFromCache(STRING_TO_REGEXP_CACHE_ID, re);
@@ -485,6 +520,10 @@
 
 // ECMA-262 section 15.5.4.13
 function StringSlice(start, end) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.slice"]);
+  }
   var s = TO_STRING_INLINE(this);
   var s_len = s.length;
   var start_i = TO_INTEGER(start);
@@ -520,6 +559,10 @@
 
 // ECMA-262 section 15.5.4.14
 function StringSplit(separator, limit) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.split"]);
+  }
   var subject = TO_STRING_INLINE(this);
   limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
   if (limit === 0) return [];
@@ -613,6 +656,10 @@
 
 // ECMA-262 section 15.5.4.15
 function StringSubstring(start, end) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.subString"]);
+  }
   var s = TO_STRING_INLINE(this);
   var s_len = s.length;
 
@@ -646,6 +693,10 @@
 
 // This is not a part of ECMA-262.
 function StringSubstr(start, n) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.substr"]);
+  }
   var s = TO_STRING_INLINE(this);
   var len;
 
@@ -686,37 +737,65 @@
 
 // ECMA-262, 15.5.4.16
 function StringToLowerCase() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.toLowerCase"]);
+  }
   return %StringToLowerCase(TO_STRING_INLINE(this));
 }
 
 
 // ECMA-262, 15.5.4.17
 function StringToLocaleLowerCase() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.toLocaleLowerCase"]);
+  }
   return %StringToLowerCase(TO_STRING_INLINE(this));
 }
 
 
 // ECMA-262, 15.5.4.18
 function StringToUpperCase() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.toUpperCase"]);
+  }
   return %StringToUpperCase(TO_STRING_INLINE(this));
 }
 
 
 // ECMA-262, 15.5.4.19
 function StringToLocaleUpperCase() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.toLocaleUpperCase"]);
+  }
   return %StringToUpperCase(TO_STRING_INLINE(this));
 }
 
 // ES5, 15.5.4.20
 function StringTrim() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.trim"]);
+  }
   return %StringTrim(TO_STRING_INLINE(this), true, true);
 }
 
 function StringTrimLeft() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.trimLeft"]);
+  }
   return %StringTrim(TO_STRING_INLINE(this), true, false);
 }
 
 function StringTrimRight() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["String.prototype.trimRight"]);
+  }
   return %StringTrim(TO_STRING_INLINE(this), false, true);
 }
 
@@ -830,6 +909,8 @@
   this.special_string = str;
 }
 
+ReplaceResultBuilder.prototype.__proto__ = null;
+
 
 ReplaceResultBuilder.prototype.add = function(str) {
   str = TO_STRING_INLINE(str);
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 0c6a7f7..8c6d84c 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -457,34 +457,6 @@
 }
 
 
-MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
-  // Using NORMAL as the PropertyType for array element loads is a misuse. The
-  // generated stub always accesses fast elements, not slow-mode fields, but
-  // some property type is required for the stub lookup. Note that overloading
-  // the NORMAL PropertyType is only safe as long as no stubs are generated for
-  // other keyed field loads. This is guaranteed to be the case since all field
-  // keyed loads that are not array elements go through a generic builtin stub.
-  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
-  String* name = heap()->KeyedLoadSpecialized_symbol();
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedLoadStubCompiler compiler;
-    { MaybeObject* maybe_code = compiler.CompileLoadSpecialized(receiver);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
-  return code;
-}
-
-
 MaybeObject* StubCache::ComputeStoreField(String* name,
                                           JSObject* receiver,
                                           int field_index,
@@ -513,30 +485,6 @@
 }
 
 
-MaybeObject* StubCache::ComputeKeyedStoreSpecialized(
-    JSObject* receiver,
-    StrictModeFlag strict_mode) {
-  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
-  String* name = heap()->KeyedStoreSpecialized_symbol();
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    KeyedStoreStubCompiler compiler(strict_mode);
-    { MaybeObject* maybe_code = compiler.CompileStoreSpecialized(receiver);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    PROFILE(isolate_,
-            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-  }
-  return code;
-}
-
-
 namespace {
 
 ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {
@@ -555,6 +503,8 @@
       return kExternalUnsignedIntArray;
     case JSObject::EXTERNAL_FLOAT_ELEMENTS:
       return kExternalFloatArray;
+    case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+      return kExternalDoubleArray;
     case JSObject::EXTERNAL_PIXEL_ELEMENTS:
       return kExternalPixelArray;
     default:
@@ -563,56 +513,6 @@
   }
 }
 
-String* ExternalArrayTypeToStubName(Heap* heap,
-                                    ExternalArrayType array_type,
-                                    bool is_store) {
-  if (is_store) {
-    switch (array_type) {
-      case kExternalByteArray:
-        return heap->KeyedStoreExternalByteArray_symbol();
-      case kExternalUnsignedByteArray:
-        return heap->KeyedStoreExternalUnsignedByteArray_symbol();
-      case kExternalShortArray:
-        return heap->KeyedStoreExternalShortArray_symbol();
-      case kExternalUnsignedShortArray:
-        return heap->KeyedStoreExternalUnsignedShortArray_symbol();
-      case kExternalIntArray:
-        return heap->KeyedStoreExternalIntArray_symbol();
-      case kExternalUnsignedIntArray:
-        return heap->KeyedStoreExternalUnsignedIntArray_symbol();
-      case kExternalFloatArray:
-        return heap->KeyedStoreExternalFloatArray_symbol();
-      case kExternalPixelArray:
-        return heap->KeyedStoreExternalPixelArray_symbol();
-      default:
-        UNREACHABLE();
-        return NULL;
-    }
-  } else {
-    switch (array_type) {
-      case kExternalByteArray:
-        return heap->KeyedLoadExternalByteArray_symbol();
-      case kExternalUnsignedByteArray:
-        return heap->KeyedLoadExternalUnsignedByteArray_symbol();
-      case kExternalShortArray:
-        return heap->KeyedLoadExternalShortArray_symbol();
-      case kExternalUnsignedShortArray:
-        return heap->KeyedLoadExternalUnsignedShortArray_symbol();
-      case kExternalIntArray:
-        return heap->KeyedLoadExternalIntArray_symbol();
-      case kExternalUnsignedIntArray:
-        return heap->KeyedLoadExternalUnsignedIntArray_symbol();
-      case kExternalFloatArray:
-        return heap->KeyedLoadExternalFloatArray_symbol();
-      case kExternalPixelArray:
-        return heap->KeyedLoadExternalPixelArray_symbol();
-      default:
-        UNREACHABLE();
-        return NULL;
-    }
-  }
-}
-
 }  // anonymous namespace
 
 
@@ -622,37 +522,88 @@
     StrictModeFlag strict_mode) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(
-          is_store ? Code::KEYED_EXTERNAL_ARRAY_STORE_IC :
-                     Code::KEYED_EXTERNAL_ARRAY_LOAD_IC,
+          is_store ? Code::KEYED_STORE_IC :
+                     Code::KEYED_LOAD_IC,
           NORMAL,
           strict_mode);
   ExternalArrayType array_type =
       ElementsKindToExternalArrayType(receiver->GetElementsKind());
-  String* name = ExternalArrayTypeToStubName(heap(), array_type, is_store);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
-  if (code->IsUndefined()) {
-    ExternalArrayStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          is_store ?
-              compiler.CompileKeyedStoreStub(receiver, array_type, flags) :
-              compiler.CompileKeyedLoadStub(receiver, array_type, flags);
-      if (!maybe_code->ToObject(&code)) return maybe_code;
-    }
-    Code::cast(code)->set_external_array_type(array_type);
-    if (is_store) {
-      PROFILE(isolate_,
-          CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_STORE_IC_TAG,
-                          Code::cast(code), 0));
-    } else {
-      PROFILE(isolate_,
-          CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG,
-                          Code::cast(code), 0));
-    }
-    Object* result;
-    { MaybeObject* maybe_result =
-          receiver->UpdateMapCodeCache(name, Code::cast(code));
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
+  String* name = is_store
+      ? isolate()->heap()->KeyedStoreSpecializedMonomorphic_symbol()
+      : isolate()->heap()->KeyedLoadSpecializedMonomorphic_symbol();
+  Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
+  if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
+
+  MaybeObject* maybe_new_code = NULL;
+  if (is_store) {
+    ExternalArrayStoreStubCompiler compiler(strict_mode);
+    maybe_new_code = compiler.CompileStore(receiver, array_type);
+  } else {
+    ExternalArrayLoadStubCompiler compiler(strict_mode);
+    maybe_new_code = compiler.CompileLoad(receiver, array_type);
+  }
+  Code* code;
+  if (!maybe_new_code->To(&code)) return maybe_new_code;
+  code->set_external_array_type(array_type);
+  if (is_store) {
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_STORE_IC_TAG,
+                            Code::cast(code), 0));
+  } else {
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG,
+                            Code::cast(code), 0));
+  }
+  ASSERT(code->IsCode());
+  Object* result;
+  { MaybeObject* maybe_result =
+        receiver->UpdateMapCodeCache(name, Code::cast(code));
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  return code;
+}
+
+
+MaybeObject* StubCache::ComputeKeyedLoadOrStoreFastElement(
+    JSObject* receiver,
+    bool is_store,
+    StrictModeFlag strict_mode) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(
+          is_store ? Code::KEYED_STORE_IC :
+                     Code::KEYED_LOAD_IC,
+          NORMAL,
+          strict_mode);
+  String* name = is_store
+      ? isolate()->heap()->KeyedStoreSpecializedMonomorphic_symbol()
+      : isolate()->heap()->KeyedLoadSpecializedMonomorphic_symbol();
+  Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
+  if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
+
+  MaybeObject* maybe_new_code = NULL;
+  if (is_store) {
+    KeyedStoreStubCompiler compiler(strict_mode);
+    maybe_new_code = compiler.CompileStoreFastElement(receiver->map());
+  } else {
+    KeyedLoadStubCompiler compiler;
+    maybe_new_code = compiler.CompileLoadFastElement(receiver->map());
+  }
+  Code* code;
+  if (!maybe_new_code->To(&code)) return maybe_new_code;
+  if (is_store) {
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+                            Code::cast(code), 0));
+  } else {
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
+                            Code::cast(code), 0));
+  }
+  ASSERT(code->IsCode());
+  Object* result;
+  { MaybeObject* maybe_result =
+        receiver->UpdateMapCodeCache(name, Code::cast(code));
+    if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   return code;
 }
@@ -838,6 +789,7 @@
 MaybeObject* StubCache::ComputeCallField(int argc,
                                          InLoopFlag in_loop,
                                          Code::Kind kind,
+                                         Code::ExtraICState extra_ic_state,
                                          String* name,
                                          Object* object,
                                          JSObject* holder,
@@ -856,14 +808,14 @@
 
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
                                                     FIELD,
-                                                    Code::kNoExtraICState,
+                                                    extra_ic_state,
                                                     cache_holder,
                                                     in_loop,
                                                     argc);
   Object* code = map_holder->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     CallStubCompiler compiler(
-        argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
+        argc, in_loop, kind, extra_ic_state, cache_holder);
     { MaybeObject* maybe_code =
           compiler.CompileCallField(JSObject::cast(object),
                                     holder,
@@ -886,11 +838,13 @@
 }
 
 
-MaybeObject* StubCache::ComputeCallInterceptor(int argc,
-                                               Code::Kind kind,
-                                               String* name,
-                                               Object* object,
-                                               JSObject* holder) {
+MaybeObject* StubCache::ComputeCallInterceptor(
+    int argc,
+    Code::Kind kind,
+    Code::ExtraICState extra_ic_state,
+    String* name,
+    Object* object,
+    JSObject* holder) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
       IC::GetCodeCacheForObject(object, holder);
@@ -905,14 +859,14 @@
 
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
                                                     INTERCEPTOR,
-                                                    Code::kNoExtraICState,
+                                                    extra_ic_state,
                                                     cache_holder,
                                                     NOT_IN_LOOP,
                                                     argc);
   Object* code = map_holder->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     CallStubCompiler compiler(
-        argc, NOT_IN_LOOP, kind, Code::kNoExtraICState, cache_holder);
+        argc, NOT_IN_LOOP, kind, extra_ic_state, cache_holder);
     { MaybeObject* maybe_code =
           compiler.CompileCallInterceptor(JSObject::cast(object), holder, name);
       if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -935,10 +889,12 @@
 MaybeObject* StubCache::ComputeCallNormal(int argc,
                                           InLoopFlag in_loop,
                                           Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state,
                                           String* name,
                                           JSObject* receiver) {
   Object* code;
-  { MaybeObject* maybe_code = ComputeCallNormal(argc, in_loop, kind);
+  { MaybeObject* maybe_code =
+        ComputeCallNormal(argc, in_loop, kind, extra_ic_state);
     if (!maybe_code->ToObject(&code)) return maybe_code;
   }
   return code;
@@ -948,6 +904,7 @@
 MaybeObject* StubCache::ComputeCallGlobal(int argc,
                                           InLoopFlag in_loop,
                                           Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state,
                                           String* name,
                                           JSObject* receiver,
                                           GlobalObject* holder,
@@ -958,7 +915,7 @@
   JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
                                                     NORMAL,
-                                                    Code::kNoExtraICState,
+                                                    extra_ic_state,
                                                     cache_holder,
                                                     in_loop,
                                                     argc);
@@ -970,7 +927,7 @@
     // caches.
     if (!function->is_compiled()) return Failure::InternalError();
     CallStubCompiler compiler(
-        argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
+        argc, in_loop, kind, extra_ic_state, cache_holder);
     { MaybeObject* maybe_code =
           compiler.CompileCallGlobal(receiver, holder, cell, function, name);
       if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -1040,11 +997,15 @@
 
 Code* StubCache::FindCallInitialize(int argc,
                                     InLoopFlag in_loop,
+                                    RelocInfo::Mode mode,
                                     Code::Kind kind) {
+  Code::ExtraICState extra_state =
+      CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
+      CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
   Code::Flags flags = Code::ComputeFlags(kind,
                                          in_loop,
                                          UNINITIALIZED,
-                                         Code::kNoExtraICState,
+                                         extra_state,
                                          NORMAL,
                                          argc);
   Object* result = ProbeCache(isolate(), flags)->ToObjectUnchecked();
@@ -1057,11 +1018,15 @@
 
 MaybeObject* StubCache::ComputeCallInitialize(int argc,
                                               InLoopFlag in_loop,
+                                              RelocInfo::Mode mode,
                                               Code::Kind kind) {
+  Code::ExtraICState extra_state =
+      CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
+      CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
   Code::Flags flags = Code::ComputeFlags(kind,
                                          in_loop,
                                          UNINITIALIZED,
-                                         Code::kNoExtraICState,
+                                         extra_state,
                                          NORMAL,
                                          argc);
   Object* probe;
@@ -1074,17 +1039,20 @@
 }
 
 
-Handle<Code> StubCache::ComputeCallInitialize(int argc, InLoopFlag in_loop) {
+Handle<Code> StubCache::ComputeCallInitialize(int argc,
+                                              InLoopFlag in_loop,
+                                              RelocInfo::Mode mode) {
   if (in_loop == IN_LOOP) {
     // Force the creation of the corresponding stub outside loops,
     // because it may be used when clearing the ICs later - it is
     // possible for a series of IC transitions to lose the in-loop
     // information, and the IC clearing code can't generate a stub
     // that it needs so we need to ensure it is generated already.
-    ComputeCallInitialize(argc, NOT_IN_LOOP);
+    ComputeCallInitialize(argc, NOT_IN_LOOP, mode);
   }
   CALL_HEAP_FUNCTION(isolate_,
-                     ComputeCallInitialize(argc, in_loop, Code::CALL_IC), Code);
+                     ComputeCallInitialize(argc, in_loop, mode, Code::CALL_IC),
+                     Code);
 }
 
 
@@ -1100,17 +1068,23 @@
   }
   CALL_HEAP_FUNCTION(
       isolate_,
-      ComputeCallInitialize(argc, in_loop, Code::KEYED_CALL_IC), Code);
+      ComputeCallInitialize(argc,
+                            in_loop,
+                            RelocInfo::CODE_TARGET,
+                            Code::KEYED_CALL_IC),
+      Code);
 }
 
 
-MaybeObject* StubCache::ComputeCallPreMonomorphic(int argc,
-                                                  InLoopFlag in_loop,
-                                                  Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallPreMonomorphic(
+    int argc,
+    InLoopFlag in_loop,
+    Code::Kind kind,
+    Code::ExtraICState extra_ic_state) {
   Code::Flags flags = Code::ComputeFlags(kind,
                                          in_loop,
                                          PREMONOMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Object* probe;
@@ -1125,11 +1099,12 @@
 
 MaybeObject* StubCache::ComputeCallNormal(int argc,
                                           InLoopFlag in_loop,
-                                          Code::Kind kind) {
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   Code::Flags flags = Code::ComputeFlags(kind,
                                          in_loop,
                                          MONOMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Object* probe;
@@ -1142,13 +1117,15 @@
 }
 
 
-MaybeObject* StubCache::ComputeCallMegamorphic(int argc,
-                                               InLoopFlag in_loop,
-                                               Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallMegamorphic(
+    int argc,
+    InLoopFlag in_loop,
+    Code::Kind kind,
+    Code::ExtraICState extra_ic_state) {
   Code::Flags flags = Code::ComputeFlags(kind,
                                          in_loop,
                                          MEGAMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Object* probe;
@@ -1161,13 +1138,15 @@
 }
 
 
-MaybeObject* StubCache::ComputeCallMiss(int argc, Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallMiss(int argc,
+                                        Code::Kind kind,
+                                        Code::ExtraICState extra_ic_state) {
   // MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
   // and monomorphic stubs are not mixed up together in the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC_PROTOTYPE_FAILURE,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc,
                                          OWN_MAP);
@@ -1182,7 +1161,11 @@
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-MaybeObject* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallDebugBreak(
+    int argc,
+    Code::Kind kind) {
+  // Extra IC state is irrelevant for debug break ICs. They jump to
+  // the actual call IC to carry out the work.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          NOT_IN_LOOP,
                                          DEBUG_BREAK,
@@ -1199,8 +1182,11 @@
 }
 
 
-MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(int argc,
-                                                      Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(
+    int argc,
+    Code::Kind kind) {
+  // Extra IC state is irrelevant for debug break ICs. They jump to
+  // the actual call IC to carry out the work.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          NOT_IN_LOOP,
                                          DEBUG_PREPARE_STEP_IN,
@@ -1484,8 +1470,9 @@
   HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateInitialize(masm(), argc);
+    CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateInitialize(masm(), argc);
   }
@@ -1511,8 +1498,9 @@
   // The code of the PreMonomorphic stub is the same as the code
   // of the Initialized stub.  They just differ on the code object flags.
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateInitialize(masm(), argc);
+    CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateInitialize(masm(), argc);
   }
@@ -1537,6 +1525,9 @@
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
+    // Call normal is always with an explicit receiver.
+    ASSERT(!CallIC::Contextual::decode(
+        Code::ExtractExtraICStateFromFlags(flags)));
     CallIC::GenerateNormal(masm(), argc);
   } else {
     KeyedCallIC::GenerateNormal(masm(), argc);
@@ -1560,8 +1551,9 @@
   HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMegamorphic(masm(), argc);
+    CallIC::GenerateMegamorphic(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateMegamorphic(masm(), argc);
   }
@@ -1585,8 +1577,9 @@
   HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMiss(masm(), argc);
+    CallIC::GenerateMiss(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateMiss(masm(), argc);
   }
@@ -1632,7 +1625,8 @@
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMiss(masm(), argc);
+    // For the debugger, extra IC state is irrelevant.
+    CallIC::GenerateMiss(masm(), argc, Code::kNoExtraICState);
   } else {
     KeyedCallIC::GenerateMiss(masm(), argc);
   }
@@ -1711,8 +1705,11 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, type);
+MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type,
+                                            String* name,
+                                            InlineCacheState state) {
+  Code::Flags flags = Code::ComputeFlags(
+      Code::KEYED_LOAD_IC, NOT_IN_LOOP, state, Code::kNoExtraICState, type);
   MaybeObject* result = GetCodeWithFlags(flags, name);
   if (!result->IsFailure()) {
     PROFILE(isolate(),
@@ -1744,9 +1741,11 @@
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::KEYED_STORE_IC, type, strict_mode_);
+MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type,
+                                             String* name,
+                                             InlineCacheState state) {
+  Code::Flags flags = Code::ComputeFlags(
+      Code::KEYED_STORE_IC, NOT_IN_LOOP, state, strict_mode_, type);
   MaybeObject* result = GetCodeWithFlags(flags, name);
   if (!result->IsFailure()) {
     PROFILE(isolate(),
@@ -1924,15 +1923,36 @@
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::GetCode(Code::Flags flags) {
+MaybeObject* ExternalArrayLoadStubCompiler::GetCode() {
   Object* result;
-  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ExternalArrayStub");
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
+                                                    NORMAL,
+                                                    strict_mode_);
+  { MaybeObject* maybe_result = GetCodeWithFlags(flags,
+                                                 "ExternalArrayLoadStub");
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Code* code = Code::cast(result);
   USE(code);
   PROFILE(isolate(),
-          CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub"));
+          CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayLoadStub"));
+  return result;
+}
+
+
+MaybeObject* ExternalArrayStoreStubCompiler::GetCode() {
+  Object* result;
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC,
+                                                    NORMAL,
+                                                    strict_mode_);
+  { MaybeObject* maybe_result = GetCodeWithFlags(flags,
+                                                 "ExternalArrayStoreStub");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Code* code = Code::cast(result);
+  USE(code);
+  PROFILE(isolate(),
+          CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStoreStub"));
   return result;
 }
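
The CALL_IC hunks above now thread a Code::ExtraICState value into the stub generators, and GenerateNormal asserts that the contextual-call bit of that state is clear. As a minimal sketch only, assuming CallIC::Contextual is a one-bit field over the extra IC state (its real definition lives in src/ic.h and is not part of this patch, so the exact bit position is an assumption here):

// Sketch, not V8's actual implementation: a stand-in ExtraICState plus a
// one-bit encode/decode pair matching how the asserts above read.
typedef int ExtraICState;  // stands in for Code::ExtraICState

struct Contextual {
  // Encode whether the call site has no explicit receiver (a contextual call).
  static ExtraICState encode(bool is_contextual) { return is_contextual ? 1 : 0; }
  // Decode the bit back out; GenerateNormal above asserts this is false.
  static bool decode(ExtraICState state) { return (state & 1) != 0; }
};

With such a field, ComputeCallNormal-style stubs are only cached for the non-contextual case, which is what the added ASSERT in the GenerateNormal hunk expresses.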
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index c5dcf36..a1243c2 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,8 +28,10 @@
 #ifndef V8_STUB_CACHE_H_
 #define V8_STUB_CACHE_H_
 
+#include "allocation.h"
 #include "arguments.h"
 #include "macro-assembler.h"
+#include "objects.h"
 #include "zone-inl.h"
 
 namespace v8 {
@@ -143,9 +145,6 @@
       String* name,
       JSFunction* receiver);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadSpecialized(
-      JSObject* receiver);
-
   // ---
 
   MUST_USE_RESULT MaybeObject* ComputeStoreField(
@@ -184,25 +183,26 @@
       Map* transition,
       StrictModeFlag strict_mode);
 
-  MUST_USE_RESULT MaybeObject* ComputeKeyedStoreSpecialized(
-      JSObject* receiver,
-      StrictModeFlag strict_mode);
-
-
   MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
       JSObject* receiver,
       bool is_store,
       StrictModeFlag strict_mode);
 
+  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreFastElement(
+      JSObject* receiver,
+      bool is_store,
+      StrictModeFlag strict_mode);
   // ---
 
-  MUST_USE_RESULT MaybeObject* ComputeCallField(int argc,
-                                                InLoopFlag in_loop,
-                                                Code::Kind,
-                                                String* name,
-                                                Object* object,
-                                                JSObject* holder,
-                                                int index);
+  MUST_USE_RESULT MaybeObject* ComputeCallField(
+      int argc,
+      InLoopFlag in_loop,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      Object* object,
+      JSObject* holder,
+      int index);
 
   MUST_USE_RESULT MaybeObject* ComputeCallConstant(
       int argc,
@@ -214,22 +214,27 @@
       JSObject* holder,
       JSFunction* function);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
-                                                 InLoopFlag in_loop,
-                                                 Code::Kind,
-                                                 String* name,
-                                                 JSObject* receiver);
+  MUST_USE_RESULT MaybeObject* ComputeCallNormal(
+      int argc,
+      InLoopFlag in_loop,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      JSObject* receiver);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(int argc,
-                                                      Code::Kind,
-                                                      String* name,
-                                                      Object* object,
-                                                      JSObject* holder);
+  MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(
+      int argc,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      Object* object,
+      JSObject* holder);
 
   MUST_USE_RESULT MaybeObject* ComputeCallGlobal(
       int argc,
       InLoopFlag in_loop,
       Code::Kind,
+      Code::ExtraICState extra_ic_state,
       String* name,
       JSObject* receiver,
       GlobalObject* holder,
@@ -240,30 +245,39 @@
 
   MUST_USE_RESULT MaybeObject* ComputeCallInitialize(int argc,
                                                      InLoopFlag in_loop,
+                                                     RelocInfo::Mode mode,
                                                      Code::Kind kind);
 
-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  Handle<Code> ComputeCallInitialize(int argc,
+                                     InLoopFlag in_loop,
+                                     RelocInfo::Mode mode);
 
   Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
 
   MUST_USE_RESULT MaybeObject* ComputeCallPreMonomorphic(
       int argc,
       InLoopFlag in_loop,
-      Code::Kind kind);
+      Code::Kind kind,
+      Code::ExtraICState extra_ic_state);
 
   MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
                                                  InLoopFlag in_loop,
-                                                 Code::Kind kind);
+                                                 Code::Kind kind,
+                                                 Code::ExtraICState state);
 
   MUST_USE_RESULT MaybeObject* ComputeCallMegamorphic(int argc,
                                                       InLoopFlag in_loop,
-                                                      Code::Kind kind);
+                                                      Code::Kind kind,
+                                                      Code::ExtraICState state);
 
-  MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc, Code::Kind kind);
+  MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState state);
 
   // Finds the Code object stored in the Heap::non_monomorphic_cache().
   MUST_USE_RESULT Code* FindCallInitialize(int argc,
                                            InLoopFlag in_loop,
+                                           RelocInfo::Mode mode,
                                            Code::Kind kind);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -336,7 +350,7 @@
   Entry secondary_[kSecondaryTableSize];
 
   // Computes the hashed offsets for primary and secondary caches.
-  RLYSTC int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
+  static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
     // This works well because the heap object tag size and the hash
     // shift are equal.  Shifting down the length field to get the
     // hash code would effectively throw away two bits of the hash
@@ -359,7 +373,7 @@
     return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
   }
 
-  RLYSTC int SecondaryOffset(String* name, Code::Flags flags, int seed) {
+  static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
     // Use the seed from the primary cache in the secondary cache.
     uint32_t string_low32bits =
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
@@ -376,7 +390,7 @@
   // ends in String::kHashShift 0s.  Then we shift it so it is a multiple
   // of sizeof(Entry).  This makes it easier to avoid making mistakes
   // in the hashed offset computations.
-  RLYSTC Entry* entry(Entry* table, int offset) {
+  static Entry* entry(Entry* table, int offset) {
     const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
     return reinterpret_cast<Entry*>(
         reinterpret_cast<Address>(table) + (offset << shift_amount));
@@ -468,7 +482,10 @@
                                  Register scratch,
                                  Label* miss_label);
 
-  static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
+  static void GenerateLoadMiss(MacroAssembler* masm,
+                               Code::Kind kind);
+
+  static void GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm);
 
   // Generates code that verifies that the property holder has not changed
   // (checking maps of objects in the prototype chain for fast and global
@@ -633,10 +650,21 @@
   MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
   MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
 
-  MUST_USE_RESULT MaybeObject* CompileLoadSpecialized(JSObject* receiver);
+  MUST_USE_RESULT MaybeObject* CompileLoadFastElement(Map* receiver_map);
+
+  MUST_USE_RESULT MaybeObject* CompileLoadMegamorphic(
+      MapList* receiver_maps,
+      CodeList* handler_ics);
+
+  static void GenerateLoadExternalArray(MacroAssembler* masm,
+                                        ExternalArrayType array_type);
+
+  static void GenerateLoadFastElement(MacroAssembler* masm);
 
  private:
-  MaybeObject* GetCode(PropertyType type, String* name);
+  MaybeObject* GetCode(PropertyType type,
+                       String* name,
+                       InlineCacheState state = MONOMORPHIC);
 };
 
 
@@ -677,10 +705,22 @@
                                                  Map* transition,
                                                  String* name);
 
-  MUST_USE_RESULT MaybeObject* CompileStoreSpecialized(JSObject* receiver);
+  MUST_USE_RESULT MaybeObject* CompileStoreFastElement(Map* receiver_map);
+
+  MUST_USE_RESULT MaybeObject* CompileStoreMegamorphic(
+      MapList* receiver_maps,
+      CodeList* handler_ics);
+
+  static void GenerateStoreFastElement(MacroAssembler* masm,
+                                       bool is_js_array);
+
+  static void GenerateStoreExternalArray(MacroAssembler* masm,
+                                         ExternalArrayType array_type);
 
  private:
-  MaybeObject* GetCode(PropertyType type, String* name);
+  MaybeObject* GetCode(PropertyType type,
+                       String* name,
+                       InlineCacheState state = MONOMORPHIC);
 
   StrictModeFlag strict_mode_;
 };
@@ -708,23 +748,30 @@
                    Code::ExtraICState extra_ic_state,
                    InlineCacheHolderFlag cache_holder);
 
-  MUST_USE_RESULT MaybeObject* CompileCallField(JSObject* object,
-                                                JSObject* holder,
-                                                int index,
-                                                String* name);
-  MUST_USE_RESULT MaybeObject* CompileCallConstant(Object* object,
-                                                   JSObject* holder,
-                                                   JSFunction* function,
-                                                   String* name,
-                                                   CheckType check);
-  MUST_USE_RESULT MaybeObject* CompileCallInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name);
-  MUST_USE_RESULT MaybeObject* CompileCallGlobal(JSObject* object,
-                                                 GlobalObject* holder,
-                                                 JSGlobalPropertyCell* cell,
-                                                 JSFunction* function,
-                                                 String* name);
+  MUST_USE_RESULT MaybeObject* CompileCallField(
+      JSObject* object,
+      JSObject* holder,
+      int index,
+      String* name);
+
+  MUST_USE_RESULT MaybeObject* CompileCallConstant(
+      Object* object,
+      JSObject* holder,
+      JSFunction* function,
+      String* name,
+      CheckType check);
+
+  MUST_USE_RESULT MaybeObject* CompileCallInterceptor(
+      JSObject* object,
+      JSObject* holder,
+      String* name);
+
+  MUST_USE_RESULT MaybeObject* CompileCallGlobal(
+      JSObject* object,
+      GlobalObject* holder,
+      JSGlobalPropertyCell* cell,
+      JSFunction* function,
+      String* name);
 
   static bool HasCustomCallGenerator(JSFunction* function);
 
@@ -847,20 +894,36 @@
   CallHandlerInfo* api_call_info_;
 };
 
-class ExternalArrayStubCompiler: public StubCompiler {
+class ExternalArrayLoadStubCompiler: public StubCompiler {
  public:
-  explicit ExternalArrayStubCompiler() {}
+  explicit ExternalArrayLoadStubCompiler(StrictModeFlag strict_mode)
+    : strict_mode_(strict_mode) { }
 
-  MUST_USE_RESULT MaybeObject* CompileKeyedLoadStub(
-      JSObject* receiver, ExternalArrayType array_type, Code::Flags flags);
-
-  MUST_USE_RESULT MaybeObject* CompileKeyedStoreStub(
-      JSObject* receiver, ExternalArrayType array_type, Code::Flags flags);
+  MUST_USE_RESULT MaybeObject* CompileLoad(
+      JSObject* receiver, ExternalArrayType array_type);
 
  private:
-  MaybeObject* GetCode(Code::Flags flags);
+  MaybeObject* GetCode();
+
+  StrictModeFlag strict_mode_;
 };
 
+
+class ExternalArrayStoreStubCompiler: public StubCompiler {
+ public:
+  explicit ExternalArrayStoreStubCompiler(StrictModeFlag strict_mode)
+      : strict_mode_(strict_mode) {}
+
+  MUST_USE_RESULT MaybeObject* CompileStore(
+      JSObject* receiver, ExternalArrayType array_type);
+
+ private:
+  MaybeObject* GetCode();
+
+  StrictModeFlag strict_mode_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_STUB_CACHE_H_
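
The monolithic ExternalArrayStubCompiler is split above into separate load and store compilers, each carrying its own StrictModeFlag. As a hedged sketch of how a caller might drive the two classes declared in this header (the real call sites live in stub-cache.cc and are not shown in this hunk; the helper name below is hypothetical):

// Hypothetical dispatcher, not part of the patch: uses only the interface
// declared in stub-cache.h above to pick the load or store compiler.
static MaybeObject* CompileExternalArrayStub(JSObject* receiver,
                                             ExternalArrayType array_type,
                                             bool is_store,
                                             StrictModeFlag strict_mode) {
  if (is_store) {
    ExternalArrayStoreStubCompiler compiler(strict_mode);
    return compiler.CompileStore(receiver, array_type);
  }
  ExternalArrayLoadStubCompiler compiler(strict_mode);
  return compiler.CompileLoad(receiver, array_type);
}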
diff --git a/src/top.cc b/src/top.cc
deleted file mode 100644
index b9207c8..0000000
--- a/src/top.cc
+++ /dev/null
@@ -1,983 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "api.h"
-#include "bootstrapper.h"
-#include "compiler.h"
-#include "debug.h"
-#include "execution.h"
-#include "messages.h"
-#include "platform.h"
-#include "simulator.h"
-#include "string-stream.h"
-#include "vm-state-inl.h"
-
-
-// TODO(isolates): move to isolate.cc. This stuff is kept here to
-// simplify merging.
-
-namespace v8 {
-namespace internal {
-
-ThreadLocalTop::ThreadLocalTop() {
-  InitializeInternal();
-  // This flag may be set using v8::V8::IgnoreOutOfMemoryException()
-  // before an isolate is initialized. The initialize methods below do
-  // not touch it to preserve its value.
-  ignore_out_of_memory_ = false;
-}
-
-
-void ThreadLocalTop::InitializeInternal() {
-  c_entry_fp_ = 0;
-  handler_ = 0;
-#ifdef USE_SIMULATOR
-  simulator_ = NULL;
-#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  js_entry_sp_ = NULL;
-  external_callback_ = NULL;
-#endif
-#ifdef ENABLE_VMSTATE_TRACKING
-  current_vm_state_ = EXTERNAL;
-#endif
-  try_catch_handler_address_ = NULL;
-  context_ = NULL;
-  thread_id_ = ThreadId::Invalid();
-  external_caught_exception_ = false;
-  failed_access_check_callback_ = NULL;
-  save_context_ = NULL;
-  catcher_ = NULL;
-}
-
-
-void ThreadLocalTop::Initialize() {
-  InitializeInternal();
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
-  simulator_ = Simulator::current(Isolate::Current());
-#elif V8_TARGET_ARCH_MIPS
-  simulator_ = Simulator::current(Isolate::Current());
-#endif
-#endif
-  thread_id_ = ThreadId::Current();
-}
-
-
-v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
-  return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
-}
-
-
-Address Isolate::get_address_from_id(Isolate::AddressId id) {
-  return isolate_addresses_[id];
-}
-
-
-char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
-  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
-  Iterate(v, thread);
-  return thread_storage + sizeof(ThreadLocalTop);
-}
-
-
-void Isolate::IterateThread(ThreadVisitor* v) {
-  v->VisitThread(this, thread_local_top());
-}
-
-
-void Isolate::IterateThread(ThreadVisitor* v, char* t) {
-  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
-  v->VisitThread(this, thread);
-}
-
-
-void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
-  // Visit the roots from the top for a given thread.
-  Object* pending;
-  // The pending exception can sometimes be a failure.  We can't show
-  // that to the GC, which only understands objects.
-  if (thread->pending_exception_->ToObject(&pending)) {
-    v->VisitPointer(&pending);
-    thread->pending_exception_ = pending;  // In case GC updated it.
-  }
-  v->VisitPointer(&(thread->pending_message_obj_));
-  v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
-  v->VisitPointer(BitCast<Object**>(&(thread->context_)));
-  Object* scheduled;
-  if (thread->scheduled_exception_->ToObject(&scheduled)) {
-    v->VisitPointer(&scheduled);
-    thread->scheduled_exception_ = scheduled;
-  }
-
-  for (v8::TryCatch* block = thread->TryCatchHandler();
-       block != NULL;
-       block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
-    v->VisitPointer(BitCast<Object**>(&(block->exception_)));
-    v->VisitPointer(BitCast<Object**>(&(block->message_)));
-  }
-
-  // Iterate over pointers on native execution stack.
-  for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
-    it.frame()->Iterate(v);
-  }
-}
-
-
-void Isolate::Iterate(ObjectVisitor* v) {
-  ThreadLocalTop* current_t = thread_local_top();
-  Iterate(v, current_t);
-}
-
-
-void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
-  // The ARM simulator has a separate JS stack.  We therefore register
-  // the C++ try catch handler with the simulator and get back an
-  // address that can be used for comparisons with addresses into the
-  // JS stack.  When running without the simulator, the address
-  // returned will be the address of the C++ try catch handler itself.
-  Address address = reinterpret_cast<Address>(
-      SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
-  thread_local_top()->set_try_catch_handler_address(address);
-}
-
-
-void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
-  ASSERT(thread_local_top()->TryCatchHandler() == that);
-  thread_local_top()->set_try_catch_handler_address(
-      reinterpret_cast<Address>(that->next_));
-  thread_local_top()->catcher_ = NULL;
-  SimulatorStack::UnregisterCTryCatch();
-}
-
-
-Handle<String> Isolate::StackTraceString() {
-  if (stack_trace_nesting_level_ == 0) {
-    stack_trace_nesting_level_++;
-    HeapStringAllocator allocator;
-    StringStream::ClearMentionedObjectCache();
-    StringStream accumulator(&allocator);
-    incomplete_message_ = &accumulator;
-    PrintStack(&accumulator);
-    Handle<String> stack_trace = accumulator.ToString();
-    incomplete_message_ = NULL;
-    stack_trace_nesting_level_ = 0;
-    return stack_trace;
-  } else if (stack_trace_nesting_level_ == 1) {
-    stack_trace_nesting_level_++;
-    OS::PrintError(
-      "\n\nAttempt to print stack while printing stack (double fault)\n");
-    OS::PrintError(
-      "If you are lucky you may find a partial stack dump on stdout.\n\n");
-    incomplete_message_->OutputToStdOut();
-    return factory()->empty_symbol();
-  } else {
-    OS::Abort();
-    // Unreachable
-    return factory()->empty_symbol();
-  }
-}
-
-
-Handle<JSArray> Isolate::CaptureCurrentStackTrace(
-    int frame_limit, StackTrace::StackTraceOptions options) {
-  // Ensure no negative values.
-  int limit = Max(frame_limit, 0);
-  Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
-
-  Handle<String> column_key = factory()->LookupAsciiSymbol("column");
-  Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
-  Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
-  Handle<String> name_or_source_url_key =
-      factory()->LookupAsciiSymbol("nameOrSourceURL");
-  Handle<String> script_name_or_source_url_key =
-      factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
-  Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
-  Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
-  Handle<String> constructor_key =
-      factory()->LookupAsciiSymbol("isConstructor");
-
-  StackTraceFrameIterator it(this);
-  int frames_seen = 0;
-  while (!it.done() && (frames_seen < limit)) {
-    JavaScriptFrame* frame = it.frame();
-    // Set initial size to the maximum inlining level + 1 for the outermost
-    // function.
-    List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
-    frame->Summarize(&frames);
-    for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
-      // Create a JSObject to hold the information for the StackFrame.
-      Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
-
-      Handle<JSFunction> fun = frames[i].function();
-      Handle<Script> script(Script::cast(fun->shared()->script()));
-
-      if (options & StackTrace::kLineNumber) {
-        int script_line_offset = script->line_offset()->value();
-        int position = frames[i].code()->SourcePosition(frames[i].pc());
-        int line_number = GetScriptLineNumber(script, position);
-        // line_number is already shifted by the script_line_offset.
-        int relative_line_number = line_number - script_line_offset;
-        if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
-          Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
-          int start = (relative_line_number == 0) ? 0 :
-              Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
-          int column_offset = position - start;
-          if (relative_line_number == 0) {
-            // For the case where the code is on the same line as the script
-            // tag.
-            column_offset += script->column_offset()->value();
-          }
-          SetLocalPropertyNoThrow(stackFrame, column_key,
-                                  Handle<Smi>(Smi::FromInt(column_offset + 1)));
-        }
-        SetLocalPropertyNoThrow(stackFrame, line_key,
-                                Handle<Smi>(Smi::FromInt(line_number + 1)));
-      }
-
-      if (options & StackTrace::kScriptName) {
-        Handle<Object> script_name(script->name(), this);
-        SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
-      }
-
-      if (options & StackTrace::kScriptNameOrSourceURL) {
-        Handle<Object> script_name(script->name(), this);
-        Handle<JSValue> script_wrapper = GetScriptWrapper(script);
-        Handle<Object> property = GetProperty(script_wrapper,
-                                              name_or_source_url_key);
-        ASSERT(property->IsJSFunction());
-        Handle<JSFunction> method = Handle<JSFunction>::cast(property);
-        bool caught_exception;
-        Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
-                                                   NULL, &caught_exception);
-        if (caught_exception) {
-          result = factory()->undefined_value();
-        }
-        SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
-                                result);
-      }
-
-      if (options & StackTrace::kFunctionName) {
-        Handle<Object> fun_name(fun->shared()->name(), this);
-        if (fun_name->ToBoolean()->IsFalse()) {
-          fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
-        }
-        SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
-      }
-
-      if (options & StackTrace::kIsEval) {
-        int type = Smi::cast(script->compilation_type())->value();
-        Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
-            factory()->true_value() : factory()->false_value();
-        SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
-      }
-
-      if (options & StackTrace::kIsConstructor) {
-        Handle<Object> is_constructor = (frames[i].is_constructor()) ?
-            factory()->true_value() : factory()->false_value();
-        SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
-      }
-
-      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
-      frames_seen++;
-    }
-    it.Advance();
-  }
-
-  stack_trace->set_length(Smi::FromInt(frames_seen));
-  return stack_trace;
-}
-
-
-void Isolate::PrintStack() {
-  if (stack_trace_nesting_level_ == 0) {
-    stack_trace_nesting_level_++;
-
-    StringAllocator* allocator;
-    if (preallocated_message_space_ == NULL) {
-      allocator = new HeapStringAllocator();
-    } else {
-      allocator = preallocated_message_space_;
-    }
-
-    StringStream::ClearMentionedObjectCache();
-    StringStream accumulator(allocator);
-    incomplete_message_ = &accumulator;
-    PrintStack(&accumulator);
-    accumulator.OutputToStdOut();
-    InitializeLoggingAndCounters();
-    accumulator.Log();
-    incomplete_message_ = NULL;
-    stack_trace_nesting_level_ = 0;
-    if (preallocated_message_space_ == NULL) {
-      // Remove the HeapStringAllocator created above.
-      delete allocator;
-    }
-  } else if (stack_trace_nesting_level_ == 1) {
-    stack_trace_nesting_level_++;
-    OS::PrintError(
-      "\n\nAttempt to print stack while printing stack (double fault)\n");
-    OS::PrintError(
-      "If you are lucky you may find a partial stack dump on stdout.\n\n");
-    incomplete_message_->OutputToStdOut();
-  }
-}
-
-
-static void PrintFrames(StringStream* accumulator,
-                        StackFrame::PrintMode mode) {
-  StackFrameIterator it;
-  for (int i = 0; !it.done(); it.Advance()) {
-    it.frame()->Print(accumulator, mode, i++);
-  }
-}
-
-
-void Isolate::PrintStack(StringStream* accumulator) {
-  if (!IsInitialized()) {
-    accumulator->Add(
-        "\n==== Stack trace is not available ==========================\n\n");
-    accumulator->Add(
-        "\n==== Isolate for the thread is not initialized =============\n\n");
-    return;
-  }
-  // The MentionedObjectCache is not GC-proof at the moment.
-  AssertNoAllocation nogc;
-  ASSERT(StringStream::IsMentionedObjectCacheClear());
-
-  // Avoid printing anything if there are no frames.
-  if (c_entry_fp(thread_local_top()) == 0) return;
-
-  accumulator->Add(
-      "\n==== Stack trace ============================================\n\n");
-  PrintFrames(accumulator, StackFrame::OVERVIEW);
-
-  accumulator->Add(
-      "\n==== Details ================================================\n\n");
-  PrintFrames(accumulator, StackFrame::DETAILS);
-
-  accumulator->PrintMentionedObjectCache();
-  accumulator->Add("=====================\n\n");
-}
-
-
-void Isolate::SetFailedAccessCheckCallback(
-    v8::FailedAccessCheckCallback callback) {
-  thread_local_top()->failed_access_check_callback_ = callback;
-}
-
-
-void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
-  if (!thread_local_top()->failed_access_check_callback_) return;
-
-  ASSERT(receiver->IsAccessCheckNeeded());
-  ASSERT(context());
-
-  // Get the data object from access check info.
-  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
-  if (!constructor->shared()->IsApiFunction()) return;
-  Object* data_obj =
-      constructor->shared()->get_api_func_data()->access_check_info();
-  if (data_obj == heap_.undefined_value()) return;
-
-  HandleScope scope;
-  Handle<JSObject> receiver_handle(receiver);
-  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
-  thread_local_top()->failed_access_check_callback_(
-    v8::Utils::ToLocal(receiver_handle),
-    type,
-    v8::Utils::ToLocal(data));
-}
-
-
-enum MayAccessDecision {
-  YES, NO, UNKNOWN
-};
-
-
-static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
-                                           JSObject* receiver,
-                                           v8::AccessType type) {
-  // During bootstrapping, callback functions are not enabled yet.
-  if (isolate->bootstrapper()->IsActive()) return YES;
-
-  if (receiver->IsJSGlobalProxy()) {
-    Object* receiver_context = JSGlobalProxy::cast(receiver)->context();
-    if (!receiver_context->IsContext()) return NO;
-
-    // Get the global context of current top context.
-    // avoid using Isolate::global_context() because it uses Handle.
-    Context* global_context = isolate->context()->global()->global_context();
-    if (receiver_context == global_context) return YES;
-
-    if (Context::cast(receiver_context)->security_token() ==
-        global_context->security_token())
-      return YES;
-  }
-
-  return UNKNOWN;
-}
-
-
-bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
-                             v8::AccessType type) {
-  ASSERT(receiver->IsAccessCheckNeeded());
-
-  // The callers of this method are not expecting a GC.
-  AssertNoAllocation no_gc;
-
-  // Skip checks for hidden properties access.  Note, we do not
-  // require existence of a context in this case.
-  if (key == heap_.hidden_symbol()) return true;
-
-  // Check for compatibility between the security tokens in the
-  // current lexical context and the accessed object.
-  ASSERT(context());
-
-  MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
-  if (decision != UNKNOWN) return decision == YES;
-
-  // Get named access check callback
-  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
-  if (!constructor->shared()->IsApiFunction()) return false;
-
-  Object* data_obj =
-     constructor->shared()->get_api_func_data()->access_check_info();
-  if (data_obj == heap_.undefined_value()) return false;
-
-  Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
-  v8::NamedSecurityCallback callback =
-      v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
-
-  if (!callback) return false;
-
-  HandleScope scope(this);
-  Handle<JSObject> receiver_handle(receiver, this);
-  Handle<Object> key_handle(key, this);
-  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
-  LOG(this, ApiNamedSecurityCheck(key));
-  bool result = false;
-  {
-    // Leaving JavaScript.
-    VMState state(this, EXTERNAL);
-    result = callback(v8::Utils::ToLocal(receiver_handle),
-                      v8::Utils::ToLocal(key_handle),
-                      type,
-                      v8::Utils::ToLocal(data));
-  }
-  return result;
-}
-
-
-bool Isolate::MayIndexedAccess(JSObject* receiver,
-                               uint32_t index,
-                               v8::AccessType type) {
-  ASSERT(receiver->IsAccessCheckNeeded());
-  // Check for compatibility between the security tokens in the
-  // current lexical context and the accessed object.
-  ASSERT(context());
-
-  MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
-  if (decision != UNKNOWN) return decision == YES;
-
-  // Get indexed access check callback
-  JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
-  if (!constructor->shared()->IsApiFunction()) return false;
-
-  Object* data_obj =
-      constructor->shared()->get_api_func_data()->access_check_info();
-  if (data_obj == heap_.undefined_value()) return false;
-
-  Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
-  v8::IndexedSecurityCallback callback =
-      v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
-
-  if (!callback) return false;
-
-  HandleScope scope(this);
-  Handle<JSObject> receiver_handle(receiver, this);
-  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
-  LOG(this, ApiIndexedSecurityCheck(index));
-  bool result = false;
-  {
-    // Leaving JavaScript.
-    VMState state(this, EXTERNAL);
-    result = callback(v8::Utils::ToLocal(receiver_handle),
-                      index,
-                      type,
-                      v8::Utils::ToLocal(data));
-  }
-  return result;
-}
-
-
-const char* const Isolate::kStackOverflowMessage =
-  "Uncaught RangeError: Maximum call stack size exceeded";
-
-
-Failure* Isolate::StackOverflow() {
-  HandleScope scope;
-  Handle<String> key = factory()->stack_overflow_symbol();
-  Handle<JSObject> boilerplate =
-      Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
-  Handle<Object> exception = Copy(boilerplate);
-  // TODO(1240995): To avoid having to call JavaScript code to compute
-  // the message for stack overflow exceptions which is very likely to
-  // double fault with another stack overflow exception, we use a
-  // precomputed message.
-  DoThrow(*exception, NULL);
-  return Failure::Exception();
-}
-
-
-Failure* Isolate::TerminateExecution() {
-  DoThrow(heap_.termination_exception(), NULL);
-  return Failure::Exception();
-}
-
-
-Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
-  DoThrow(exception, location);
-  return Failure::Exception();
-}
-
-
-Failure* Isolate::ReThrow(MaybeObject* exception, MessageLocation* location) {
-  bool can_be_caught_externally = false;
-  ShouldReportException(&can_be_caught_externally,
-                        is_catchable_by_javascript(exception));
-  thread_local_top()->catcher_ = can_be_caught_externally ?
-      try_catch_handler() : NULL;
-
-  // Set the exception being re-thrown.
-  set_pending_exception(exception);
-  if (exception->IsFailure()) return exception->ToFailureUnchecked();
-  return Failure::Exception();
-}
-
-
-Failure* Isolate::ThrowIllegalOperation() {
-  return Throw(heap_.illegal_access_symbol());
-}
-
-
-void Isolate::ScheduleThrow(Object* exception) {
-  // When scheduling a throw we first throw the exception to get the
-  // error reporting if it is uncaught before rescheduling it.
-  Throw(exception);
-  thread_local_top()->scheduled_exception_ = pending_exception();
-  thread_local_top()->external_caught_exception_ = false;
-  clear_pending_exception();
-}
-
-
-Failure* Isolate::PromoteScheduledException() {
-  MaybeObject* thrown = scheduled_exception();
-  clear_scheduled_exception();
-  // Re-throw the exception to avoid getting repeated error reporting.
-  return ReThrow(thrown);
-}
-
-
-void Isolate::PrintCurrentStackTrace(FILE* out) {
-  StackTraceFrameIterator it(this);
-  while (!it.done()) {
-    HandleScope scope;
-    // Find code position if recorded in relocation info.
-    JavaScriptFrame* frame = it.frame();
-    int pos = frame->LookupCode()->SourcePosition(frame->pc());
-    Handle<Object> pos_obj(Smi::FromInt(pos));
-    // Fetch function and receiver.
-    Handle<JSFunction> fun(JSFunction::cast(frame->function()));
-    Handle<Object> recv(frame->receiver());
-    // Advance to the next JavaScript frame and determine if the
-    // current frame is the top-level frame.
-    it.Advance();
-    Handle<Object> is_top_level = it.done()
-        ? factory()->true_value()
-        : factory()->false_value();
-    // Generate and print stack trace line.
-    Handle<String> line =
-        Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
-    if (line->length() > 0) {
-      line->PrintOn(out);
-      fprintf(out, "\n");
-    }
-  }
-}
-
-
-void Isolate::ComputeLocation(MessageLocation* target) {
-  *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
-  StackTraceFrameIterator it(this);
-  if (!it.done()) {
-    JavaScriptFrame* frame = it.frame();
-    JSFunction* fun = JSFunction::cast(frame->function());
-    Object* script = fun->shared()->script();
-    if (script->IsScript() &&
-        !(Script::cast(script)->source()->IsUndefined())) {
-      int pos = frame->LookupCode()->SourcePosition(frame->pc());
-      // Compute the location from the function and the reloc info.
-      Handle<Script> casted_script(Script::cast(script));
-      *target = MessageLocation(casted_script, pos, pos + 1);
-    }
-  }
-}
-
-
-bool Isolate::ShouldReportException(bool* can_be_caught_externally,
-                                    bool catchable_by_javascript) {
-  // Find the top-most try-catch handler.
-  StackHandler* handler =
-      StackHandler::FromAddress(Isolate::handler(thread_local_top()));
-  while (handler != NULL && !handler->is_try_catch()) {
-    handler = handler->next();
-  }
-
-  // Get the address of the external handler so we can compare the address to
-  // determine which one is closer to the top of the stack.
-  Address external_handler_address =
-      thread_local_top()->try_catch_handler_address();
-
-  // The exception has been externally caught if and only if there is
-  // an external handler which is on top of the top-most try-catch
-  // handler.
-  *can_be_caught_externally = external_handler_address != NULL &&
-      (handler == NULL || handler->address() > external_handler_address ||
-       !catchable_by_javascript);
-
-  if (*can_be_caught_externally) {
-    // Only report the exception if the external handler is verbose.
-    return try_catch_handler()->is_verbose_;
-  } else {
-    // Report the exception if it isn't caught by JavaScript code.
-    return handler == NULL;
-  }
-}
-
-
-void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
-  ASSERT(!has_pending_exception());
-
-  HandleScope scope;
-  Object* exception_object = Smi::FromInt(0);
-  bool is_object = exception->ToObject(&exception_object);
-  Handle<Object> exception_handle(exception_object);
-
-  // Determine reporting and whether the exception is caught externally.
-  bool catchable_by_javascript = is_catchable_by_javascript(exception);
-  // Only real objects can be caught by JS.
-  ASSERT(!catchable_by_javascript || is_object);
-  bool can_be_caught_externally = false;
-  bool should_report_exception =
-      ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
-  bool report_exception = catchable_by_javascript && should_report_exception;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Notify debugger of exception.
-  if (catchable_by_javascript) {
-    debugger_->OnException(exception_handle, report_exception);
-  }
-#endif
-
-  // Generate the message.
-  Handle<Object> message_obj;
-  MessageLocation potential_computed_location;
-  bool try_catch_needs_message =
-      can_be_caught_externally &&
-      try_catch_handler()->capture_message_;
-  if (report_exception || try_catch_needs_message) {
-    if (location == NULL) {
-      // If no location was specified we use a computed one instead
-      ComputeLocation(&potential_computed_location);
-      location = &potential_computed_location;
-    }
-    if (!bootstrapper()->IsActive()) {
-      // It's not safe to try to make message objects or collect stack
-      // traces while the bootstrapper is active since the infrastructure
-      // may not have been properly initialized.
-      Handle<String> stack_trace;
-      if (FLAG_trace_exception) stack_trace = StackTraceString();
-      Handle<JSArray> stack_trace_object;
-      if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
-          stack_trace_object = CaptureCurrentStackTrace(
-              stack_trace_for_uncaught_exceptions_frame_limit_,
-              stack_trace_for_uncaught_exceptions_options_);
-      }
-      ASSERT(is_object);  // Can't use the handle unless there's a real object.
-      message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
-          location, HandleVector<Object>(&exception_handle, 1), stack_trace,
-          stack_trace_object);
-    }
-  }
-
-  // Save the message for reporting if the the exception remains uncaught.
-  thread_local_top()->has_pending_message_ = report_exception;
-  if (!message_obj.is_null()) {
-    thread_local_top()->pending_message_obj_ = *message_obj;
-    if (location != NULL) {
-      thread_local_top()->pending_message_script_ = *location->script();
-      thread_local_top()->pending_message_start_pos_ = location->start_pos();
-      thread_local_top()->pending_message_end_pos_ = location->end_pos();
-    }
-  }
-
-  // Do not forget to clean catcher_ if currently thrown exception cannot
-  // be caught.  If necessary, ReThrow will update the catcher.
-  thread_local_top()->catcher_ = can_be_caught_externally ?
-      try_catch_handler() : NULL;
-
-  // NOTE: Notifying the debugger or generating the message
-  // may have caused new exceptions. For now, we just ignore
-  // that and set the pending exception to the original one.
-  if (is_object) {
-    set_pending_exception(*exception_handle);
-  } else {
-    // Failures are not on the heap so they neither need nor work with handles.
-    ASSERT(exception_handle->IsFailure());
-    set_pending_exception(exception);
-  }
-}
-
-
-bool Isolate::IsExternallyCaught() {
-  ASSERT(has_pending_exception());
-
-  if ((thread_local_top()->catcher_ == NULL) ||
-      (try_catch_handler() != thread_local_top()->catcher_)) {
-    // When throwing the exception, we found no v8::TryCatch
-    // which should care about this exception.
-    return false;
-  }
-
-  if (!is_catchable_by_javascript(pending_exception())) {
-    return true;
-  }
-
-  // Get the address of the external handler so we can compare the address to
-  // determine which one is closer to the top of the stack.
-  Address external_handler_address =
-      thread_local_top()->try_catch_handler_address();
-  ASSERT(external_handler_address != NULL);
-
-  // The exception has been externally caught if and only if there is
-  // an external handler which is on top of the top-most try-finally
-  // handler.
-  // There should be no try-catch blocks as they would prohibit us from
-  // finding external catcher in the first place (see catcher_ check above).
-  //
-  // Note, that finally clause would rethrow an exception unless it's
-  // aborted by jumps in control flow like return, break, etc. and we'll
-  // have another chances to set proper v8::TryCatch.
-  StackHandler* handler =
-      StackHandler::FromAddress(Isolate::handler(thread_local_top()));
-  while (handler != NULL && handler->address() < external_handler_address) {
-    ASSERT(!handler->is_try_catch());
-    if (handler->is_try_finally()) return false;
-
-    handler = handler->next();
-  }
-
-  return true;
-}
-
-
-void Isolate::ReportPendingMessages() {
-  ASSERT(has_pending_exception());
-  PropagatePendingExceptionToExternalTryCatch();
-
-  // If the pending exception is OutOfMemoryException set out_of_memory in
-  // the global context.  Note: We have to mark the global context here
-  // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
-  // set it.
-  HandleScope scope;
-  if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
-    context()->mark_out_of_memory();
-  } else if (thread_local_top_.pending_exception_ ==
-             heap()->termination_exception()) {
-    // Do nothing: if needed, the exception has been already propagated to
-    // v8::TryCatch.
-  } else {
-    if (thread_local_top_.has_pending_message_) {
-      thread_local_top_.has_pending_message_ = false;
-      if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
-        HandleScope scope;
-        Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
-        if (thread_local_top_.pending_message_script_ != NULL) {
-          Handle<Script> script(thread_local_top_.pending_message_script_);
-          int start_pos = thread_local_top_.pending_message_start_pos_;
-          int end_pos = thread_local_top_.pending_message_end_pos_;
-          MessageLocation location(script, start_pos, end_pos);
-          MessageHandler::ReportMessage(this, &location, message_obj);
-        } else {
-          MessageHandler::ReportMessage(this, NULL, message_obj);
-        }
-      }
-    }
-  }
-  clear_pending_message();
-}
-
-
-void Isolate::TraceException(bool flag) {
-  FLAG_trace_exception = flag;  // TODO(isolates): This is an unfortunate use.
-}
-
-
-bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
-  ASSERT(has_pending_exception());
-  PropagatePendingExceptionToExternalTryCatch();
-
-  // Allways reschedule out of memory exceptions.
-  if (!is_out_of_memory()) {
-    bool is_termination_exception =
-        pending_exception() == heap_.termination_exception();
-
-    // Do not reschedule the exception if this is the bottom call.
-    bool clear_exception = is_bottom_call;
-
-    if (is_termination_exception) {
-      if (is_bottom_call) {
-        thread_local_top()->external_caught_exception_ = false;
-        clear_pending_exception();
-        return false;
-      }
-    } else if (thread_local_top()->external_caught_exception_) {
-      // If the exception is externally caught, clear it if there are no
-      // JavaScript frames on the way to the C++ frame that has the
-      // external handler.
-      ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
-      Address external_handler_address =
-          thread_local_top()->try_catch_handler_address();
-      JavaScriptFrameIterator it;
-      if (it.done() || (it.frame()->sp() > external_handler_address)) {
-        clear_exception = true;
-      }
-    }
-
-    // Clear the exception if needed.
-    if (clear_exception) {
-      thread_local_top()->external_caught_exception_ = false;
-      clear_pending_exception();
-      return false;
-    }
-  }
-
-  // Reschedule the exception.
-  thread_local_top()->scheduled_exception_ = pending_exception();
-  clear_pending_exception();
-  return true;
-}
-
-
-void Isolate::SetCaptureStackTraceForUncaughtExceptions(
-      bool capture,
-      int frame_limit,
-      StackTrace::StackTraceOptions options) {
-  capture_stack_trace_for_uncaught_exceptions_ = capture;
-  stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
-  stack_trace_for_uncaught_exceptions_options_ = options;
-}
-
-
-bool Isolate::is_out_of_memory() {
-  if (has_pending_exception()) {
-    MaybeObject* e = pending_exception();
-    if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
-      return true;
-    }
-  }
-  if (has_scheduled_exception()) {
-    MaybeObject* e = scheduled_exception();
-    if (e->IsFailure() && Failure::cast(e)->IsOutOfMemoryException()) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
-Handle<Context> Isolate::global_context() {
-  GlobalObject* global = thread_local_top()->context_->global();
-  return Handle<Context>(global->global_context());
-}
-
-
-Handle<Context> Isolate::GetCallingGlobalContext() {
-  JavaScriptFrameIterator it;
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  if (debug_->InDebugger()) {
-    while (!it.done()) {
-      JavaScriptFrame* frame = it.frame();
-      Context* context = Context::cast(frame->context());
-      if (context->global_context() == *debug_->debug_context()) {
-        it.Advance();
-      } else {
-        break;
-      }
-    }
-  }
-#endif  // ENABLE_DEBUGGER_SUPPORT
-  if (it.done()) return Handle<Context>::null();
-  JavaScriptFrame* frame = it.frame();
-  Context* context = Context::cast(frame->context());
-  return Handle<Context>(context->global_context());
-}
-
-
-char* Isolate::ArchiveThread(char* to) {
-  if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
-    RuntimeProfiler::IsolateExitedJS(this);
-  }
-  memcpy(to, reinterpret_cast<char*>(thread_local_top()),
-         sizeof(ThreadLocalTop));
-  InitializeThreadLocal();
-  return to + sizeof(ThreadLocalTop);
-}
-
-
-char* Isolate::RestoreThread(char* from) {
-  memcpy(reinterpret_cast<char*>(thread_local_top()), from,
-         sizeof(ThreadLocalTop));
-  // This might be just paranoia, but it seems to be needed in case a
-  // thread_local_top_ is restored on a separate OS thread.
-#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
-  thread_local_top()->simulator_ = Simulator::current(this);
-#elif V8_TARGET_ARCH_MIPS
-  thread_local_top()->simulator_ = Simulator::current(this);
-#endif
-#endif
-  if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
-    RuntimeProfiler::IsolateEnteredJS(this);
-  }
-  return from + sizeof(ThreadLocalTop);
-}
-
-} }  // namespace v8::internal
diff --git a/src/type-info.cc b/src/type-info.cc
index 4069c83..5f794bd 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -58,9 +58,6 @@
 }
 
 
-STATIC_ASSERT(DEFAULT_STRING_STUB == Code::kNoExtraICState);
-
-
 TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
                                        Handle<Context> global_context) {
   global_context_ = global_context;
@@ -69,8 +66,8 @@
 }
 
 
-Handle<Object> TypeFeedbackOracle::GetInfo(int pos) {
-  int entry = dictionary_->FindEntry(pos);
+Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
+  int entry = dictionary_->FindEntry(ast_id);
   return entry != NumberDictionary::kNotFound
       ? Handle<Object>(dictionary_->ValueAt(entry))
       : Isolate::Current()->factory()->undefined_value();
@@ -78,11 +75,12 @@
 
 
 bool TypeFeedbackOracle::LoadIsMonomorphic(Property* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->position()));
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
-    Handle<Code> code(Code::cast(*map_or_code));
-    return code->kind() == Code::KEYED_EXTERNAL_ARRAY_LOAD_IC &&
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    return code->is_keyed_load_stub() &&
+        code->ic_state() == MONOMORPHIC &&
         code->FindFirstMap() != NULL;
   }
   return false;
@@ -90,80 +88,83 @@
 
 
 bool TypeFeedbackOracle::StoreIsMonomorphic(Expression* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->position()));
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
-    Handle<Code> code(Code::cast(*map_or_code));
-    return code->kind() == Code::KEYED_EXTERNAL_ARRAY_STORE_IC &&
-        code->FindFirstMap() != NULL;
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    return code->is_keyed_store_stub() &&
+        code->ic_state() == MONOMORPHIC;
   }
   return false;
 }
 
 
 bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
-  Handle<Object> value = GetInfo(expr->position());
+  Handle<Object> value = GetInfo(expr->id());
   return value->IsMap() || value->IsSmi();
 }
 
 
 Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
   ASSERT(LoadIsMonomorphic(expr));
-  Handle<Object> map_or_code(
-      Handle<HeapObject>::cast(GetInfo(expr->position())));
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsCode()) {
-    Handle<Code> code(Code::cast(*map_or_code));
-    return Handle<Map>(code->FindFirstMap());
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
+    Map* first_map = code->FindFirstMap();
+    ASSERT(first_map != NULL);
+    return Handle<Map>(first_map);
   }
-  return Handle<Map>(Map::cast(*map_or_code));
+  return Handle<Map>::cast(map_or_code);
 }
 
 
 Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
   ASSERT(StoreIsMonomorphic(expr));
-  Handle<HeapObject> map_or_code(
-      Handle<HeapObject>::cast(GetInfo(expr->position())));
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsCode()) {
-    Handle<Code> code(Code::cast(*map_or_code));
+    Handle<Code> code = Handle<Code>::cast(map_or_code);
     return Handle<Map>(code->FindFirstMap());
   }
-  return Handle<Map>(Map::cast(*map_or_code));
+  return Handle<Map>::cast(map_or_code);
 }
 
 
 ZoneMapList* TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
                                                    Handle<String> name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
-  return CollectReceiverTypes(expr->position(), name, flags);
+  return CollectReceiverTypes(expr->id(), name, flags);
 }
 
 
 ZoneMapList* TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
                                                     Handle<String> name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
-  return CollectReceiverTypes(expr->position(), name, flags);
+  return CollectReceiverTypes(expr->id(), name, flags);
 }
 
 
 ZoneMapList* TypeFeedbackOracle::CallReceiverTypes(Call* expr,
-                                                   Handle<String> name) {
+                                                   Handle<String> name,
+                                                   CallKind call_kind) {
   int arity = expr->arguments()->length();
-  // Note: these flags won't let us get maps from stubs with
-  // non-default extra ic state in the megamorphic case. In the more
-  // important monomorphic case the map is obtained directly, so it's
-  // not a problem until we decide to emit more polymorphic code.
+
+  // Note: Currently we do not take string extra ic data into account
+  // here.
+  Code::ExtraICState extra_ic_state =
+      CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION);
+
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
                                                     NORMAL,
-                                                    Code::kNoExtraICState,
+                                                    extra_ic_state,
                                                     OWN_MAP,
                                                     NOT_IN_LOOP,
                                                     arity);
-  return CollectReceiverTypes(expr->position(), name, flags);
+  return CollectReceiverTypes(expr->id(), name, flags);
 }
 
 
 CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) {
-  Handle<Object> value = GetInfo(expr->position());
+  Handle<Object> value = GetInfo(expr->id());
   if (!value->IsSmi()) return RECEIVER_MAP_CHECK;
   CheckType check = static_cast<CheckType>(Smi::cast(*value)->value());
   ASSERT(check != RECEIVER_MAP_CHECK);
@@ -172,14 +173,14 @@
 
 ExternalArrayType TypeFeedbackOracle::GetKeyedLoadExternalArrayType(
     Property* expr) {
-  Handle<Object> stub = GetInfo(expr->position());
+  Handle<Object> stub = GetInfo(expr->id());
   ASSERT(stub->IsCode());
   return Code::cast(*stub)->external_array_type();
 }
 
 ExternalArrayType TypeFeedbackOracle::GetKeyedStoreExternalArrayType(
     Expression* expr) {
-  Handle<Object> stub = GetInfo(expr->position());
+  Handle<Object> stub = GetInfo(expr->id());
   ASSERT(stub->IsCode());
   return Code::cast(*stub)->external_array_type();
 }
@@ -207,13 +208,13 @@
 
 
 bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
-  return *GetInfo(expr->position()) ==
+  return *GetInfo(expr->id()) ==
       Isolate::Current()->builtins()->builtin(id);
 }
 
 
 TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
-  Handle<Object> object = GetInfo(expr->position());
+  Handle<Object> object = GetInfo(expr->id());
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;
   Handle<Code> code = Handle<Code>::cast(object);
@@ -229,6 +230,9 @@
       return TypeInfo::Smi();
     case CompareIC::HEAP_NUMBERS:
       return TypeInfo::Number();
+    case CompareIC::SYMBOLS:
+    case CompareIC::STRINGS:
+      return TypeInfo::String();
     case CompareIC::OBJECTS:
       // TODO(kasperl): We really need a type for JS objects here.
       return TypeInfo::NonPrimitive();
@@ -239,44 +243,75 @@
 }
 
 
-TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
-  Handle<Object> object = GetInfo(expr->position());
+bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
+  Handle<Object> object = GetInfo(expr->id());
+  if (!object->IsCode()) return false;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (!code->is_compare_ic_stub()) return false;
+  CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+  return state == CompareIC::SYMBOLS;
+}
+
+
+TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
+  Handle<Object> object = GetInfo(expr->id());
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;
   Handle<Code> code = Handle<Code>::cast(object);
-  if (code->is_type_recording_binary_op_stub()) {
-    TRBinaryOpIC::TypeInfo type = static_cast<TRBinaryOpIC::TypeInfo>(
-        code->type_recording_binary_op_type());
-    TRBinaryOpIC::TypeInfo result_type = static_cast<TRBinaryOpIC::TypeInfo>(
-        code->type_recording_binary_op_result_type());
+  ASSERT(code->is_unary_op_stub());
+  UnaryOpIC::TypeInfo type = static_cast<UnaryOpIC::TypeInfo>(
+      code->unary_op_type());
+  switch (type) {
+    case UnaryOpIC::SMI:
+      return TypeInfo::Smi();
+    case UnaryOpIC::HEAP_NUMBER:
+      return TypeInfo::Double();
+    default:
+      return unknown;
+  }
+}
+
+
+TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
+  Handle<Object> object = GetInfo(expr->id());
+  TypeInfo unknown = TypeInfo::Unknown();
+  if (!object->IsCode()) return unknown;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (code->is_binary_op_stub()) {
+    BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
+        code->binary_op_type());
+    BinaryOpIC::TypeInfo result_type = static_cast<BinaryOpIC::TypeInfo>(
+        code->binary_op_result_type());
 
     switch (type) {
-      case TRBinaryOpIC::UNINITIALIZED:
+      case BinaryOpIC::UNINITIALIZED:
         // Uninitialized means never executed.
         // TODO(fschneider): Introduce a separate value for never-executed ICs
         return unknown;
-      case TRBinaryOpIC::SMI:
+      case BinaryOpIC::SMI:
         switch (result_type) {
-          case TRBinaryOpIC::UNINITIALIZED:
-          case TRBinaryOpIC::SMI:
+          case BinaryOpIC::UNINITIALIZED:
+          case BinaryOpIC::SMI:
             return TypeInfo::Smi();
-          case TRBinaryOpIC::INT32:
+          case BinaryOpIC::INT32:
             return TypeInfo::Integer32();
-          case TRBinaryOpIC::HEAP_NUMBER:
+          case BinaryOpIC::HEAP_NUMBER:
             return TypeInfo::Double();
           default:
             return unknown;
         }
-      case TRBinaryOpIC::INT32:
+      case BinaryOpIC::INT32:
         if (expr->op() == Token::DIV ||
-            result_type == TRBinaryOpIC::HEAP_NUMBER) {
+            result_type == BinaryOpIC::HEAP_NUMBER) {
           return TypeInfo::Double();
         }
         return TypeInfo::Integer32();
-      case TRBinaryOpIC::HEAP_NUMBER:
+      case BinaryOpIC::HEAP_NUMBER:
         return TypeInfo::Double();
-      case TRBinaryOpIC::STRING:
-      case TRBinaryOpIC::GENERIC:
+      case BinaryOpIC::BOTH_STRING:
+        return TypeInfo::String();
+      case BinaryOpIC::STRING:
+      case BinaryOpIC::GENERIC:
         return unknown;
      default:
         return unknown;
@@ -287,7 +322,7 @@
 
 
 TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
-  Handle<Object> object = GetInfo(clause->position());
+  Handle<Object> object = GetInfo(clause->CompareId());
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;
   Handle<Code> code = Handle<Code>::cast(object);
@@ -313,11 +348,40 @@
 }
 
 
-ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
+TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
+  Handle<Object> object = GetInfo(expr->CountId());
+  TypeInfo unknown = TypeInfo::Unknown();
+  if (!object->IsCode()) return unknown;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (!code->is_binary_op_stub()) return unknown;
+
+  BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
+      code->binary_op_type());
+  switch (type) {
+    case BinaryOpIC::UNINITIALIZED:
+    case BinaryOpIC::SMI:
+      return TypeInfo::Smi();
+    case BinaryOpIC::INT32:
+      return TypeInfo::Integer32();
+    case BinaryOpIC::HEAP_NUMBER:
+      return TypeInfo::Double();
+    case BinaryOpIC::BOTH_STRING:
+    case BinaryOpIC::STRING:
+    case BinaryOpIC::GENERIC:
+      return unknown;
+    default:
+      return unknown;
+  }
+  UNREACHABLE();
+  return unknown;
+}
+
+
+ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id,
                                                       Handle<String> name,
                                                       Code::Flags flags) {
   Isolate* isolate = Isolate::Current();
-  Handle<Object> object = GetInfo(position);
+  Handle<Object> object = GetInfo(ast_id);
   if (object->IsUndefined() || object->IsSmi()) return NULL;
 
   if (*object == isolate->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
@@ -340,8 +404,9 @@
 }
 
 
-void TypeFeedbackOracle::SetInfo(int position, Object* target) {
-  MaybeObject* maybe_result = dictionary_->AtNumberPut(position, target);
+void TypeFeedbackOracle::SetInfo(unsigned ast_id, Object* target) {
+  ASSERT(dictionary_->FindEntry(ast_id) == NumberDictionary::kNotFound);
+  MaybeObject* maybe_result = dictionary_->AtNumberPut(ast_id, target);
   USE(maybe_result);
 #ifdef DEBUG
   Object* result;
@@ -358,53 +423,48 @@
 
   const int kInitialCapacity = 16;
   List<int> code_positions(kInitialCapacity);
-  List<int> source_positions(kInitialCapacity);
-  CollectPositions(*code, &code_positions, &source_positions);
+  List<unsigned> ast_ids(kInitialCapacity);
+  CollectIds(*code, &code_positions, &ast_ids);
 
   ASSERT(dictionary_.is_null());  // Only initialize once.
   dictionary_ = isolate->factory()->NewNumberDictionary(
       code_positions.length());
 
-  int length = code_positions.length();
-  ASSERT(source_positions.length() == length);
+  const int length = code_positions.length();
+  ASSERT(ast_ids.length() == length);
   for (int i = 0; i < length; i++) {
     AssertNoAllocation no_allocation;
     RelocInfo info(code->instruction_start() + code_positions[i],
                    RelocInfo::CODE_TARGET, 0);
     Code* target = Code::GetCodeFromTargetAddress(info.target_address());
-    int position = source_positions[i];
+    unsigned id = ast_ids[i];
     InlineCacheState state = target->ic_state();
     Code::Kind kind = target->kind();
 
-    if (kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
+    if (kind == Code::BINARY_OP_IC ||
+        kind == Code::UNARY_OP_IC ||
         kind == Code::COMPARE_IC) {
-      // TODO(kasperl): Avoid having multiple ICs with the same
-      // position by making sure that we have position information
-      // recorded for all binary ICs.
-      int entry = dictionary_->FindEntry(position);
-      if (entry == NumberDictionary::kNotFound) {
-        SetInfo(position, target);
-      }
+      SetInfo(id, target);
     } else if (state == MONOMORPHIC) {
-      if (kind == Code::KEYED_EXTERNAL_ARRAY_LOAD_IC ||
-          kind == Code::KEYED_EXTERNAL_ARRAY_STORE_IC) {
-        SetInfo(position, target);
-      } else if (target->kind() != Code::CALL_IC ||
-          target->check_type() == RECEIVER_MAP_CHECK) {
+      if (kind == Code::KEYED_LOAD_IC ||
+          kind == Code::KEYED_STORE_IC) {
+        SetInfo(id, target);
+      } else if (kind != Code::CALL_IC ||
+                 target->check_type() == RECEIVER_MAP_CHECK) {
         Map* map = target->FindFirstMap();
         if (map == NULL) {
-          SetInfo(position, target);
+          SetInfo(id, target);
         } else {
-          SetInfo(position, map);
+          SetInfo(id, map);
         }
       } else {
         ASSERT(target->kind() == Code::CALL_IC);
         CheckType check = target->check_type();
         ASSERT(check != RECEIVER_MAP_CHECK);
-        SetInfo(position, Smi::FromInt(check));
+        SetInfo(id, Smi::FromInt(check));
       }
     } else if (state == MEGAMORPHIC) {
-      SetInfo(position, target);
+      SetInfo(id, target);
     }
   }
   // Allocate handle in the parent scope.
@@ -412,41 +472,34 @@
 }
 
 
-void TypeFeedbackOracle::CollectPositions(Code* code,
-                                          List<int>* code_positions,
-                                          List<int>* source_positions) {
+void TypeFeedbackOracle::CollectIds(Code* code,
+                                    List<int>* code_positions,
+                                    List<unsigned>* ast_ids) {
   AssertNoAllocation no_allocation;
-  int position = 0;
-  // Because the ICs we use for global variables access in the full
-  // code generator do not have any meaningful positions, we avoid
-  // collecting those by filtering out contextual code targets.
-  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
-      RelocInfo::kPositionMask;
+  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
   for (RelocIterator it(code, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
-    RelocInfo::Mode mode = info->rmode();
-    if (RelocInfo::IsCodeTarget(mode)) {
-      Code* target = Code::GetCodeFromTargetAddress(info->target_address());
-      if (target->is_inline_cache_stub()) {
-        InlineCacheState state = target->ic_state();
-        Code::Kind kind = target->kind();
-        if (kind == Code::TYPE_RECORDING_BINARY_OP_IC) {
-          if (target->type_recording_binary_op_type() ==
-              TRBinaryOpIC::GENERIC) {
-            continue;
-          }
-        } else if (kind == Code::COMPARE_IC) {
-          if (target->compare_state() == CompareIC::GENERIC) continue;
-        } else {
-          if (state != MONOMORPHIC && state != MEGAMORPHIC) continue;
+    ASSERT(RelocInfo::IsCodeTarget(info->rmode()));
+    Code* target = Code::GetCodeFromTargetAddress(info->target_address());
+    if (target->is_inline_cache_stub()) {
+      InlineCacheState state = target->ic_state();
+      Code::Kind kind = target->kind();
+      if (kind == Code::BINARY_OP_IC) {
+        if (target->binary_op_type() ==
+            BinaryOpIC::GENERIC) {
+          continue;
         }
-        code_positions->Add(
-            static_cast<int>(info->pc() - code->instruction_start()));
-        source_positions->Add(position);
+      } else if (kind == Code::COMPARE_IC) {
+        if (target->compare_state() == CompareIC::GENERIC) continue;
+      } else {
+        if (state != MONOMORPHIC && state != MEGAMORPHIC) continue;
       }
-    } else {
-      ASSERT(RelocInfo::IsPosition(mode));
-      position = static_cast<int>(info->data());
+      code_positions->Add(
+          static_cast<int>(info->pc() - code->instruction_start()));
+      ASSERT(ast_ids->length() == 0 ||
+             (*ast_ids)[ast_ids->length()-1] !=
+             static_cast<unsigned>(info->data()));
+      ast_ids->Add(static_cast<unsigned>(info->data()));
     }
   }
 }
diff --git a/src/type-info.h b/src/type-info.h
index f6e6729..828e3c7 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -28,6 +28,7 @@
 #ifndef V8_TYPE_INFO_H_
 #define V8_TYPE_INFO_H_
 
+#include "allocation.h"
 #include "globals.h"
 #include "zone.h"
 #include "zone-inl.h"
@@ -36,18 +37,18 @@
 namespace internal {
 
 //         Unknown
-//           |   |
-//           |   \--------------|
-//      Primitive             Non-primitive
-//           |   \--------|     |
-//         Number      String   |
-//         /    |         |     |
-//    Double  Integer32   |    /
-//        |      |       /    /
-//        |     Smi     /    /
-//        |      |     /    /
-//        |      |    /    /
-//        Uninitialized.--/
+//           |   \____________
+//           |                |
+//      Primitive       Non-primitive
+//           |   \_______     |
+//           |           |    |
+//        Number       String |
+//         /   \         |    |
+//    Double  Integer32  |   /
+//        |      |      /   /
+//        |     Smi    /   /
+//        |      |    / __/
+//        Uninitialized.
 
 class TypeInfo {
  public:
@@ -71,32 +72,6 @@
   // We haven't started collecting info yet.
   static TypeInfo Uninitialized() { return TypeInfo(kUninitialized); }
 
-  // Return compact representation.  Very sensitive to enum values below!
-  // Compacting drops information about primitive types and strings types.
-  // We use the compact representation when we only care about number types.
-  int ThreeBitRepresentation() {
-    ASSERT(type_ != kUninitialized);
-    int answer = type_ & 0xf;
-    answer = answer > 6 ? answer - 2 : answer;
-    ASSERT(answer >= 0);
-    ASSERT(answer <= 7);
-    return answer;
-  }
-
-  // Decode compact representation.  Very sensitive to enum values below!
-  static TypeInfo ExpandedRepresentation(int three_bit_representation) {
-    Type t = static_cast<Type>(three_bit_representation > 4 ?
-                               three_bit_representation + 2 :
-                               three_bit_representation);
-    t = (t == kUnknown) ? t : static_cast<Type>(t | kPrimitive);
-    ASSERT(t == kUnknown ||
-           t == kNumber ||
-           t == kInteger32 ||
-           t == kSmi ||
-           t == kDouble);
-    return TypeInfo(t);
-  }
-
   int ToInt() {
     return type_;
   }
@@ -227,9 +202,11 @@
 
 // Forward declarations.
 class Assignment;
+class UnaryOperation;
 class BinaryOperation;
 class Call;
 class CompareOperation;
+class CountOperation;
 class CompilationInfo;
 class Property;
 class CaseClause;
@@ -247,7 +224,9 @@
 
   ZoneMapList* LoadReceiverTypes(Property* expr, Handle<String> name);
   ZoneMapList* StoreReceiverTypes(Assignment* expr, Handle<String> name);
-  ZoneMapList* CallReceiverTypes(Call* expr, Handle<String> name);
+  ZoneMapList* CallReceiverTypes(Call* expr,
+                                 Handle<String> name,
+                                 CallKind call_kind);
 
   ExternalArrayType GetKeyedLoadExternalArrayType(Property* expr);
   ExternalArrayType GetKeyedStoreExternalArrayType(Expression* expr);
@@ -258,26 +237,29 @@
   bool LoadIsBuiltin(Property* expr, Builtins::Name id);
 
   // Get type information for arithmetic operations and compares.
+  TypeInfo UnaryType(UnaryOperation* expr);
   TypeInfo BinaryType(BinaryOperation* expr);
   TypeInfo CompareType(CompareOperation* expr);
+  bool IsSymbolCompare(CompareOperation* expr);
   TypeInfo SwitchType(CaseClause* clause);
+  TypeInfo IncrementType(CountOperation* expr);
 
  private:
-  ZoneMapList* CollectReceiverTypes(int position,
+  ZoneMapList* CollectReceiverTypes(unsigned ast_id,
                                     Handle<String> name,
                                     Code::Flags flags);
 
-  void SetInfo(int position, Object* target);
+  void SetInfo(unsigned ast_id, Object* target);
 
   void PopulateMap(Handle<Code> code);
 
-  void CollectPositions(Code* code,
-                        List<int>* code_positions,
-                        List<int>* source_positions);
+  void CollectIds(Code* code,
+                  List<int>* code_positions,
+                  List<unsigned>* ast_ids);
 
   // Returns an element from the backing store. Returns undefined if
   // there is no information.
-  Handle<Object> GetInfo(int pos);
+  Handle<Object> GetInfo(unsigned ast_id);
 
   Handle<Context> global_context_;
   Handle<NumberDictionary> dictionary_;
diff --git a/src/unbound-queue.h b/src/unbound-queue.h
index 443d5ce..59a426b 100644
--- a/src/unbound-queue.h
+++ b/src/unbound-queue.h
@@ -28,6 +28,8 @@
 #ifndef V8_UNBOUND_QUEUE_
 #define V8_UNBOUND_QUEUE_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/uri.js b/src/uri.js
index e94b3fe..72ca6f1 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -166,7 +166,10 @@
 // ECMA-262, section 15.1.3
 function Encode(uri, unescape) {
   var uriLength = uri.length;
-  var result = new $Array(uriLength);
+  // We are going to pass result to %StringFromCharCodeArray
+  // which does not expect any getters/setters installed
+  // on the incoming array.
+  var result = new InternalArray(uriLength);
   var index = 0;
   for (var k = 0; k < uriLength; k++) {
     var cc1 = uri.charCodeAt(k);
@@ -192,7 +195,10 @@
 // ECMA-262, section 15.1.3
 function Decode(uri, reserved) {
   var uriLength = uri.length;
-  var result = new $Array(uriLength);
+  // We are going to pass result to %StringFromCharCodeArray
+  // which does not expect any getters/setters installed
+  // on the incoming array.
+  var result = new InternalArray(uriLength);
   var index = 0;
   for (var k = 0; k < uriLength; k++) {
     var ch = uri.charAt(k);
diff --git a/src/frame-element.cc b/src/utils-inl.h
similarity index 79%
copy from src/frame-element.cc
copy to src/utils-inl.h
index f629900..76a3c10 100644
--- a/src/frame-element.cc
+++ b/src/utils-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,24 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "v8.h"
+#ifndef V8_UTILS_INL_H_
+#define V8_UTILS_INL_H_
 
-#include "frame-element.h"
-#include "zone-inl.h"
+#include "list-inl.h"
 
 namespace v8 {
 namespace internal {
 
+template<typename T, int growth_factor, int max_growth>
+void Collector<T, growth_factor, max_growth>::Reset() {
+  for (int i = chunks_.length() - 1; i >= 0; i--) {
+    chunks_.at(i).Dispose();
+  }
+  chunks_.Rewind(0);
+  index_ = 0;
+  size_ = 0;
+}
 
 } }  // namespace v8::internal
+
+#endif  // V8_UTILS_INL_H_
diff --git a/src/utils.h b/src/utils.h
index b89f284..da7a1d9 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -222,6 +222,11 @@
     return static_cast<uint32_t>(value) << shift;
   }
 
+  // Returns a uint32_t with the bit field value updated.
+  static uint32_t update(uint32_t previous, T value) {
+    return (previous & ~mask()) | encode(value);
+  }
+
   // Extracts the bit field from the value.
   static T decode(uint32_t value) {
     return static_cast<T>((value & mask()) >> shift);
@@ -251,6 +256,12 @@
 }
 
 
+static inline uint32_t ComputePointerHash(void* ptr) {
+  return ComputeIntegerHash(
+      static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)));
+}
+
+
 // ----------------------------------------------------------------------------
 // Miscellaneous
 
@@ -576,14 +587,7 @@
   }
 
   // Resets the collector to be empty.
-  virtual void Reset() {
-    for (int i = chunks_.length() - 1; i >= 0; i--) {
-      chunks_.at(i).Dispose();
-    }
-    chunks_.Rewind(0);
-    index_ = 0;
-    size_ = 0;
-  }
+  virtual void Reset();
 
   // Total number of elements added to collector so far.
   inline int size() { return size_; }
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 5e765b2..e3b16e9 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -134,6 +134,7 @@
   SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)            \
   SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
   SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow)                \
+  SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs)      \
   SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow)   \
   /* How is the generic keyed-call stub used? */                      \
   SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast)         \
@@ -179,6 +180,8 @@
   SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)                \
   SC(named_store_global_inline, V8.NamedStoreGlobalInline)            \
   SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)   \
+  SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs)    \
+  SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \
   SC(store_normal_miss, V8.StoreNormalMiss)                           \
   SC(store_normal_hit, V8.StoreNormalHit)                             \
   SC(cow_arrays_created_stub, V8.COWArraysCreatedStub)                \
diff --git a/src/v8.h b/src/v8.h
index 776fa9c..9d98521 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -66,6 +66,7 @@
 #include "log-inl.h"
 #include "cpu-profiler-inl.h"
 #include "handles-inl.h"
+#include "isolate-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/v8dll-main.cc b/src/v8dll-main.cc
index 3d4b3a3..49d8689 100644
--- a/src/v8dll-main.cc
+++ b/src/v8dll-main.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <windows.h>
-
+// The GYP based build ends up defining USING_V8_SHARED when compiling this
+// file.
+#undef USING_V8_SHARED
 #include "../include/v8.h"
 
+#ifdef WIN32
+#include <windows.h>  // NOLINT
+
 extern "C" {
 BOOL WINAPI DllMain(HANDLE hinstDLL,
                     DWORD dwReason,
@@ -37,3 +41,4 @@
   return TRUE;
 }
 }
+#endif
diff --git a/src/v8globals.h b/src/v8globals.h
index 2a01dfd..a23ca19 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -154,7 +154,7 @@
 class MaybeObject;
 class OldSpace;
 class Property;
-class Proxy;
+class Foreign;
 class RegExpNode;
 struct RegExpCompileData;
 class RegExpTree;
@@ -185,6 +185,8 @@
 
 typedef bool (*WeakSlotCallback)(Object** pointer);
 
+typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
+
 // -----------------------------------------------------------------------------
 // Miscellaneous
 
@@ -218,7 +220,12 @@
 
 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
 
-enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
+enum VisitMode {
+  VISIT_ALL,
+  VISIT_ALL_IN_SCAVENGE,
+  VISIT_ALL_IN_SWEEP_NEWSPACE,
+  VISIT_ONLY_STRONG
+};
 
 // Flag indicating whether code is built into the VM (one of the natives files).
 enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
@@ -303,7 +310,9 @@
 
 enum CallFunctionFlags {
   NO_CALL_FUNCTION_FLAGS = 0,
-  RECEIVER_MIGHT_BE_VALUE = 1 << 0  // Receiver might not be a JSObject.
+  // Receiver might implicitly be the global object. If it is, the
+  // hole is passed to the call function stub.
+  RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0
 };
 
 
@@ -322,11 +331,12 @@
   FIELD                     = 1,  // only in fast mode
   CONSTANT_FUNCTION         = 2,  // only in fast mode
   CALLBACKS                 = 3,
-  INTERCEPTOR               = 4,  // only in lookup results, not in descriptors.
-  MAP_TRANSITION            = 5,  // only in fast mode
-  EXTERNAL_ARRAY_TRANSITION = 6,
-  CONSTANT_TRANSITION       = 7,  // only in fast mode
-  NULL_DESCRIPTOR           = 8,  // only in fast mode
+  HANDLER                   = 4,  // only in lookup results, not in descriptors
+  INTERCEPTOR               = 5,  // only in lookup results, not in descriptors
+  MAP_TRANSITION            = 6,  // only in fast mode
+  EXTERNAL_ARRAY_TRANSITION = 7,
+  CONSTANT_TRANSITION       = 8,  // only in fast mode
+  NULL_DESCRIPTOR           = 9,  // only in fast mode
   // All properties before MAP_TRANSITION are real.
   FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
   // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
@@ -481,6 +491,22 @@
   kInvalidStrictFlag
 };
 
+
+// Used to specify if a macro instruction must perform a smi check on tagged
+// values.
+enum SmiCheckType {
+  DONT_DO_SMI_CHECK = 0,
+  DO_SMI_CHECK
+};
+
+
+// Used to specify whether a receiver is implicitly or explicitly
+// provided to a call.
+enum CallKind {
+  CALL_AS_METHOD = 0,
+  CALL_AS_FUNCTION
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_V8GLOBALS_H_
diff --git a/src/v8natives.js b/src/v8natives.js
index 429cea5..700fe58 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -56,6 +56,7 @@
     %FunctionSetName(f, key);
     %FunctionRemovePrototype(f);
     %SetProperty(object, key, f, attributes);
+    %SetES5Flag(f);
   }
   %ToFastProperties(object);
 }
@@ -196,12 +197,20 @@
 
 // ECMA-262 - 15.2.4.2
 function ObjectToString() {
+  if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    return '[object Undefined]';
+  }
+  if (IS_NULL(this)) return '[object Null]';
   return "[object " + %_ClassOf(ToObject(this)) + "]";
 }
 
 
 // ECMA-262 - 15.2.4.3
 function ObjectToLocaleString() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Object.prototype.toLocaleString"]);
+  }
   return this.toString();
 }
 
@@ -214,12 +223,16 @@
 
 // ECMA-262 - 15.2.4.5
 function ObjectHasOwnProperty(V) {
-  return %HasLocalProperty(ToObject(this), ToString(V));
+  return %HasLocalProperty(TO_OBJECT_INLINE(this), TO_STRING_INLINE(V));
 }
 
 
 // ECMA-262 - 15.2.4.6
 function ObjectIsPrototypeOf(V) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Object.prototype.isPrototypeOf"]);
+  }
   if (!IS_SPEC_OBJECT(V)) return false;
   return %IsInPrototypeChain(this, V);
 }
@@ -313,18 +326,18 @@
 // ES5 8.10.4
 function FromPropertyDescriptor(desc) {
   if (IS_UNDEFINED(desc)) return desc;
-  var obj = new $Object();
+
   if (IsDataDescriptor(desc)) {
-    obj.value = desc.getValue();
-    obj.writable = desc.isWritable();
+    return { value: desc.getValue(),
+             writable: desc.isWritable(),
+             enumerable: desc.isEnumerable(),
+             configurable: desc.isConfigurable() };
   }
-  if (IsAccessorDescriptor(desc)) {
-    obj.get = desc.getGet();
-    obj.set = desc.getSet();
-  }
-  obj.enumerable = desc.isEnumerable();
-  obj.configurable = desc.isConfigurable();
-  return obj;
+  // Must be an AccessorDescriptor then. We never return a generic descriptor.
+  return { get: desc.getGet(),
+           set: desc.getSet(),
+           enumerable: desc.isEnumerable(),
+           configurable: desc.isConfigurable() };
 }
 
 // ES5 8.10.5.
@@ -391,6 +404,7 @@
 }
 
 PropertyDescriptor.prototype.__proto__ = null;
+
 PropertyDescriptor.prototype.toString = function() {
   return "[object PropertyDescriptor]";
 };
@@ -1050,6 +1064,10 @@
 
 // ECMA-262 section 15.7.4.3
 function NumberToLocaleString() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Number.prototype.toLocaleString"]);
+  }
   return this.toString();
 }
 
@@ -1070,6 +1088,10 @@
   if (f < 0 || f > 20) {
     throw new $RangeError("toFixed() digits argument must be between 0 and 20");
   }
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Number.prototype.toFixed"]);
+  }
   var x = ToNumber(this);
   return %NumberToFixed(x, f);
 }
@@ -1084,6 +1106,10 @@
       throw new $RangeError("toExponential() argument must be between 0 and 20");
     }
   }
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Number.prototype.toExponential"]);
+  }
   var x = ToNumber(this);
   return %NumberToExponential(x, f);
 }
@@ -1091,6 +1117,10 @@
 
 // ECMA-262 section 15.7.4.7
 function NumberToPrecision(precision) {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Number.prototype.toPrecision"]);
+  }
   if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
   var p = TO_INTEGER(precision);
   if (p < 1 || p > 21) {
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 4b033fc..26169b5 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -43,90 +43,94 @@
 
 
 // Constructor for the Locker object.  Once the Locker is constructed the
-// current thread will be guaranteed to have the big V8 lock.
-Locker::Locker() : has_lock_(false), top_level_(true) {
-  // TODO(isolates): When Locker has Isolate parameter and it is provided, grab
-  // that one instead of using the current one.
-  // We pull default isolate for Locker constructor w/o p[arameter.
-  // A thread should not enter an isolate before acquiring a lock,
-  // in cases which mandate using Lockers.
-  // So getting a lock is the first thing threads do in a scenario where
-  // multple threads share an isolate. Hence, we need to access
-  // 'locking isolate' before we can actually enter into default isolate.
-  internal::Isolate* isolate = internal::Isolate::GetDefaultIsolateForLocking();
-  ASSERT(isolate != NULL);
-
+// current thread will be guaranteed to have the lock for a given isolate.
+Locker::Locker(v8::Isolate* isolate)
+  : has_lock_(false),
+    top_level_(false),
+    isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
+  if (isolate_ == NULL) {
+    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
+  }
   // Record that the Locker has been used at least once.
   active_ = true;
   // Get the big lock if necessary.
-  if (!isolate->thread_manager()->IsLockedByCurrentThread()) {
-    isolate->thread_manager()->Lock();
+  if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
+    isolate_->thread_manager()->Lock();
     has_lock_ = true;
 
-    if (isolate->IsDefaultIsolate()) {
-      // This only enters if not yet entered.
-      internal::Isolate::EnterDefaultIsolate();
-    }
-
-    ASSERT(internal::Thread::HasThreadLocal(
-        internal::Isolate::thread_id_key()));
-
     // Make sure that V8 is initialized.  Archiving of threads interferes
     // with deserialization by adding additional root pointers, so we must
     // initialize here, before anyone can call ~Locker() or Unlocker().
-    if (!isolate->IsInitialized()) {
+    if (!isolate_->IsInitialized()) {
+      isolate_->Enter();
       V8::Initialize();
+      isolate_->Exit();
     }
+
     // This may be a locker within an unlocker in which case we have to
     // get the saved state for this thread and restore it.
-    if (isolate->thread_manager()->RestoreThread()) {
+    if (isolate_->thread_manager()->RestoreThread()) {
       top_level_ = false;
     } else {
-      internal::ExecutionAccess access(isolate);
-      isolate->stack_guard()->ClearThread(access);
-      isolate->stack_guard()->InitThread(access);
+      internal::ExecutionAccess access(isolate_);
+      isolate_->stack_guard()->ClearThread(access);
+      isolate_->stack_guard()->InitThread(access);
+    }
+    if (isolate_->IsDefaultIsolate()) {
+      // This only enters if not yet entered.
+      internal::Isolate::EnterDefaultIsolate();
     }
   }
-  ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
+  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
 }
 
 
-bool Locker::IsLocked() {
-  return internal::Isolate::Current()->thread_manager()->
-      IsLockedByCurrentThread();
+bool Locker::IsLocked(v8::Isolate* isolate) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  if (internal_isolate == NULL) {
+    internal_isolate = i::Isolate::GetDefaultIsolateForLocking();
+  }
+  return internal_isolate->thread_manager()->IsLockedByCurrentThread();
 }
 
 
 Locker::~Locker() {
-  // TODO(isolate): this should use a field storing the isolate it
-  // locked instead.
-  internal::Isolate* isolate = internal::Isolate::Current();
-  ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
+  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
   if (has_lock_) {
-    if (top_level_) {
-      isolate->thread_manager()->FreeThreadResources();
-    } else {
-      isolate->thread_manager()->ArchiveThread();
+    if (isolate_->IsDefaultIsolate()) {
+      isolate_->Exit();
     }
-    isolate->thread_manager()->Unlock();
+    if (top_level_) {
+      isolate_->thread_manager()->FreeThreadResources();
+    } else {
+      isolate_->thread_manager()->ArchiveThread();
+    }
+    isolate_->thread_manager()->Unlock();
   }
 }
 
 
-Unlocker::Unlocker() {
-  internal::Isolate* isolate = internal::Isolate::Current();
-  ASSERT(isolate->thread_manager()->IsLockedByCurrentThread());
-  isolate->thread_manager()->ArchiveThread();
-  isolate->thread_manager()->Unlock();
+Unlocker::Unlocker(v8::Isolate* isolate)
+  : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
+  if (isolate_ == NULL) {
+    isolate_ = i::Isolate::GetDefaultIsolateForLocking();
+  }
+  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
+  if (isolate_->IsDefaultIsolate()) {
+    isolate_->Exit();
+  }
+  isolate_->thread_manager()->ArchiveThread();
+  isolate_->thread_manager()->Unlock();
 }
 
 
 Unlocker::~Unlocker() {
-  // TODO(isolates): check it's the isolate we unlocked.
-  internal::Isolate* isolate = internal::Isolate::Current();
-  ASSERT(!isolate->thread_manager()->IsLockedByCurrentThread());
-  isolate->thread_manager()->Lock();
-  isolate->thread_manager()->RestoreThread();
+  ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
+  isolate_->thread_manager()->Lock();
+  isolate_->thread_manager()->RestoreThread();
+  if (isolate_->IsDefaultIsolate()) {
+    isolate_->Enter();
+  }
 }
 
 
@@ -144,17 +148,20 @@
 
 
 bool ThreadManager::RestoreThread() {
+  ASSERT(IsLockedByCurrentThread());
   // First check whether the current thread has been 'lazily archived', ie
   // not archived at all.  If that is the case we put the state storage we
   // had prepared back in the free list, since we didn't need it after all.
   if (lazily_archived_thread_.Equals(ThreadId::Current())) {
     lazily_archived_thread_ = ThreadId::Invalid();
-    ASSERT(Isolate::CurrentPerIsolateThreadData()->thread_state() ==
-           lazily_archived_thread_state_);
+    Isolate::PerIsolateThreadData* per_thread =
+        isolate_->FindPerThreadDataForThisThread();
+    ASSERT(per_thread != NULL);
+    ASSERT(per_thread->thread_state() == lazily_archived_thread_state_);
     lazily_archived_thread_state_->set_id(ThreadId::Invalid());
     lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
     lazily_archived_thread_state_ = NULL;
-    Isolate::CurrentPerIsolateThreadData()->set_thread_state(NULL);
+    per_thread->set_thread_state(NULL);
     return true;
   }
 
@@ -168,7 +175,7 @@
     EagerlyArchiveThread();
   }
   Isolate::PerIsolateThreadData* per_thread =
-      Isolate::CurrentPerIsolateThreadData();
+      isolate_->FindPerThreadDataForThisThread();
   if (per_thread == NULL || per_thread->thread_state() == NULL) {
     // This is a new thread.
     isolate_->stack_guard()->InitThread(access);
@@ -178,7 +185,7 @@
   char* from = state->data();
   from = isolate_->handle_scope_implementer()->RestoreThread(from);
   from = isolate_->RestoreThread(from);
-  from = Relocatable::RestoreState(from);
+  from = Relocatable::RestoreState(isolate_, from);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   from = isolate_->debug()->RestoreDebug(from);
 #endif
@@ -300,9 +307,12 @@
 void ThreadManager::ArchiveThread() {
   ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
   ASSERT(!IsArchived());
+  ASSERT(IsLockedByCurrentThread());
   ThreadState* state = GetFreeThreadState();
   state->Unlink();
-  Isolate::CurrentPerIsolateThreadData()->set_thread_state(state);
+  Isolate::PerIsolateThreadData* per_thread =
+      isolate_->FindOrAllocatePerThreadDataForThisThread();
+  per_thread->set_thread_state(state);
   lazily_archived_thread_ = ThreadId::Current();
   lazily_archived_thread_state_ = state;
   ASSERT(state->id().Equals(ThreadId::Invalid()));
@@ -312,6 +322,7 @@
 
 
 void ThreadManager::EagerlyArchiveThread() {
+  ASSERT(IsLockedByCurrentThread());
   ThreadState* state = lazily_archived_thread_state_;
   state->LinkInto(ThreadState::IN_USE_LIST);
   char* to = state->data();
@@ -319,7 +330,7 @@
   // in ThreadManager::Iterate(ObjectVisitor*).
   to = isolate_->handle_scope_implementer()->ArchiveThread(to);
   to = isolate_->ArchiveThread(to);
-  to = Relocatable::ArchiveState(to);
+  to = Relocatable::ArchiveState(isolate_, to);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   to = isolate_->debug()->ArchiveDebug(to);
 #endif
@@ -344,11 +355,11 @@
 
 
 bool ThreadManager::IsArchived() {
-  Isolate::PerIsolateThreadData* data = Isolate::CurrentPerIsolateThreadData();
+  Isolate::PerIsolateThreadData* data =
+      isolate_->FindPerThreadDataForThisThread();
   return data != NULL && data->thread_state() != NULL;
 }
 
-
 void ThreadManager::Iterate(ObjectVisitor* v) {
   // Expecting no threads during serialization/deserialization
   for (ThreadState* state = FirstThreadStateInUse();
diff --git a/src/version.cc b/src/version.cc
index 47e7fe2..34549fa 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     2
+#define MINOR_VERSION     3
 #define BUILD_NUMBER      10
-#define PATCH_LEVEL       40
+#define PATCH_LEVEL       39
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/vm-state.h b/src/vm-state.h
index 11fc6d6..2062340 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -28,6 +28,7 @@
 #ifndef V8_VM_STATE_H_
 #define V8_VM_STATE_H_
 
+#include "allocation.h"
 #include "isolate.h"
 
 namespace v8 {
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 9541a58..8db54f0 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -61,9 +61,15 @@
 }
 
 
-void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::emit_code_target(Handle<Code> target,
+                                 RelocInfo::Mode rmode,
+                                 unsigned ast_id) {
   ASSERT(RelocInfo::IsCodeTarget(rmode));
-  RecordRelocInfo(rmode);
+  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+    RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id);
+  } else {
+    RecordRelocInfo(rmode);
+  }
   int current = code_targets_.length();
   if (current > 0 && code_targets_.last().is_identical_to(target)) {
     // Optimization if we keep jumping to the same code target.
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 6e4f005..745fdae 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -458,6 +458,20 @@
     int last_imm32 = pos - (current + sizeof(int32_t));
     long_at_put(current, last_imm32);
   }
+  while (L->is_near_linked()) {
+    int fixup_pos = L->near_link_pos();
+    int offset_to_next =
+        static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
+    ASSERT(offset_to_next <= 0);
+    int disp = pos - (fixup_pos + sizeof(int8_t));
+    ASSERT(is_int8(disp));
+    set_byte_at(fixup_pos, disp);
+    if (offset_to_next < 0) {
+      L->link_to(fixup_pos + offset_to_next, Label::kNear);
+    } else {
+      L->UnuseNear();
+    }
+  }
   L->bind_to(pos);
 }
 
@@ -467,19 +481,6 @@
 }
 
 
-void Assembler::bind(NearLabel* L) {
-  ASSERT(!L->is_bound());
-  while (L->unresolved_branches_ > 0) {
-    int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
-    int disp = pc_offset() - branch_pos;
-    ASSERT(is_int8(disp));
-    set_byte_at(branch_pos - sizeof(int8_t), disp);
-    L->unresolved_branches_--;
-  }
-  L->bind_to(pc_offset());
-}
-
-
 void Assembler::GrowBuffer() {
   ASSERT(buffer_overflow());
   if (!own_buffer_) FATAL("external code buffer is too small");
@@ -869,12 +870,14 @@
 }
 
 
-void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+void Assembler::call(Handle<Code> target,
+                     RelocInfo::Mode rmode,
+                     unsigned ast_id) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   // 1110 1000 #32-bit disp.
   emit(0xE8);
-  emit_code_target(target, rmode);
+  emit_code_target(target, rmode, ast_id);
 }
 
 
@@ -1212,7 +1215,7 @@
 }
 
 
-void Assembler::j(Condition cc, Label* L) {
+void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
   if (cc == always) {
     jmp(L);
     return;
@@ -1236,6 +1239,17 @@
       emit(0x80 | cc);
       emitl(offs - long_size);
     }
+  } else if (distance == Label::kNear) {
+    // 0111 tttn #8-bit disp
+    emit(0x70 | cc);
+    byte disp = 0x00;
+    if (L->is_near_linked()) {
+      int offset = L->near_link_pos() - pc_offset();
+      ASSERT(is_int8(offset));
+      disp = static_cast<byte>(offset & 0xFF);
+    }
+    L->link_to(pc_offset(), Label::kNear);
+    emit(disp);
   } else if (L->is_linked()) {
     // 0000 1111 1000 tttn #32-bit disp.
     emit(0x0F);
@@ -1265,27 +1279,7 @@
 }
 
 
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
-  EnsureSpace ensure_space(this);
-  ASSERT(0 <= cc && cc < 16);
-  if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
-  if (L->is_bound()) {
-    const int short_size = 2;
-    int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
-    ASSERT(is_int8(offs - short_size));
-    // 0111 tttn #8-bit disp
-    emit(0x70 | cc);
-    emit((offs - short_size) & 0xFF);
-  } else {
-    emit(0x70 | cc);
-    emit(0x00);      // The displacement will be resolved later.
-    L->link_to(pc_offset());
-  }
-}
-
-
-void Assembler::jmp(Label* L) {
+void Assembler::jmp(Label* L, Label::Distance distance) {
   EnsureSpace ensure_space(this);
   const int short_size = sizeof(int8_t);
   const int long_size = sizeof(int32_t);
@@ -1301,7 +1295,17 @@
       emit(0xE9);
       emitl(offs - long_size);
     }
-  } else  if (L->is_linked()) {
+  } else if (distance == Label::kNear) {
+    emit(0xEB);
+    byte disp = 0x00;
+    if (L->is_near_linked()) {
+      int offset = L->near_link_pos() - pc_offset();
+      ASSERT(is_int8(offset));
+      disp = static_cast<byte>(offset & 0xFF);
+    }
+    L->link_to(pc_offset(), Label::kNear);
+    emit(disp);
+  } else if (L->is_linked()) {
     // 1110 1001 #32-bit disp.
     emit(0xE9);
     emitl(L->pos());
@@ -1325,24 +1329,6 @@
 }
 
 
-void Assembler::jmp(NearLabel* L) {
-  EnsureSpace ensure_space(this);
-  if (L->is_bound()) {
-    const int short_size = 2;
-    int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
-    ASSERT(is_int8(offs - short_size));
-    // 1110 1011 #8-bit disp.
-    emit(0xEB);
-    emit((offs - short_size) & 0xFF);
-  } else {
-    emit(0xEB);
-    emit(0x00);      // The displacement will be resolved later.
-    L->link_to(pc_offset());
-  }
-}
-
-
 void Assembler::jmp(Register target) {
   EnsureSpace ensure_space(this);
   // Opcode FF/4 r64.
@@ -2540,6 +2526,24 @@
 }
 
 
+void Assembler::movq(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (dst.low_bits() == 4) {
+    // Avoid unnecessary SIB byte.
+    emit(0xf3);
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x7e);
+    emit_sse_operand(dst, src);
+  } else {
+    emit(0x66);
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0xD6);
+    emit_sse_operand(src, dst);
+  }
+}
+
 void Assembler::movdqa(const Operand& dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   emit(0x66);
@@ -2603,6 +2607,42 @@
 }
 
 
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (src.low_bits() == 4) {
+    // Try to avoid an unnecessary SIB byte.
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0x29);
+    emit_sse_operand(src, dst);
+  } else {
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x28);
+    emit_sse_operand(dst, src);
+  }
+}
+
+
+void Assembler::movapd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (src.low_bits() == 4) {
+    // Try to avoid an unnecessary SIB byte.
+    emit(0x66);
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0x29);
+    emit_sse_operand(src, dst);
+  } else {
+    emit(0x66);
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x28);
+    emit_sse_operand(dst, src);
+  }
+}
+
+
 void Assembler::movss(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   emit(0xF3);  // single
@@ -2833,6 +2873,15 @@
 }
 
 
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x57);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   emit(0xF2);
@@ -2863,6 +2912,21 @@
 }
 
 
+void Assembler::roundsd(XMMRegister dst, XMMRegister src,
+                        Assembler::RoundingMode mode) {
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x3a);
+  emit(0x0b);
+  emit_sse_operand(dst, src);
+  // Mask precision exception.
+  emit(static_cast<byte>(mode) | 0x8);
+}
+
+
 void Assembler::movmskpd(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   emit(0x66);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 9453277..7769b03 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -327,22 +327,6 @@
 }
 
 
-enum Hint {
-  no_hint = 0,
-  not_taken = 0x2e,
-  taken = 0x3e
-};
-
-// The result of negating a hint is as if the corresponding condition
-// were negated by NegateCondition.  That is, no_hint is mapped to
-// itself and not_taken and taken are mapped to each other.
-inline Hint NegateHint(Hint hint) {
-  return (hint == no_hint)
-      ? no_hint
-      : ((hint == not_taken) ? taken : not_taken);
-}
-
-
 // -----------------------------------------------------------------------------
 // Machine instruction Immediates
 
@@ -1178,12 +1162,13 @@
   // but it may be bound only once.
 
   void bind(Label* L);  // binds an unbound label L to the current code position
-  void bind(NearLabel* L);
 
   // Calls
   // Call near relative 32-bit displacement, relative to next instruction.
   void call(Label* L);
-  void call(Handle<Code> target, RelocInfo::Mode rmode);
+  void call(Handle<Code> target,
+            RelocInfo::Mode rmode,
+            unsigned ast_id = kNoASTId);
 
   // Calls directly to the given address using a relative offset.
   // Should only ever be used in Code objects for calls within the
@@ -1200,7 +1185,8 @@
   // Jumps
   // Jump short or near relative.
   // Use a 32-bit signed displacement.
-  void jmp(Label* L);  // unconditional jump to L
+  // Unconditional jump to L
+  void jmp(Label* L, Label::Distance distance = Label::kFar);
   void jmp(Handle<Code> target, RelocInfo::Mode rmode);
 
   // Jump near absolute indirect (r64)
@@ -1209,16 +1195,12 @@
   // Jump near absolute indirect (m64)
   void jmp(const Operand& src);
 
-  // Short jump
-  void jmp(NearLabel* L);
-
   // Conditional jumps
-  void j(Condition cc, Label* L);
+  void j(Condition cc,
+         Label* L,
+         Label::Distance distance = Label::kFar);
   void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
 
-  // Conditional short jump
-  void j(Condition cc, NearLabel* L, Hint hint = no_hint);
-
   // Floating-point operations
   void fld(int i);
 
@@ -1291,15 +1273,24 @@
   void movd(Register dst, XMMRegister src);
   void movq(XMMRegister dst, Register src);
   void movq(Register dst, XMMRegister src);
+  void movq(XMMRegister dst, XMMRegister src);
   void extractps(Register dst, XMMRegister src, byte imm8);
 
-  void movsd(const Operand& dst, XMMRegister src);
+  // Don't use this unless it's important to keep the
+  // top half of the destination register unchanged.
+  // Use movaps when moving double values and movq for integer
+  // values in xmm registers.
   void movsd(XMMRegister dst, XMMRegister src);
+
+  void movsd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, const Operand& src);
 
   void movdqa(const Operand& dst, XMMRegister src);
   void movdqa(XMMRegister dst, const Operand& src);
 
+  void movapd(XMMRegister dst, XMMRegister src);
+  void movaps(XMMRegister dst, XMMRegister src);
+
   void movss(XMMRegister dst, const Operand& src);
   void movss(const Operand& dst, XMMRegister src);
 
@@ -1331,11 +1322,21 @@
   void andpd(XMMRegister dst, XMMRegister src);
   void orpd(XMMRegister dst, XMMRegister src);
   void xorpd(XMMRegister dst, XMMRegister src);
+  void xorps(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void ucomisd(XMMRegister dst, XMMRegister src);
   void ucomisd(XMMRegister dst, const Operand& src);
 
+  enum RoundingMode {
+    kRoundToNearest = 0x0,
+    kRoundDown      = 0x1,
+    kRoundUp        = 0x2,
+    kRoundToZero    = 0x3
+  };
+
+  void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
   void movmskpd(Register dst, XMMRegister src);
 
   // The first argument is the reg field, the second argument is the r/m field.
@@ -1408,7 +1409,9 @@
   inline void emitl(uint32_t x);
   inline void emitq(uint64_t x, RelocInfo::Mode rmode);
   inline void emitw(uint16_t x);
-  inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
+  inline void emit_code_target(Handle<Code> target,
+                               RelocInfo::Mode rmode,
+                               unsigned ast_id = kNoASTId);
   void emit(Immediate x) { emitl(x.value_); }
 
   // Emits a REX prefix that encodes a 64-bit operand size and
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index a549633..fc4581c 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -98,6 +98,7 @@
   // Set expected number of arguments to zero (not changing rax).
   __ Set(rbx, 0);
   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ SetCallKind(rcx, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
 }
@@ -342,11 +343,12 @@
     Handle<Code> code =
         masm->isolate()->builtins()->HandleApiCallConstruct();
     ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION);
+    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
   } else {
     ParameterCount actual(rax);
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Restore context from the frame.
@@ -498,7 +500,8 @@
   } else {
     ParameterCount actual(rax);
     // Function must be in rdi.
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
   // Exit the JS frame. Notice that this also removes the empty
@@ -526,17 +529,23 @@
 
   // Push a copy of the function onto the stack.
   __ push(rdi);
+  // Push call kind information.
+  __ push(rcx);
 
   __ push(rdi);  // Function is also the parameter to the runtime call.
   __ CallRuntime(Runtime::kLazyCompile, 1);
+
+  // Restore call kind information.
+  __ pop(rcx);
+  // Restore receiver.
   __ pop(rdi);
 
   // Tear down temporary frame.
   __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
-  __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
-  __ jmp(rcx);
+  __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+  __ jmp(rax);
 }
 
 
@@ -546,17 +555,23 @@
 
   // Push a copy of the function onto the stack.
   __ push(rdi);
+  // Push call kind information.
+  __ push(rcx);
 
   __ push(rdi);  // Function is also the parameter to the runtime call.
   __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-  // Restore function and tear down temporary frame.
+  // Restore call kind information.
+  __ pop(rcx);
+  // Restore function.
   __ pop(rdi);
+
+  // Tear down temporary frame.
   __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
-  __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
-  __ jmp(rcx);
+  __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+  __ jmp(rax);
 }
 
 
@@ -576,15 +591,15 @@
   __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
 
   // Switch on the state.
-  NearLabel not_no_registers, not_tos_rax;
+  Label not_no_registers, not_tos_rax;
   __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
-  __ j(not_equal, &not_no_registers);
+  __ j(not_equal, &not_no_registers, Label::kNear);
   __ ret(1 * kPointerSize);  // Remove state.
 
   __ bind(&not_no_registers);
   __ movq(rax, Operand(rsp, 2 * kPointerSize));
   __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
-  __ j(not_equal, &not_tos_rax);
+  __ j(not_equal, &not_tos_rax, Label::kNear);
   __ ret(2 * kPointerSize);  // Remove state, rax.
 
   __ bind(&not_tos_rax);
@@ -658,19 +673,25 @@
              Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
     __ j(not_equal, &shift_arguments);
 
+    // Do not transform the receiver for natives.
+    // SharedFunctionInfo is already loaded into rbx.
+    __ testb(FieldOperand(rbx, SharedFunctionInfo::kES5NativeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kES5NativeBitWithinByte));
+    __ j(not_zero, &shift_arguments);
+
     // Compute the receiver in non-strict mode.
     __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
-    __ JumpIfSmi(rbx, &convert_to_object);
+    __ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
 
     __ CompareRoot(rbx, Heap::kNullValueRootIndex);
     __ j(equal, &use_global_receiver);
     __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
     __ j(equal, &use_global_receiver);
 
+    STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
+    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
     __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
-    __ j(below, &convert_to_object);
-    __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
-    __ j(below_equal, &shift_arguments);
+    __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
     __ EnterInternalFrame();  // In order to preserve argument count.
@@ -686,7 +707,7 @@
     __ LeaveInternalFrame();
     // Restore the function to rdi.
     __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
-    __ jmp(&patch_receiver);
+    __ jmp(&patch_receiver, Label::kNear);
 
     // Use the global receiver object from the called function as the
     // receiver.
@@ -734,6 +755,7 @@
     __ j(not_zero, &function);
     __ Set(rbx, 0);
     __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+    __ SetCallKind(rcx, CALL_AS_METHOD);
     __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
             RelocInfo::CODE_TARGET);
     __ bind(&function);
@@ -747,13 +769,15 @@
              FieldOperand(rdx,
                           SharedFunctionInfo::kFormalParameterCountOffset));
   __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  __ SetCallKind(rcx, CALL_AS_METHOD);
   __ cmpq(rax, rbx);
   __ j(not_equal,
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
        RelocInfo::CODE_TARGET);
 
   ParameterCount expected(0);
-  __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
+  __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION,
+                NullCallWrapper(), CALL_AS_METHOD);
 }
 
 
@@ -822,8 +846,13 @@
            Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
   __ j(not_equal, &push_receiver);
 
+  // Do not transform the receiver for natives.
+  __ testb(FieldOperand(rdx, SharedFunctionInfo::kES5NativeByteOffset),
+           Immediate(1 << SharedFunctionInfo::kES5NativeBitWithinByte));
+  __ j(not_zero, &push_receiver);
+
   // Compute the receiver in non-strict mode.
-  __ JumpIfSmi(rbx, &call_to_object);
+  __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
   __ CompareRoot(rbx, Heap::kNullValueRootIndex);
   __ j(equal, &use_global_receiver);
   __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -831,17 +860,17 @@
 
   // If given receiver is already a JavaScript object then there's no
   // reason for converting it.
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
   __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
-  __ j(below, &call_to_object);
-  __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
-  __ j(below_equal, &push_receiver);
+  __ j(above_equal, &push_receiver);
 
   // Convert the receiver to an object.
   __ bind(&call_to_object);
   __ push(rbx);
   __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ movq(rbx, rax);
-  __ jmp(&push_receiver);
+  __ jmp(&push_receiver, Label::kNear);
 
   // Use the current global receiver object as the receiver.
   __ bind(&use_global_receiver);
@@ -888,7 +917,8 @@
   ParameterCount actual(rax);
   __ SmiToInteger32(rax, rax);
   __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+  __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
   __ LeaveInternalFrame();
   __ ret(3 * kPointerSize);  // remove function, receiver, and arguments
@@ -1324,11 +1354,11 @@
   // Push the function on the stack.
   __ push(rdi);
 
-  // Preserve the number of arguments on the stack. Must preserve both
-  // rax and rbx because these registers are used when copying the
+  // Preserve the number of arguments on the stack. Must preserve rax,
+  // rbx and rcx because these registers are used when copying the
   // arguments and the receiver.
-  __ Integer32ToSmi(rcx, rax);
-  __ push(rcx);
+  __ Integer32ToSmi(r8, rax);
+  __ push(r8);
 }
 
 
@@ -1352,6 +1382,7 @@
   // ----------- S t a t e -------------
   //  -- rax : actual number of arguments
   //  -- rbx : expected number of arguments
+  //  -- rcx : call kind information
   //  -- rdx : code entry to call
   // -----------------------------------
 
@@ -1372,14 +1403,14 @@
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
-    __ Set(rcx, -1);  // account for receiver
+    __ Set(r8, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
-    __ incq(rcx);
+    __ incq(r8);
     __ push(Operand(rax, 0));
     __ subq(rax, Immediate(kPointerSize));
-    __ cmpq(rcx, rbx);
+    __ cmpq(r8, rbx);
     __ j(less, &copy);
     __ jmp(&invoke);
   }
@@ -1391,23 +1422,23 @@
     // Copy receiver and all actual arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
-    __ Set(rcx, -1);  // account for receiver
+    __ Set(r8, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
-    __ incq(rcx);
+    __ incq(r8);
     __ push(Operand(rdi, 0));
     __ subq(rdi, Immediate(kPointerSize));
-    __ cmpq(rcx, rax);
+    __ cmpq(r8, rax);
     __ j(less, &copy);
 
     // Fill remaining expected arguments with undefined values.
     Label fill;
     __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
     __ bind(&fill);
-    __ incq(rcx);
+    __ incq(r8);
     __ push(kScratchRegister);
-    __ cmpq(rcx, rbx);
+    __ cmpq(r8, rbx);
     __ j(less, &fill);
 
     // Restore function pointer.
@@ -1456,17 +1487,17 @@
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
-  NearLabel skip;
+  Label skip;
   __ SmiCompare(rax, Smi::FromInt(-1));
-  __ j(not_equal, &skip);
+  __ j(not_equal, &skip, Label::kNear);
   __ ret(0);
 
   // If we decide not to perform on-stack replacement we perform a
   // stack guard check to enable interrupts.
   __ bind(&stack_check);
-  NearLabel ok;
+  Label ok;
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  __ j(above_equal, &ok);
+  __ j(above_equal, &ok, Label::kNear);
 
   StackCheckStub stub;
   __ TailCallStub(&stub);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index c365385..7075e66 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -40,15 +40,15 @@
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in eax.
-  NearLabel check_heap_number, call_builtin;
+  Label check_heap_number, call_builtin;
   __ SmiTest(rax);
-  __ j(not_zero, &check_heap_number);
+  __ j(not_zero, &check_heap_number, Label::kNear);
   __ Ret();
 
   __ bind(&check_heap_number);
   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                  Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &call_builtin);
+  __ j(not_equal, &call_builtin, Label::kNear);
   __ Ret();
 
   __ bind(&call_builtin);
@@ -232,12 +232,28 @@
 
 
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  NearLabel false_result, true_result, not_string;
+  Label false_result, true_result, not_string;
   __ movq(rax, Operand(rsp, 1 * kPointerSize));
 
+  // undefined -> false
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &false_result);
+
+  // Boolean -> its value
+  __ CompareRoot(rax, Heap::kFalseValueRootIndex);
+  __ j(equal, &false_result);
+  __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+  __ j(equal, &true_result);
+
+  // Smis: 0 -> false, all other -> true
+  __ Cmp(rax, Smi::FromInt(0));
+  __ j(equal, &false_result);
+  Condition is_smi = __ CheckSmi(rax);
+  __ j(is_smi, &true_result);
+
   // 'null' => false.
   __ CompareRoot(rax, Heap::kNullValueRootIndex);
-  __ j(equal, &false_result);
+  __ j(equal, &false_result, Label::kNear);
 
   // Get the map and type of the heap object.
   // We don't use CmpObjectType because we manipulate the type field.
@@ -247,28 +263,28 @@
   // Undetectable => false.
   __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
   __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
-  __ j(not_zero, &false_result);
+  __ j(not_zero, &false_result, Label::kNear);
 
   // JavaScript object => true.
   __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
-  __ j(above_equal, &true_result);
+  __ j(above_equal, &true_result, Label::kNear);
 
   // String value => false iff empty.
   __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
-  __ j(above_equal, &not_string);
+  __ j(above_equal, &not_string, Label::kNear);
   __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
   __ SmiTest(rdx);
-  __ j(zero, &false_result);
-  __ jmp(&true_result);
+  __ j(zero, &false_result, Label::kNear);
+  __ jmp(&true_result, Label::kNear);
 
   __ bind(&not_string);
   __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &true_result);
+  __ j(not_equal, &true_result, Label::kNear);
   // HeapNumber => false iff +0, -0, or NaN.
   // These three cases set the zero flag when compared to zero using ucomisd.
-  __ xorpd(xmm0, xmm0);
+  __ xorps(xmm0, xmm0);
   __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
-  __ j(zero, &false_result);
+  __ j(zero, &false_result, Label::kNear);
   // Fall through to |true_result|.
 
   // Return 1/0 for true/false in rax.
@@ -322,15 +338,354 @@
 };
 
 
-Handle<Code> GetTypeRecordingBinaryOpStub(int key,
-    TRBinaryOpIC::TypeInfo type_info,
-    TRBinaryOpIC::TypeInfo result_type_info) {
-  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
+void IntegerConvert(MacroAssembler* masm,
+                    Register result,
+                    Register source) {
+  // Result may be rcx. If result and source are the same register, source will
+  // be overwritten.
+  ASSERT(!result.is(rdi) && !result.is(rbx));
+  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+  // cvttsd2si (32-bit version) directly.
+  Register double_exponent = rbx;
+  Register double_value = rdi;
+  Label done, exponent_63_plus;
+  // Get double and extract exponent.
+  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+  // Clear result preemptively, in case we need to return zero.
+  __ xorl(result, result);
+  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
+  // Double to remove sign bit, shift exponent down to least significant bits,
+  // and subtract bias to get the unshifted, unbiased exponent.
+  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+  // Check whether the exponent is too big for a 63 bit unsigned integer.
+  __ cmpl(double_exponent, Immediate(63));
+  __ j(above_equal, &exponent_63_plus, Label::kNear);
+  // Handle exponent range 0..62.
+  __ cvttsd2siq(result, xmm0);
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&exponent_63_plus);
+  // Exponent negative or 63+.
+  __ cmpl(double_exponent, Immediate(83));
+  // If exponent negative or above 83, number contains no significant bits in
+  // the range 0..2^31, so the result is zero; the result register is already zero.
+  __ j(above, &done, Label::kNear);
+
+  // Exponent in range 63..83.
+  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+  // the least significant exponent-52 bits.
+
+  // Negate low bits of mantissa if value is negative.
+  __ addq(double_value, double_value);  // Move sign bit to carry.
+  __ sbbl(result, result);  // And convert carry to -1 in result register.
+  // If the value is negative, do (double_value-1)^-1, otherwise (double_value-0)^0.
+  __ addl(double_value, result);
+  // Do xor in opposite directions depending on where we want the result
+  // (depending on whether result is rcx or not).
+
+  if (result.is(rcx)) {
+    __ xorl(double_value, result);
+    // Left shift mantissa by (exponent - mantissabits - 1) to save the
+    // bits that have positional values below 2^32 (the extra -1 comes from the
+    // doubling done above to move the sign bit into the carry flag).
+    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+    __ shll_cl(double_value);
+    __ movl(result, double_value);
+  } else {
+    // As the then-branch, but move double-value to result before shifting.
+    __ xorl(result, double_value);
+    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+    __ shll_cl(result);
+  }
+
+  __ bind(&done);
+}
+
+
+Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
+  UnaryOpStub stub(key, type_info);
   return stub.GetCode();
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+void UnaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operand_type_) {
+    case UnaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case UnaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case UnaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case UnaryOpIC::GENERIC:
+      GenerateGenericStub(masm);
+      break;
+  }
+}
+
+
+void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ pop(rcx);  // Save return address.
+  __ push(rax);
+  // The operand is now on top of the stack.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ Push(Smi::FromInt(MinorKey()));
+  __ Push(Smi::FromInt(op_));
+  __ Push(Smi::FromInt(operand_type_));
+
+  __ push(rcx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
+                        masm->isolate()),
+      4,
+      1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateSmiStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateSmiStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+  Label slow;
+  GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+  Label non_smi;
+  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+  __ bind(&non_smi);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+                                     Label* non_smi,
+                                     Label* slow,
+                                     Label::Distance non_smi_near,
+                                     Label::Distance slow_near) {
+  Label done;
+  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
+  __ SmiNeg(rax, rax, &done, Label::kNear);
+  __ jmp(slow, slow_near);
+  __ bind(&done);
+  __ ret(0);
+}
+
+
+void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+                                        Label* non_smi,
+                                        Label::Distance non_smi_near) {
+  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
+  __ SmiNot(rax, rax);
+  __ ret(0);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateHeapNumberStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateHeapNumberStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+  Label non_smi, slow, call_builtin;
+  GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+  __ bind(&call_builtin);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberStubBitNot(
+    MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+                                            Label* slow) {
+  // Check if the operand is a heap number.
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, slow);
+
+  // Operand is a float, negate its value by flipping the sign bit.
+  if (mode_ == UNARY_OVERWRITE) {
+    __ Set(kScratchRegister, 0x01);
+    __ shl(kScratchRegister, Immediate(63));
+    __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
+  } else {
+    // Allocate a heap number before calculating the answer,
+    // so we don't have an untagged double around during GC.
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);
+    __ EnterInternalFrame();
+    __ push(rax);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ movq(rcx, rax);
+    __ pop(rax);
+    __ LeaveInternalFrame();
+    __ bind(&heapnumber_allocated);
+    // rcx: allocated 'empty' number
+
+    // Copy the double value to the new heap number, flipping the sign.
+    __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ Set(kScratchRegister, 0x01);
+    __ shl(kScratchRegister, Immediate(63));
+    __ xor_(rdx, kScratchRegister);  // Flip sign.
+    __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+    __ movq(rax, rcx);
+  }
+  __ ret(0);
+}
+
+
+void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
+                                               Label* slow) {
+  // Check if the operand is a heap number.
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, slow);
+
+  // Convert the heap number in rax to an untagged integer in rcx.
+  IntegerConvert(masm, rax, rax);
+
+  // Do the bitwise operation and smi tag the result.
+  __ notl(rax);
+  __ Integer32ToSmi(rax, rax);
+  __ ret(0);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateGenericStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateGenericStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
+  // Handle the slow case by jumping to the JavaScript builtin.
+  __ pop(rcx);  // pop return address
+  __ push(rax);
+  __ push(rcx);  // push return address
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+const char* UnaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+      kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name = NULL;  // Make g++ happy.
+  switch (mode_) {
+    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "UnaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               UnaryOpIC::GetName(operand_type_));
+  return name_;
+}
+
+
+Handle<Code> GetBinaryOpStub(int key,
+                             BinaryOpIC::TypeInfo type_info,
+                             BinaryOpIC::TypeInfo result_type_info) {
+  BinaryOpStub stub(key, type_info, result_type_info);
+  return stub.GetCode();
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   __ pop(rcx);  // Save return address.
   __ push(rdx);
   __ push(rax);
@@ -346,36 +701,39 @@
   // Patch the caller to an appropriate specialized stub and return the
   // operation result to the caller of the stub.
   __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
       5,
       1);
 }
 
 
-void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
   switch (operands_type_) {
-    case TRBinaryOpIC::UNINITIALIZED:
+    case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
       break;
-    case TRBinaryOpIC::SMI:
+    case BinaryOpIC::SMI:
       GenerateSmiStub(masm);
       break;
-    case TRBinaryOpIC::INT32:
+    case BinaryOpIC::INT32:
       UNREACHABLE();
       // The int32 case is identical to the Smi case.  We avoid creating this
       // ic state on x64.
       break;
-    case TRBinaryOpIC::HEAP_NUMBER:
+    case BinaryOpIC::HEAP_NUMBER:
       GenerateHeapNumberStub(masm);
       break;
-    case TRBinaryOpIC::ODDBALL:
+    case BinaryOpIC::ODDBALL:
       GenerateOddballStub(masm);
       break;
-    case TRBinaryOpIC::STRING:
+    case BinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
+    case BinaryOpIC::STRING:
       GenerateStringStub(masm);
       break;
-    case TRBinaryOpIC::GENERIC:
+    case BinaryOpIC::GENERIC:
       GenerateGeneric(masm);
       break;
     default:
@@ -384,7 +742,7 @@
 }
 
 
-const char* TypeRecordingBinaryOpStub::GetName() {
+const char* BinaryOpStub::GetName() {
   if (name_ != NULL) return name_;
   const int kMaxNameLength = 100;
   name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
@@ -400,19 +758,20 @@
   }
 
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "TypeRecordingBinaryOpStub_%s_%s_%s",
+               "BinaryOpStub_%s_%s_%s",
                op_name,
                overwrite_name,
-               TRBinaryOpIC::GetName(operands_type_));
+               BinaryOpIC::GetName(operands_type_));
   return name_;
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+void BinaryOpStub::GenerateSmiCode(
+    MacroAssembler* masm,
     Label* slow,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
 
-  // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
+  // Arguments to BinaryOpStub are in rdx and rax.
   Register left = rdx;
   Register right = rax;
 
@@ -558,10 +917,9 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
-    MacroAssembler* masm,
-    Label* allocation_failure,
-    Label* non_numeric_failure) {
+void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
+                                             Label* allocation_failure,
+                                             Label* non_numeric_failure) {
   switch (op_) {
     case Token::ADD:
     case Token::SUB:
@@ -660,32 +1018,32 @@
   // No fall-through from this generated code.
   if (FLAG_debug_code) {
     __ Abort("Unexpected fall-through in "
-             "TypeRecordingBinaryStub::GenerateFloatingPointCode.");
+             "BinaryStub::GenerateFloatingPointCode.");
   }
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD);
-  NearLabel left_not_string, call_runtime;
+  Label left_not_string, call_runtime;
 
   // Registers containing left and right operands respectively.
   Register left = rdx;
   Register right = rax;
 
   // Test if left operand is a string.
-  __ JumpIfSmi(left, &left_not_string);
+  __ JumpIfSmi(left, &left_not_string, Label::kNear);
   __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &left_not_string);
+  __ j(above_equal, &left_not_string, Label::kNear);
   StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
   GenerateRegisterArgsPush(masm);
   __ TailCallStub(&string_add_left_stub);
 
   // Left operand is not a string, test right.
   __ bind(&left_not_string);
-  __ JumpIfSmi(right, &call_runtime);
+  __ JumpIfSmi(right, &call_runtime, Label::kNear);
   __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &call_runtime);
+  __ j(above_equal, &call_runtime, Label::kNear);
 
   StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
   GenerateRegisterArgsPush(masm);
@@ -696,7 +1054,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
   GenerateRegisterArgsPush(masm);
   switch (op_) {
     case Token::ADD:
@@ -738,10 +1096,10 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label call_runtime;
-  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
-      result_type_ == TRBinaryOpIC::SMI) {
+  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+      result_type_ == BinaryOpIC::SMI) {
     // Only allow smi results.
     GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
   } else {
@@ -761,17 +1119,47 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  ASSERT(operands_type_ == BinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
   GenerateStringAddCode(masm);
   // Try to add arguments as strings, otherwise, transition to the generic
-  // TRBinaryOpIC type.
+  // BinaryOpIC type.
   GenerateTypeTransition(masm);
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = rdx;
+  Register right = rax;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+  __ j(above_equal, &call_runtime);
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+  __ j(above_equal, &call_runtime);
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   Label call_runtime;
 
   if (op_ == Token::ADD) {
@@ -781,18 +1169,18 @@
   }
 
   // Convert oddball arguments to numbers.
-  NearLabel check, done;
+  Label check, done;
   __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &check);
+  __ j(not_equal, &check, Label::kNear);
   if (Token::IsBitOp(op_)) {
     __ xor_(rdx, rdx);
   } else {
     __ LoadRoot(rdx, Heap::kNanValueRootIndex);
   }
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&check);
   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
   if (Token::IsBitOp(op_)) {
     __ xor_(rax, rax);
   } else {
@@ -804,7 +1192,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   Label gc_required, not_number;
   GenerateFloatingPointCode(masm, &gc_required, &not_number);
 
@@ -816,7 +1204,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   Label call_runtime, call_string_add_or_runtime;
 
   GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@@ -833,9 +1221,8 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
-    MacroAssembler* masm,
-    Label* alloc_failure) {
+void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+                                                Label* alloc_failure) {
   Label skip_allocation;
   OverwriteMode mode = mode_;
   switch (mode) {
@@ -873,7 +1260,7 @@
 }
 
 
-void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
   __ pop(rcx);
   __ push(rdx);
   __ push(rax);
@@ -900,11 +1287,10 @@
   Label skip_cache;
   const bool tagged = (argument_type_ == TAGGED);
   if (tagged) {
-    NearLabel input_not_smi;
-    NearLabel loaded;
+    Label input_not_smi, loaded;
     // Test that rax is a number.
     __ movq(rax, Operand(rsp, kPointerSize));
-    __ JumpIfNotSmi(rax, &input_not_smi);
+    __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
     // Input is a smi. Untag and load it onto the FPU stack.
     // Then load the bits of the double into rbx.
     __ SmiToInteger32(rax, rax);
@@ -915,7 +1301,7 @@
     __ movq(rdx, xmm1);
     __ fld_d(Operand(rsp, 0));
     __ addq(rsp, Immediate(kDoubleSize));
-    __ jmp(&loaded);
+    __ jmp(&loaded, Label::kNear);
 
     __ bind(&input_not_smi);
     // Check if input is a HeapNumber.
@@ -990,9 +1376,9 @@
   __ addl(rcx, rcx);
   __ lea(rcx, Operand(rax, rcx, times_8, 0));
   // Check if cache matches: Double value is stored in uint32_t[2] array.
-  NearLabel cache_miss;
+  Label cache_miss;
   __ cmpq(rbx, Operand(rcx, 0));
-  __ j(not_equal, &cache_miss);
+  __ j(not_equal, &cache_miss, Label::kNear);
   // Cache hit!
   __ movq(rax, Operand(rcx, 2 * kIntSize));
   if (tagged) {
@@ -1100,8 +1486,8 @@
     __ j(below, &in_range);
     // Check for infinity and NaN. Both return NaN for sin.
     __ cmpl(rdi, Immediate(0x7ff));
-    NearLabel non_nan_result;
-    __ j(not_equal, &non_nan_result);
+    Label non_nan_result;
+    __ j(not_equal, &non_nan_result, Label::kNear);
     // Input is +/-Infinity or NaN. Result is NaN.
     __ fstp(0);
     __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
@@ -1129,7 +1515,7 @@
 
     // Compute st(0) % st(1)
     {
-      NearLabel partial_remainder_loop;
+      Label partial_remainder_loop;
       __ bind(&partial_remainder_loop);
       __ fprem1();
       __ fwait();
@@ -1166,74 +1552,6 @@
 }
 
 
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
-                    Register result,
-                    Register source) {
-  // Result may be rcx. If result and source are the same register, source will
-  // be overwritten.
-  ASSERT(!result.is(rdi) && !result.is(rbx));
-  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
-  // cvttsd2si (32-bit version) directly.
-  Register double_exponent = rbx;
-  Register double_value = rdi;
-  NearLabel done, exponent_63_plus;
-  // Get double and extract exponent.
-  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
-  // Clear result preemptively, in case we need to return zero.
-  __ xorl(result, result);
-  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
-  // Double to remove sign bit, shift exponent down to least significant bits.
-  // and subtract bias to get the unshifted, unbiased exponent.
-  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
-  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
-  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
-  // Check whether the exponent is too big for a 63 bit unsigned integer.
-  __ cmpl(double_exponent, Immediate(63));
-  __ j(above_equal, &exponent_63_plus);
-  // Handle exponent range 0..62.
-  __ cvttsd2siq(result, xmm0);
-  __ jmp(&done);
-
-  __ bind(&exponent_63_plus);
-  // Exponent negative or 63+.
-  __ cmpl(double_exponent, Immediate(83));
-  // If exponent negative or above 83, number contains no significant bits in
-  // the range 0..2^31, so result is zero, and rcx already holds zero.
-  __ j(above, &done);
-
-  // Exponent in rage 63..83.
-  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
-  // the least significant exponent-52 bits.
-
-  // Negate low bits of mantissa if value is negative.
-  __ addq(double_value, double_value);  // Move sign bit to carry.
-  __ sbbl(result, result);  // And convert carry to -1 in result register.
-  // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0.
-  __ addl(double_value, result);
-  // Do xor in opposite directions depending on where we want the result
-  // (depending on whether result is rcx or not).
-
-  if (result.is(rcx)) {
-    __ xorl(double_value, result);
-    // Left shift mantissa by (exponent - mantissabits - 1) to save the
-    // bits that have positional values below 2^32 (the extra -1 comes from the
-    // doubling done above to move the sign bit into the carry flag).
-    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
-    __ shll_cl(double_value);
-    __ movl(result, double_value);
-  } else {
-    // As the then-branch, but move double-value to result before shifting.
-    __ xorl(result, double_value);
-    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
-    __ shll_cl(result);
-  }
-
-  __ bind(&done);
-}
-
-
 // Input: rdx, rax are the left and right objects of a bit op.
 // Output: rax, rcx are left and right integers for a bit op.
 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
@@ -1390,8 +1708,8 @@
 
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
-  NearLabel first_smi, check_second;
-  __ JumpIfSmi(first, &first_smi);
+  Label first_smi;
+  __ JumpIfSmi(first, &first_smi, Label::kNear);
   __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, on_not_smis);
   // Convert HeapNumber to smi if possible.
@@ -1406,7 +1724,6 @@
   __ j(not_equal, on_not_smis);
   __ Integer32ToSmi(first, smi_result);
 
-  __ bind(&check_second);
   __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
   __ bind(&first_smi);
   if (FLAG_debug_code) {
@@ -1432,91 +1749,6 @@
 }
 
 
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  Label slow, done;
-
-  if (op_ == Token::SUB) {
-    if (include_smi_code_) {
-      // Check whether the value is a smi.
-      Label try_float;
-      __ JumpIfNotSmi(rax, &try_float);
-      if (negative_zero_ == kIgnoreNegativeZero) {
-        __ SmiCompare(rax, Smi::FromInt(0));
-        __ j(equal, &done);
-      }
-      __ SmiNeg(rax, rax, &done);
-      __ jmp(&slow);  // zero, if not handled above, and Smi::kMinValue.
-
-      // Try floating point case.
-      __ bind(&try_float);
-    } else if (FLAG_debug_code) {
-      __ AbortIfSmi(rax);
-    }
-
-    __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &slow);
-    // Operand is a float, negate its value by flipping sign bit.
-    __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
-    __ Set(kScratchRegister, 0x01);
-    __ shl(kScratchRegister, Immediate(63));
-    __ xor_(rdx, kScratchRegister);  // Flip sign.
-    // rdx is value to store.
-    if (overwrite_ == UNARY_OVERWRITE) {
-      __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
-    } else {
-      __ AllocateHeapNumber(rcx, rbx, &slow);
-      // rcx: allocated 'empty' number
-      __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
-      __ movq(rax, rcx);
-    }
-  } else if (op_ == Token::BIT_NOT) {
-    if (include_smi_code_) {
-      Label try_float;
-      __ JumpIfNotSmi(rax, &try_float);
-      __ SmiNot(rax, rax);
-      __ jmp(&done);
-      // Try floating point case.
-      __ bind(&try_float);
-    } else if (FLAG_debug_code) {
-      __ AbortIfSmi(rax);
-    }
-
-    // Check if the operand is a heap number.
-    __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &slow);
-
-    // Convert the heap number in rax to an untagged integer in rcx.
-    IntegerConvert(masm, rax, rax);
-
-    // Do the bitwise operation and smi tag the result.
-    __ notl(rax);
-    __ Integer32ToSmi(rax, rax);
-  }
-
-  // Return from the stub.
-  __ bind(&done);
-  __ StubReturn(1);
-
-  // Handle the slow case by jumping to the JavaScript builtin.
-  __ bind(&slow);
-  __ pop(rcx);  // pop return address
-  __ push(rax);
-  __ push(rcx);  // push return address
-  switch (op_) {
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-      break;
-    case Token::BIT_NOT:
-      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void MathPowStub::Generate(MacroAssembler* masm) {
   // Registers are used as follows:
   // rdx = base
@@ -1562,20 +1794,20 @@
   __ movq(rdx, rax);
 
   // Get absolute value of exponent.
-  NearLabel no_neg;
+  Label no_neg;
   __ cmpl(rax, Immediate(0));
-  __ j(greater_equal, &no_neg);
+  __ j(greater_equal, &no_neg, Label::kNear);
   __ negl(rax);
   __ bind(&no_neg);
 
   // Load xmm1 with 1.
-  __ movsd(xmm1, xmm3);
-  NearLabel while_true;
-  NearLabel no_multiply;
+  __ movaps(xmm1, xmm3);
+  Label while_true;
+  Label no_multiply;
 
   __ bind(&while_true);
   __ shrl(rax, Immediate(1));
-  __ j(not_carry, &no_multiply);
+  __ j(not_carry, &no_multiply, Label::kNear);
   __ mulsd(xmm1, xmm0);
   __ bind(&no_multiply);
   __ mulsd(xmm0, xmm0);
@@ -1587,8 +1819,8 @@
   __ j(positive, &allocate_return);
   // Special case if xmm1 has reached infinity.
   __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ xorpd(xmm0, xmm0);
+  __ movaps(xmm1, xmm3);
+  __ xorps(xmm0, xmm0);
   __ ucomisd(xmm0, xmm1);
   __ j(equal, &call_runtime);
 
@@ -1605,12 +1837,11 @@
   __ ucomisd(xmm1, xmm1);
   __ j(parity_even, &call_runtime);
 
-  NearLabel base_not_smi;
-  NearLabel handle_special_cases;
-  __ JumpIfNotSmi(rdx, &base_not_smi);
+  Label base_not_smi, handle_special_cases;
+  __ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
   __ SmiToInteger32(rdx, rdx);
   __ cvtlsi2sd(xmm0, rdx);
-  __ jmp(&handle_special_cases);
+  __ jmp(&handle_special_cases, Label::kNear);
 
   __ bind(&base_not_smi);
   __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
@@ -1625,22 +1856,22 @@
 
   // base is in xmm0 and exponent is in xmm1.
   __ bind(&handle_special_cases);
-  NearLabel not_minus_half;
+  Label not_minus_half;
   // Test for -0.5.
   // Load xmm2 with -0.5.
   __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
   __ movq(xmm2, rcx);
   // xmm2 now has -0.5.
   __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half);
+  __ j(not_equal, &not_minus_half, Label::kNear);
 
   // Calculates reciprocal of square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
+  __ xorps(xmm1, xmm1);
   __ addsd(xmm1, xmm0);
   __ sqrtsd(xmm1, xmm1);
   __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
+  __ movaps(xmm1, xmm3);
   __ jmp(&allocate_return);
 
   // Test for 0.5.
@@ -1653,8 +1884,8 @@
   __ j(not_equal, &call_runtime);
   // Calculates square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
+  __ xorps(xmm1, xmm1);
+  __ addsd(xmm1, xmm0);  // Convert -0 to 0.
   __ sqrtsd(xmm1, xmm1);
 
   __ bind(&allocate_return);
@@ -1951,7 +2182,7 @@
 
   // rax: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
-  NearLabel seq_ascii_string, seq_two_byte_string, check_code;
+  Label seq_ascii_string, seq_two_byte_string, check_code;
   __ movq(rdi, Operand(rsp, kSubjectOffset));
   __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
@@ -1959,10 +2190,10 @@
   __ andb(rbx, Immediate(
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);
+  __ j(zero, &seq_two_byte_string, Label::kNear);
   // Any other flat string must be a flat ascii string.
   __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
-  __ j(zero, &seq_ascii_string);
+  __ j(zero, &seq_ascii_string, Label::kNear);
 
   // Check for flat cons string.
   // A flat cons string is a cons string where the second part is the empty
@@ -1986,7 +2217,7 @@
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
            Immediate(kStringRepresentationMask | kStringEncodingMask));
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);
+  __ j(zero, &seq_two_byte_string, Label::kNear);
   // Any other flat string must be ascii.
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
            Immediate(kStringRepresentationMask));
@@ -1997,7 +2228,7 @@
   // rax: RegExp data (FixedArray)
   __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
   __ Set(rcx, 1);  // Type is ascii.
-  __ jmp(&check_code);
+  __ jmp(&check_code, Label::kNear);
 
   __ bind(&seq_two_byte_string);
   // rdi: subject string (flat two-byte)
@@ -2008,9 +2239,8 @@
   __ bind(&check_code);
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
-  // the hole.
-  __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
-  __ j(not_equal, &runtime);
+  // a smi (code flushing support).
+  __ JumpIfSmi(r11, &runtime);
 
   // rdi: subject string
   // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
@@ -2083,13 +2313,13 @@
 
   // Argument 4: End of string data
   // Argument 3: Start of string data
-  NearLabel setup_two_byte, setup_rest;
+  Label setup_two_byte, setup_rest;
   __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
-  __ j(zero, &setup_two_byte);
+  __ j(zero, &setup_two_byte, Label::kNear);
   __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
   __ lea(arg4, FieldOperand(rdi, rcx, times_1, SeqAsciiString::kHeaderSize));
   __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
-  __ jmp(&setup_rest);
+  __ jmp(&setup_rest, Label::kNear);
   __ bind(&setup_two_byte);
   __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
   __ lea(arg4, FieldOperand(rdi, rcx, times_2, SeqTwoByteString::kHeaderSize));
@@ -2114,10 +2344,10 @@
   __ LeaveApiExitFrame();
 
   // Check the result.
-  NearLabel success;
+  Label success;
   Label exception;
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
-  __ j(equal, &success);
+  __ j(equal, &success, Label::kNear);
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
   __ j(equal, &exception);
   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
@@ -2166,12 +2396,12 @@
   // rbx: last_match_info backing store (FixedArray)
   // rcx: offsets vector
   // rdx: number of capture registers
-  NearLabel next_capture, done;
+  Label next_capture, done;
   // Capture register counter starts from number of capture registers and
   // counts down until wraping after zero.
   __ bind(&next_capture);
   __ subq(rdx, Immediate(1));
-  __ j(negative, &done);
+  __ j(negative, &done, Label::kNear);
   // Read the value from the static offsets vector buffer and make it a smi.
   __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
   __ Integer32ToSmi(rdi, rdi);
@@ -2204,8 +2434,8 @@
   __ movq(pending_exception_operand, rdx);
 
   __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
-  NearLabel termination_exception;
-  __ j(equal, &termination_exception);
+  Label termination_exception;
+  __ j(equal, &termination_exception, Label::kNear);
   __ Throw(rax);
 
   __ bind(&termination_exception);
@@ -2330,9 +2560,13 @@
   // Heap::GetNumberStringCache.
   Label is_smi;
   Label load_result_from_cache;
+  Factory* factory = masm->isolate()->factory();
   if (!object_is_smi) {
     __ JumpIfSmi(object, &is_smi);
-    __ CheckMap(object, FACTORY->heap_number_map(), not_found, true);
+    __ CheckMap(object,
+                factory->heap_number_map(),
+                not_found,
+                DONT_DO_SMI_CHECK);
 
     STATIC_ASSERT(8 == kDoubleSize);
     __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
@@ -2419,6 +2653,7 @@
   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
 
   Label check_unequal_objects, done;
+  Factory* factory = masm->isolate()->factory();
 
   // Compare two smis if required.
   if (include_smi_compare_) {
@@ -2446,16 +2681,16 @@
 
   // Two identical objects are equal unless they are both NaN or undefined.
   {
-    NearLabel not_identical;
+    Label not_identical;
     __ cmpq(rax, rdx);
-    __ j(not_equal, &not_identical);
+    __ j(not_equal, &not_identical, Label::kNear);
 
     if (cc_ != equal) {
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
-      NearLabel check_for_nan;
+      Label check_for_nan;
       __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
-      __ j(not_equal, &check_for_nan);
+      __ j(not_equal, &check_for_nan, Label::kNear);
       __ Set(rax, NegativeComparisonResult(cc_));
       __ ret(0);
       __ bind(&check_for_nan);
@@ -2466,20 +2701,19 @@
     // Note: if cc_ != equal, never_nan_nan_ is not used.
     // We cannot set rax to EQUAL until just before return because
     // rax must be unchanged on jump to not_identical.
-
     if (never_nan_nan_ && (cc_ == equal)) {
       __ Set(rax, EQUAL);
       __ ret(0);
     } else {
-      NearLabel heap_number;
+      Label heap_number;
       // If it's not a heap number, then return equal for (in)equality operator.
       __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-             FACTORY->heap_number_map());
-      __ j(equal, &heap_number);
+             factory->heap_number_map());
+      __ j(equal, &heap_number, Label::kNear);
       if (cc_ != equal) {
         // Call runtime on identical JSObjects.  Otherwise return equal.
         __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-        __ j(above_equal, &not_identical);
+        __ j(above_equal, &not_identical, Label::kNear);
       }
       __ Set(rax, EQUAL);
       __ ret(0);
@@ -2519,7 +2753,7 @@
 
         // Check if the non-smi operand is a heap number.
         __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
+               factory->heap_number_map());
         // If heap number, handle it in the slow case.
         __ j(equal, &slow);
         // Return non-equal.  ebx (the lower half of rbx) is not zero.
@@ -2535,9 +2769,9 @@
 
       // If the first object is a JS object, we have done pointer comparison.
       STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      NearLabel first_non_object;
+      Label first_non_object;
       __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-      __ j(below, &first_non_object);
+      __ j(below, &first_non_object, Label::kNear);
       // Return non-zero (eax (not rax) is not zero)
       Label return_not_equal;
       STATIC_ASSERT(kHeapObjectTag != 0);
@@ -2564,14 +2798,14 @@
   // Generate the number comparison code.
   if (include_number_compare_) {
     Label non_number_comparison;
-    NearLabel unordered;
+    Label unordered;
     FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
     __ xorl(rax, rax);
     __ xorl(rcx, rcx);
     __ ucomisd(xmm0, xmm1);
 
     // Don't base result on EFLAGS when a NaN is involved.
-    __ j(parity_even, &unordered);
+    __ j(parity_even, &unordered, Label::kNear);
     // Return a result of -1, 0, or 1, based on EFLAGS.
     __ setcc(above, rax);
     __ setcc(below, rcx);
@@ -2611,13 +2845,21 @@
       rdx, rax, rcx, rbx, &check_unequal_objects);
 
   // Inline comparison of ascii strings.
-  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+  if (cc_ == equal) {
+    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      rdx,
                                                      rax,
                                                      rcx,
-                                                     rbx,
-                                                     rdi,
-                                                     r8);
+                                                     rbx);
+  } else {
+    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                       rdx,
+                                                       rax,
+                                                       rcx,
+                                                       rbx,
+                                                       rdi,
+                                                       r8);
+  }
 
 #ifdef DEBUG
   __ Abort("Unexpected fall-through from string comparison");
@@ -2628,7 +2870,7 @@
     // Not strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    NearLabel not_both_objects, return_unequal;
+    Label not_both_objects, return_unequal;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -2636,17 +2878,17 @@
     STATIC_ASSERT(kSmiTagMask == 1);
     __ lea(rcx, Operand(rax, rdx, times_1, 0));
     __ testb(rcx, Immediate(kSmiTagMask));
-    __ j(not_zero, &not_both_objects);
+    __ j(not_zero, &not_both_objects, Label::kNear);
     __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
-    __ j(below, &not_both_objects);
+    __ j(below, &not_both_objects, Label::kNear);
     __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
-    __ j(below, &not_both_objects);
+    __ j(below, &not_both_objects, Label::kNear);
     __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
-    __ j(zero, &return_unequal);
+    __ j(zero, &return_unequal, Label::kNear);
     __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
-    __ j(zero, &return_unequal);
+    __ j(zero, &return_unequal, Label::kNear);
     // The objects are both undetectable, so they both compare as the value
     // undefined, and are equal.
     __ Set(rax, EQUAL);
@@ -2704,30 +2946,22 @@
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   Label slow;
 
-  // If the receiver might be a value (string, number or boolean) check for this
-  // and box it if it is.
-  if (ReceiverMightBeValue()) {
+  // The receiver might implicitly be the global object. This is
+  // indicated by passing the hole as the receiver to the call
+  // function stub.
+  if (ReceiverMightBeImplicit()) {
+    Label call;
     // Get the receiver from the stack.
     // +1 ~ return address
-    Label receiver_is_value, receiver_is_js_object;
     __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
-
-    // Check if receiver is a smi (which is a number value).
-    __ JumpIfSmi(rax, &receiver_is_value);
-
-    // Check if the receiver is a valid JS object.
-    __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
-    __ j(above_equal, &receiver_is_js_object);
-
-    // Call the runtime to box the value.
-    __ bind(&receiver_is_value);
-    __ EnterInternalFrame();
-    __ push(rax);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ LeaveInternalFrame();
-    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
-
-    __ bind(&receiver_is_js_object);
+    // Calling as a function is indicated with the hole as the receiver.
+    __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+    __ j(not_equal, &call, Label::kNear);
+    // Patch the receiver on the stack with the global receiver object.
+    __ movq(rbx, GlobalObjectOperand());
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rbx);
+    __ bind(&call);
   }
 
   // Get the function to call from the stack.
@@ -2742,7 +2976,23 @@
 
   // Fast-case: Just invoke the function.
   ParameterCount actual(argc_);
-  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+  if (ReceiverMightBeImplicit()) {
+    Label call_as_function;
+    __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+    __ j(equal, &call_as_function);
+    __ InvokeFunction(rdi,
+                      actual,
+                      JUMP_FUNCTION,
+                      NullCallWrapper(),
+                      CALL_AS_METHOD);
+    __ bind(&call_as_function);
+  }
+  __ InvokeFunction(rdi,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    CALL_AS_FUNCTION);
 
   // Slow-case: Non-function called.
   __ bind(&slow);
@@ -2876,11 +3126,11 @@
   // Handling of failure.
   __ bind(&failure_returned);
 
-  NearLabel retry;
+  Label retry;
   // If the returned exception is RETRY_AFTER_GC continue at retry label
   STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
-  __ j(zero, &retry);
+  __ j(zero, &retry, Label::kNear);
 
   // Special handling of out of memory exceptions.
   __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
@@ -3180,11 +3430,11 @@
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     // Look up the function and the map in the instanceof cache.
-    NearLabel miss;
+    Label miss;
     __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
-    __ j(not_equal, &miss);
+    __ j(not_equal, &miss, Label::kNear);
     __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
-    __ j(not_equal, &miss);
+    __ j(not_equal, &miss, Label::kNear);
     __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
     __ ret(2 * kPointerSize);
     __ bind(&miss);
@@ -3220,15 +3470,15 @@
   __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
 
   // Loop through the prototype chain looking for the function prototype.
-  NearLabel loop, is_instance, is_not_instance;
+  Label loop, is_instance, is_not_instance;
   __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
   __ bind(&loop);
   __ cmpq(rcx, rbx);
-  __ j(equal, &is_instance);
+  __ j(equal, &is_instance, Label::kNear);
   __ cmpq(rcx, kScratchRegister);
   // The code at is_not_instance assumes that kScratchRegister contains a
   // non-zero GCable value (the null object in this case).
-  __ j(equal, &is_not_instance);
+  __ j(equal, &is_not_instance, Label::kNear);
   __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
   __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
   __ jmp(&loop);
@@ -3449,10 +3699,14 @@
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
+  Factory* factory = masm->isolate()->factory();
   // Index is not a smi.
   __ bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
-  __ CheckMap(index_, FACTORY->heap_number_map(), index_not_number_, true);
+  __ CheckMap(index_,
+              factory->heap_number_map(),
+              index_not_number_,
+              DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ push(object_);
   __ push(index_);
@@ -3592,10 +3846,10 @@
   // rax: first string
   // rdx: second string
   // Check if either of the strings are empty. In that case return the other.
-  NearLabel second_not_zero_length, both_not_zero_length;
+  Label second_not_zero_length, both_not_zero_length;
   __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
   __ SmiTest(rcx);
-  __ j(not_zero, &second_not_zero_length);
+  __ j(not_zero, &second_not_zero_length, Label::kNear);
   // Second string is empty, result is first string which is already in rax.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_add_native(), 1);
@@ -3603,7 +3857,7 @@
   __ bind(&second_not_zero_length);
   __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
   __ SmiTest(rbx);
-  __ j(not_zero, &both_not_zero_length);
+  __ j(not_zero, &both_not_zero_length, Label::kNear);
   // First string is empty, result is second string which is in rdx.
   __ movq(rax, rdx);
   __ IncrementCounter(counters->string_add_native(), 1);
@@ -3897,9 +4151,9 @@
   ASSERT(count.is(rcx));  // rep movs count
 
   // Nothing to do for zero characters.
-  NearLabel done;
+  Label done;
   __ testl(count, count);
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
 
   // Make count the number of bytes to copy.
   if (!ascii) {
@@ -3908,9 +4162,9 @@
   }
 
   // Don't enter the rep movs if there are less than 4 bytes to copy.
-  NearLabel last_bytes;
+  Label last_bytes;
   __ testl(count, Immediate(~7));
-  __ j(zero, &last_bytes);
+  __ j(zero, &last_bytes, Label::kNear);
 
   // Copy from edi to esi using rep movs instruction.
   __ movl(kScratchRegister, count);
@@ -3924,7 +4178,7 @@
   // Check if there are more bytes to copy.
   __ bind(&last_bytes);
   __ testl(count, count);
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
 
   // Copy remaining characters.
   Label loop;
@@ -3952,10 +4206,10 @@
 
   // Make sure that both characters are not digits as such strings has a
   // different hash algorithm. Don't try to look for these in the symbol table.
-  NearLabel not_array_index;
+  Label not_array_index;
   __ leal(scratch, Operand(c1, -'0'));
   __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
-  __ j(above, &not_array_index);
+  __ j(above, &not_array_index, Label::kNear);
   __ leal(scratch, Operand(c2, -'0'));
   __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
   __ j(below_equal, not_found);
@@ -4017,9 +4271,9 @@
                          SymbolTable::kElementsStartOffset));
 
     // If entry is undefined no string with this hash can be found.
-    NearLabel is_string;
+    Label is_string;
     __ CmpObjectType(candidate, ODDBALL_TYPE, map);
-    __ j(not_equal, &is_string);
+    __ j(not_equal, &is_string, Label::kNear);
 
     __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
     __ j(equal, not_found);
@@ -4263,6 +4517,47 @@
 }
 
 
+void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                                      Register left,
+                                                      Register right,
+                                                      Register scratch1,
+                                                      Register scratch2) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label check_zero_length;
+  __ movq(length, FieldOperand(left, String::kLengthOffset));
+  __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
+  __ j(equal, &check_zero_length, Label::kNear);
+  __ Move(rax, Smi::FromInt(NOT_EQUAL));
+  __ ret(0);
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ SmiTest(length);
+  __ j(not_zero, &compare_chars, Label::kNear);
+  __ Move(rax, Smi::FromInt(EQUAL));
+  __ ret(0);
+
+  // Compare characters.
+  __ bind(&compare_chars);
+  Label strings_not_equal;
+  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
+                                &strings_not_equal, Label::kNear);
+
+  // Characters are equal.
+  __ Move(rax, Smi::FromInt(EQUAL));
+  __ ret(0);
+
+  // Characters are not equal.
+  __ bind(&strings_not_equal);
+  __ Move(rax, Smi::FromInt(NOT_EQUAL));
+  __ ret(0);
+}
+
+
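
GenerateFlatAsciiStringEquals above answers only EQUAL or NOT_EQUAL: it compares the smi lengths, short-circuits the zero-length case, then compares the bytes. A plain C++ sketch of the same fast path, using raw pointers instead of V8 heap strings (names are illustrative):

// Sketch of the equality fast path above for flat one-byte strings;
// EQUAL/NOT_EQUAL mirror the smi results returned in rax.
#include <cstddef>
#include <cstring>

enum Result { EQUAL = 0, NOT_EQUAL = 1 };

Result FlatAsciiStringEquals(const char* left, size_t left_len,
                             const char* right, size_t right_len) {
  if (left_len != right_len) return NOT_EQUAL;  // lengths differ: not equal
  if (left_len == 0) return EQUAL;              // both empty
  return std::memcmp(left, right, left_len) == 0 ? EQUAL : NOT_EQUAL;
}
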
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                         Register left,
                                                         Register right,
@@ -4282,8 +4577,8 @@
             FieldOperand(right, String::kLengthOffset));
   // Register scratch4 now holds left.length - right.length.
   const Register length_difference = scratch4;
-  NearLabel left_shorter;
-  __ j(less, &left_shorter);
+  Label left_shorter;
+  __ j(less, &left_shorter, Label::kNear);
   // The right string isn't longer that the left one.
   // Get the right string's length by subtracting the (non-negative) difference
   // from the left string's length.
@@ -4292,54 +4587,30 @@
   // Register scratch1 now holds Min(left.length, right.length).
   const Register min_length = scratch1;
 
-  NearLabel compare_lengths;
+  Label compare_lengths;
   // If min-length is zero, go directly to comparing lengths.
   __ SmiTest(min_length);
-  __ j(zero, &compare_lengths);
+  __ j(zero, &compare_lengths, Label::kNear);
 
-  __ SmiToInteger32(min_length, min_length);
+  // Compare loop.
+  Label result_not_equal;
+  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                &result_not_equal, Label::kNear);
 
-  // Registers scratch2 and scratch3 are free.
-  NearLabel result_not_equal;
-  Label loop;
-  {
-    // Check characters 0 .. min_length - 1 in a loop.
-    // Use scratch3 as loop index, min_length as limit and scratch2
-    // for computation.
-    const Register index = scratch3;
-    __ Set(index, 0);  // Index into strings.
-    __ bind(&loop);
-    // Compare characters.
-    // TODO(lrn): Could we load more than one character at a time?
-    __ movb(scratch2, FieldOperand(left,
-                                   index,
-                                   times_1,
-                                   SeqAsciiString::kHeaderSize));
-    // Increment index and use -1 modifier on next load to give
-    // the previous load extra time to complete.
-    __ addl(index, Immediate(1));
-    __ cmpb(scratch2, FieldOperand(right,
-                                   index,
-                                   times_1,
-                                   SeqAsciiString::kHeaderSize - 1));
-    __ j(not_equal, &result_not_equal);
-    __ cmpl(index, min_length);
-    __ j(not_equal, &loop);
-  }
   // Completed loop without finding different characters.
   // Compare lengths (precomputed).
   __ bind(&compare_lengths);
   __ SmiTest(length_difference);
-  __ j(not_zero, &result_not_equal);
+  __ j(not_zero, &result_not_equal, Label::kNear);
 
   // Result is EQUAL.
   __ Move(rax, Smi::FromInt(EQUAL));
   __ ret(0);
 
-  NearLabel result_greater;
+  Label result_greater;
   __ bind(&result_not_equal);
   // Unequal comparison of left to right, either character or length.
-  __ j(greater, &result_greater);
+  __ j(greater, &result_greater, Label::kNear);
 
   // Result is LESS.
   __ Move(rax, Smi::FromInt(LESS));
@@ -4352,6 +4623,36 @@
 }
 
 
+void StringCompareStub::GenerateAsciiCharsCompareLoop(
+    MacroAssembler* masm,
+    Register left,
+    Register right,
+    Register length,
+    Register scratch,
+    Label* chars_not_equal,
+    Label::Distance near_jump) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiToInteger32(length, length);
+  __ lea(left,
+         FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(right,
+         FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+  __ neg(length);
+  Register index = length;  // index = -length;
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ movb(scratch, Operand(left, index, times_1, 0));
+  __ cmpb(scratch, Operand(right, index, times_1, 0));
+  __ j(not_equal, chars_not_equal, near_jump);
+  __ addq(index, Immediate(1));
+  __ j(not_zero, &loop);
+}
+
+
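
The compare loop above biases both string pointers past the compared region and runs a signed index from -length up to 0, so the add that advances the index also provides the loop-termination flag and no separate bounds compare is needed; GenerateCompareFlatAsciiStrings then breaks ties on the precomputed length difference. A stand-alone sketch of the same idea over plain one-byte buffers (not V8 heap strings):

// Sketch of the negative-index compare loop and length tie-break used above.
#include <cstddef>

// Returns <0, 0 or >0, corresponding to LESS / EQUAL / GREATER.
int CompareFlatAscii(const char* left, size_t left_len,
                     const char* right, size_t right_len) {
  size_t min_len = left_len < right_len ? left_len : right_len;

  // Bias the pointers past the compared region and count a signed index from
  // -min_len up to 0; the loop ends when the index reaches zero, so no extra
  // bounds check is needed inside the loop.
  const char* left_end = left + min_len;
  const char* right_end = right + min_len;
  for (ptrdiff_t index = -static_cast<ptrdiff_t>(min_len); index != 0; ++index) {
    char l = left_end[index];
    char r = right_end[index];
    if (l != r) return l < r ? -1 : 1;  // first differing character decides
  }

  // All compared characters equal: the shorter string orders first.
  if (left_len == right_len) return 0;
  return left_len < right_len ? -1 : 1;
}
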
 void StringCompareStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -4364,9 +4665,9 @@
   __ movq(rax, Operand(rsp, 1 * kPointerSize));  // right
 
   // Check for identity.
-  NearLabel not_same;
+  Label not_same;
   __ cmpq(rdx, rax);
-  __ j(not_equal, &not_same);
+  __ j(not_equal, &not_same, Label::kNear);
   __ Move(rax, Smi::FromInt(EQUAL));
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_compare_native(), 1);
@@ -4394,16 +4695,16 @@
 
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
-  NearLabel miss;
-  __ JumpIfNotBothSmi(rdx, rax, &miss);
+  Label miss;
+  __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
 
   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
     __ subq(rax, rdx);
   } else {
-    NearLabel done;
+    Label done;
     __ subq(rdx, rax);
-    __ j(no_overflow, &done);
+    __ j(no_overflow, &done, Label::kNear);
     // Correct sign of result in case of overflow.
     __ SmiNot(rdx, rdx);
     __ bind(&done);
@@ -4419,16 +4720,16 @@
 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
-  NearLabel generic_stub;
-  NearLabel unordered;
-  NearLabel miss;
+  Label generic_stub;
+  Label unordered;
+  Label miss;
   Condition either_smi = masm->CheckEitherSmi(rax, rdx);
-  __ j(either_smi, &generic_stub);
+  __ j(either_smi, &generic_stub, Label::kNear);
 
   __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
-  __ j(not_equal, &miss);
+  __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
-  __ j(not_equal, &miss);
+  __ j(not_equal, &miss, Label::kNear);
 
   // Load left and right operand
   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
@@ -4438,7 +4739,7 @@
   __ ucomisd(xmm0, xmm1);
 
   // Don't base result on EFLAGS when a NaN is involved.
-  __ j(parity_even, &unordered);
+  __ j(parity_even, &unordered, Label::kNear);
 
   // Return a result of -1, 0, or 1, based on EFLAGS.
   // Performing mov, because xor would destroy the flag register.
@@ -4459,16 +4760,133 @@
 }
 
 
+void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SYMBOLS);
+  ASSERT(GetCondition() == equal);
+
+  // Registers containing left and right operands respectively.
+  Register left = rdx;
+  Register right = rax;
+  Register tmp1 = rcx;
+  Register tmp2 = rbx;
+
+  // Check that both operands are heap objects.
+  Label miss;
+  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
+  __ j(cond, &miss, Label::kNear);
+
+  // Check that both operands are symbols.
+  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, tmp2);
+  __ testb(tmp1, Immediate(kIsSymbolMask));
+  __ j(zero, &miss, Label::kNear);
+
+  // Symbols are compared by identity.
+  Label done;
+  __ cmpq(left, right);
+  // Make sure rax is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(rax));
+  __ j(not_equal, &done, Label::kNear);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Move(rax, Smi::FromInt(EQUAL));
+  __ bind(&done);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
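
GenerateSymbols above can compare with a single cmpq because symbols are interned strings: one canonical heap object per distinct content, so equal contents imply equal pointers. A small sketch of interning plus identity comparison, with std::unordered_set standing in for V8's symbol table (illustrative only):

// Sketch: interning makes pointer identity a valid equality test.
#include <string>
#include <unordered_set>

class SymbolTable {
 public:
  // Returns a canonical pointer; identical contents always yield the same pointer.
  const std::string* Intern(const std::string& s) {
    return &*table_.insert(s).first;
  }
 private:
  std::unordered_set<std::string> table_;
};

bool SymbolsEqual(const std::string* a, const std::string* b) {
  return a == b;  // identity comparison suffices for interned strings
}
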
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::STRINGS);
+  ASSERT(GetCondition() == equal);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = rdx;
+  Register right = rax;
+  Register tmp1 = rcx;
+  Register tmp2 = rbx;
+  Register tmp3 = rdi;
+
+  // Check that both operands are heap objects.
+  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
+  __ j(cond, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+  __ movq(tmp3, tmp1);
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ or_(tmp3, tmp2);
+  __ testb(tmp3, Immediate(kIsNotStringMask));
+  __ j(not_zero, &miss);
+
+  // Fast check for identical strings.
+  Label not_same;
+  __ cmpq(left, right);
+  __ j(not_equal, &not_same, Label::kNear);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Move(rax, Smi::FromInt(EQUAL));
+  __ ret(0);
+
+  // Handle not identical strings.
+  __ bind(&not_same);
+
+  // Check that both strings are symbols. If they are, we're done
+  // because we already know they are not identical.
+  Label do_compare;
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, tmp2);
+  __ testb(tmp1, Immediate(kIsSymbolMask));
+  __ j(zero, &do_compare, Label::kNear);
+  // Make sure rax is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(rax));
+  __ ret(0);
+
+  // Check that both strings are sequential ASCII.
+  Label runtime;
+  __ bind(&do_compare);
+  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+
+  // Compare flat ASCII strings. Returns when done.
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2);
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ pop(tmp1);  // Return address.
+  __ push(left);
+  __ push(right);
+  __ push(tmp1);
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::OBJECTS);
-  NearLabel miss;
+  Label miss;
   Condition either_smi = masm->CheckEitherSmi(rdx, rax);
-  __ j(either_smi, &miss);
+  __ j(either_smi, &miss, Label::kNear);
 
   __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
-  __ j(not_equal, &miss, not_taken);
+  __ j(not_equal, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
   __ subq(rax, rdx);
@@ -4510,6 +4928,206 @@
 }
 
 
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register properties,
+    String* name,
+    Register r0) {
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
+  // not equal to the name and kProbes-th slot is not used (its name is the
+  // undefined value), it guarantees the hash table doesn't contain the
+  // property. It's true even if some slots represent deleted properties
+  // (their names are the null value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // r0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = r0;
+    // Capacity is smi 2^n.
+    __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
+    __ decl(index);
+    __ and_(index,
+            Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
+
+    Register entity_name = r0;
+    // Having undefined at this place means the name is not contained.
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ movq(entity_name, Operand(properties,
+                                 index,
+                                 times_pointer_size,
+                                 kElementsStartOffset - kHeapObjectTag));
+    __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
+    __ j(equal, done);
+
+    // Stop if found the property.
+    __ Cmp(entity_name, Handle<String>(name));
+    __ j(equal, miss);
+
+    // Check if the entry name is not a symbol.
+    __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
+    __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
+             Immediate(kIsSymbolMask));
+    __ j(zero, miss);
+  }
+
+  StringDictionaryLookupStub stub(properties,
+                                  r0,
+                                  r0,
+                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
+  __ Push(Handle<Object>(name));
+  __ push(Immediate(name->Hash()));
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
+  __ testq(r0, r0);
+  __ j(not_zero, miss);
+  __ jmp(done);
+  return result;
+}
+
+
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found leaving the
+// index into the dictionary in |r1|. Jump to the |miss| label
+// otherwise.
+void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+                                                        Label* miss,
+                                                        Label* done,
+                                                        Register elements,
+                                                        Register name,
+                                                        Register r0,
+                                                        Register r1) {
+  // Assert that name contains a string.
+  if (FLAG_debug_code) __ AbortIfNotString(name);
+
+  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
+  __ decl(r0);
+
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
+    __ shrl(r1, Immediate(String::kHashShift));
+    if (i > 0) {
+      __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(r1, r0);
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
+
+    // Check if the key is identical to the name.
+    __ cmpq(name, Operand(elements, r1, times_pointer_size,
+                          kElementsStartOffset - kHeapObjectTag));
+    __ j(equal, done);
+  }
+
+  StringDictionaryLookupStub stub(elements,
+                                  r0,
+                                  r1,
+                                  POSITIVE_LOOKUP);
+  __ push(name);
+  __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
+  __ shrl(r0, Immediate(String::kHashShift));
+  __ push(r0);
+  __ CallStub(&stub);
+
+  __ testq(r0, r0);
+  __ j(zero, miss);
+  __ jmp(done);
+}
+
+
+void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // Stack frame on entry:
+  //  esp[0 * kPointerSize]: return address.
+  //  esp[1 * kPointerSize]: key's hash.
+  //  esp[2 * kPointerSize]: key.
+  // Registers:
+  //  dictionary_: StringDictionary to probe.
+  //  result_: used as scratch.
+  //  index_: will hold an index of entry if lookup is successful.
+  //          might alias with result_.
+  // Returns:
+  //  result_ is zero if lookup failed, non zero otherwise.
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  Register scratch = result_;
+
+  __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
+  __ decl(scratch);
+  __ push(scratch);
+
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
+  // not equal to the name and kProbes-th slot is not used (its name is the
+  // undefined value), it guarantees the hash table doesn't contain the
+  // property. It's true even if some slots represent deleted properties
+  // (their names are the null value).
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ movq(scratch, Operand(rsp, 2 * kPointerSize));
+    if (i > 0) {
+      __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(scratch, Operand(rsp, 0));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.
+
+    // Having undefined at this place means the name is not contained.
+    __ movq(scratch, Operand(dictionary_,
+                             index_,
+                             times_pointer_size,
+                             kElementsStartOffset - kHeapObjectTag));
+
+    __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
+    __ j(equal, &not_in_dictionary);
+
+    // Stop if found the property.
+    __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
+    __ j(equal, &in_dictionary);
+
+    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+      // If we hit a non-symbol key during negative lookup
+      // we have to bail out, as this key might be equal to the
+      // key we are looking for.
+
+      // Check if the entry name is not a symbol.
+      __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+      __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+               Immediate(kIsSymbolMask));
+      __ j(zero, &maybe_in_dictionary);
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing negative lookup then probing failure should be
+  // treated as a lookup success. For positive lookup probing failure
+  // should be treated as lookup failure.
+  if (mode_ == POSITIVE_LOOKUP) {
+    __ movq(scratch, Immediate(0));
+    __ Drop(1);
+    __ ret(2 * kPointerSize);
+  }
+
+  __ bind(&in_dictionary);
+  __ movq(scratch, Immediate(1));
+  __ Drop(1);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&not_in_dictionary);
+  __ movq(scratch, Immediate(0));
+  __ Drop(1);
+  __ ret(2 * kPointerSize);
+}
+
+
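
All three lookup routines above probe the dictionary the same way: capacity is a power of two, the masked index is the (hash + i + i*i) & mask sequence described in the comments, and each entry occupies StringDictionary::kEntrySize == 3 slots, hence the index *= 3 scaling. A stand-alone sketch of that probe arithmetic over a flat array (sentinels and types are stand-ins, not V8's):

// Sketch of the probing scheme used by the lookup stubs above:
// quadratic-style probing over a power-of-two table, 3 slots per entry.
#include <cstddef>
#include <cstdint>
#include <cstring>

static const int kEntrySize = 3;      // key, value, details per entry
static const uint32_t kNotFound = ~0u;

// keys[capacity * kEntrySize], flattened; nullptr marks an unused (undefined) slot.
uint32_t Probe(const char* const* keys, uint32_t capacity,
               uint32_t hash, const char* name, int max_probes) {
  uint32_t mask = capacity - 1;        // capacity is a power of two
  for (int i = 0; i < max_probes; i++) {
    // Same masked index the stubs compute: (hash + i + i*i) & mask.
    uint32_t index = (hash + i + i * i) & mask;
    const char* key = keys[index * kEntrySize];      // index *= 3 scaling
    if (key == nullptr) return kNotFound;            // undefined slot: not present
    if (std::strcmp(key, name) == 0) return index;   // found the property
  }
  return kNotFound;  // probing budget exhausted (the "maybe in dictionary" case)
}
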
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index f97d099..2774403 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -71,21 +71,113 @@
 };
 
 
-class TypeRecordingBinaryOpStub: public CodeStub {
+class UnaryOpStub: public CodeStub {
  public:
-  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+  UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
       : op_(op),
         mode_(mode),
-        operands_type_(TRBinaryOpIC::UNINITIALIZED),
-        result_type_(TRBinaryOpIC::UNINITIALIZED),
+        operand_type_(UnaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+  }
+
+  UnaryOpStub(
+      int key,
+      UnaryOpIC::TypeInfo operand_type)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        operand_type_(operand_type),
+        name_(NULL) {
+  }
+
+ private:
+  Token::Value op_;
+  UnaryOverwriteMode mode_;
+
+  // Operand type information determined at runtime.
+  UnaryOpIC::TypeInfo operand_type_;
+
+  char* name_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("UnaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           UnaryOpIC::GetName(operand_type_));
+  }
+#endif
+
+  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+  class OpBits: public BitField<Token::Value, 1, 7> {};
+  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
+
+  Major MajorKey() { return UnaryOp; }
+  int MinorKey() {
+    return ModeBits::encode(mode_)
+           | OpBits::encode(op_)
+           | OperandTypeInfoBits::encode(operand_type_);
+  }
+
+  // Note: A lot of the helper functions below will vanish when we use virtual
+  // function instead of switch more often.
+  void Generate(MacroAssembler* masm);
+
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateSmiStubSub(MacroAssembler* masm);
+  void GenerateSmiStubBitNot(MacroAssembler* masm);
+  void GenerateSmiCodeSub(MacroAssembler* masm,
+                          Label* non_smi,
+                          Label* slow,
+                          Label::Distance non_smi_near = Label::kFar,
+                          Label::Distance slow_near = Label::kFar);
+  void GenerateSmiCodeBitNot(MacroAssembler* masm,
+                             Label* non_smi,
+                             Label::Distance non_smi_near);
+
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateHeapNumberStubSub(MacroAssembler* masm);
+  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateGenericStubSub(MacroAssembler* masm);
+  void GenerateGenericStubBitNot(MacroAssembler* masm);
+  void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return UnaryOpIC::ToState(operand_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_unary_op_type(operand_type_);
+  }
+};
+
+
+class BinaryOpStub: public CodeStub {
+ public:
+  BinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(BinaryOpIC::UNINITIALIZED),
+        result_type_(BinaryOpIC::UNINITIALIZED),
         name_(NULL) {
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  TypeRecordingBinaryOpStub(
+  BinaryOpStub(
       int key,
-      TRBinaryOpIC::TypeInfo operands_type,
-      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      BinaryOpIC::TypeInfo operands_type,
+      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         operands_type_(operands_type),
@@ -102,8 +194,8 @@
   OverwriteMode mode_;
 
   // Operand type information determined at runtime.
-  TRBinaryOpIC::TypeInfo operands_type_;
-  TRBinaryOpIC::TypeInfo result_type_;
+  BinaryOpIC::TypeInfo operands_type_;
+  BinaryOpIC::TypeInfo result_type_;
 
   char* name_;
 
@@ -111,22 +203,22 @@
 
 #ifdef DEBUG
   void Print() {
-    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+    PrintF("BinaryOpStub %d (op %s), "
            "(mode %d, runtime_type_info %s)\n",
            MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
-           TRBinaryOpIC::GetName(operands_type_));
+           BinaryOpIC::GetName(operands_type_));
   }
 #endif
 
   // Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
-  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 9, 3> {};
-  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 12, 3> {};
+  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {};
+  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {};
 
-  Major MajorKey() { return TypeRecordingBinaryOp; }
+  Major MajorKey() { return BinaryOp; }
   int MinorKey() {
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
@@ -152,6 +244,7 @@
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
 
   void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
@@ -159,15 +252,15 @@
   void GenerateTypeTransition(MacroAssembler* masm);
   void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
 
-  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
 
   virtual InlineCacheState GetICState() {
-    return TRBinaryOpIC::ToState(operands_type_);
+    return BinaryOpIC::ToState(operands_type_);
   }
 
   virtual void FinishCode(Code* code) {
-    code->set_type_recording_binary_op_type(operands_type_);
-    code->set_type_recording_binary_op_result_type(result_type_);
+    code->set_binary_op_type(operands_type_);
+    code->set_binary_op_result_type(result_type_);
   }
 
   friend class CodeGenerator;
@@ -276,10 +369,9 @@
 
 class StringCompareStub: public CodeStub {
  public:
-  explicit StringCompareStub() {}
+  StringCompareStub() {}
 
-  // Compare two flat ascii strings and returns result in rax after popping two
-  // arguments from the stack.
+  // Compares two flat ASCII strings and returns result in rax.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                               Register left,
                                               Register right,
@@ -288,11 +380,27 @@
                                               Register scratch3,
                                               Register scratch4);
 
- private:
-  Major MajorKey() { return StringCompare; }
-  int MinorKey() { return 0; }
+  // Compares two flat ASCII strings for equality and returns result
+  // in rax.
+  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+                                            Register left,
+                                            Register right,
+                                            Register scratch1,
+                                            Register scratch2);
 
-  void Generate(MacroAssembler* masm);
+ private:
+  virtual Major MajorKey() { return StringCompare; }
+  virtual int MinorKey() { return 0; }
+  virtual void Generate(MacroAssembler* masm);
+
+  static void GenerateAsciiCharsCompareLoop(
+      MacroAssembler* masm,
+      Register left,
+      Register right,
+      Register length,
+      Register scratch,
+      Label* chars_not_equal,
+      Label::Distance near_jump = Label::kFar);
 };
 
 
@@ -333,6 +441,74 @@
 };
 
 
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  StringDictionaryLookupStub(Register dictionary,
+                             Register result,
+                             Register index,
+                             LookupMode mode)
+      : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
+
+  void Generate(MacroAssembler* masm);
+
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register properties,
+      String* name,
+      Register r0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register elements,
+                                     Register name,
+                                     Register r0,
+                                     Register r1);
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("StringDictionaryLookupStub\n");
+  }
+#endif
+
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+  int MinorKey() {
+    return DictionaryBits::encode(dictionary_.code()) |
+        ResultBits::encode(result_.code()) |
+        IndexBits::encode(index_.code()) |
+        LookupModeBits::encode(mode_);
+  }
+
+  class DictionaryBits: public BitField<int, 0, 4> {};
+  class ResultBits: public BitField<int, 4, 4> {};
+  class IndexBits: public BitField<int, 8, 4> {};
+  class LookupModeBits: public BitField<LookupMode, 12, 1> {};
+
+  Register dictionary_;
+  Register result_;
+  Register index_;
+  LookupMode mode_;
+};
+
+
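
The MinorKey() above packs the stub's parameters into one integer with the BitField helpers declared in the class (DictionaryBits, ResultBits, IndexBits, LookupModeBits); each field owns a fixed bit range, so keys encode and decode without collisions. A minimal sketch of that encode/decode pattern, with a simplified BitField standing in for V8's template:

// Sketch of the BitField encode/decode pattern behind MinorKey() above.
#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    assert((static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0);
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
typedef BitField<int, 0, 4> DictionaryBits;          // register code, bits 0..3
typedef BitField<int, 4, 4> ResultBits;              // register code, bits 4..7
typedef BitField<int, 8, 4> IndexBits;               // register code, bits 8..11
typedef BitField<LookupMode, 12, 1> LookupModeBits;  // lookup mode, bit 12

uint32_t MakeMinorKey(int dictionary, int result, int index, LookupMode mode) {
  return DictionaryBits::encode(dictionary) | ResultBits::encode(result) |
         IndexBits::encode(index) | LookupModeBits::encode(mode);
}
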
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 2b7b7b7..7bb2e61 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1021,12 +1021,26 @@
         current += PrintRightOperand(current);
         AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
         current += 1;
+      } else if (third_byte == 0x0b) {
+        get_modrm(*current, &mod, &regop, &rm);
+        // roundsd xmm, xmm/m64, imm8
+        AppendToBuffer("roundsd %s, ", NameOfCPURegister(regop));
+        current += PrintRightOperand(current);
+        AppendToBuffer(", %d", (*current) & 3);
+        current += 1;
       } else {
         UnimplementedInstruction();
       }
     } else {
       get_modrm(*current, &mod, &regop, &rm);
-      if (opcode == 0x6E) {
+      if (opcode == 0x28) {
+        AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+      } else if (opcode == 0x29) {
+        AppendToBuffer("movapd ");
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(", %s", NameOfXMMRegister(regop));
+      } else if (opcode == 0x6E) {
         AppendToBuffer("mov%c %s,",
                        rex_w() ? 'q' : 'd',
                        NameOfXMMRegister(regop));
@@ -1044,6 +1058,10 @@
         AppendToBuffer("movdqa ");
         current += PrintRightXMMOperand(current);
         AppendToBuffer(", %s", NameOfXMMRegister(regop));
+      } else if (opcode == 0xD6) {
+        AppendToBuffer("movq ");
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(", %s", NameOfXMMRegister(regop));
       } else {
         const char* mnemonic = "?";
         if (opcode == 0x50) {
@@ -1145,6 +1163,11 @@
       get_modrm(*current, &mod, &regop, &rm);
       AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
       current += PrintRightXMMOperand(current);
+    } else if (opcode == 0x7E) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
     } else {
       UnimplementedInstruction();
     }
@@ -1162,6 +1185,22 @@
       current += 4;
     }  // else no immediate displacement.
     AppendToBuffer("nop");
+
+  } else if (opcode == 0x28) {
+    // movaps xmm, xmm/m128
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
+
+  } else if (opcode == 0x29) {
+    // movaps xmm/m128, xmm
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("movaps ");
+    current += PrintRightXMMOperand(current);
+    AppendToBuffer(", %s", NameOfXMMRegister(regop));
+
   } else if (opcode == 0xA2 || opcode == 0x31) {
     // RDTSC or CPUID
     AppendToBuffer("%s", mnemonic);
@@ -1173,6 +1212,13 @@
     byte_size_operand_ = idesc.byte_size_operation;
     current += PrintOperands(idesc.mnem, idesc.op_order_, current);
 
+  } else if (opcode == 0x57) {
+    // xorps xmm, xmm/m128
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
+
   } else if ((opcode & 0xF0) == 0x80) {
     // Jcc: Conditional jump (branch).
     current = data + JumpConditional(data);
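
The disassembler hunks above add decoding for a few SSE moves plus xorps and roundsd; they are distinguished by the mandatory prefix (none, 66, F3) and the 0F-escaped opcode byte, as the inline comments state, with roundsd handled separately under the 0F 3A escape. A small table-driven sketch of the (prefix, opcode) to mnemonic mapping for the cases handled above (restated from the comments in this diff, not an exhaustive decoder):

// Sketch of a (prefix, opcode) -> mnemonic lookup for the instructions the
// hunks above begin decoding. Prefix 0x00 means "no mandatory prefix".
#include <cstdint>

struct SSEEntry { uint8_t prefix; uint8_t opcode; const char* mnemonic; };

static const SSEEntry kTable[] = {
  { 0x00, 0x28, "movaps" },  // movaps xmm, xmm/m128
  { 0x00, 0x29, "movaps" },  // movaps xmm/m128, xmm
  { 0x00, 0x57, "xorps"  },  // xorps xmm, xmm/m128
  { 0x66, 0x28, "movapd" },  // movapd xmm, xmm/m128
  { 0x66, 0x29, "movapd" },  // movapd xmm/m128, xmm
  { 0x66, 0xD6, "movq"   },  // movq xmm/m64, xmm
  { 0xF3, 0x7E, "movq"   },  // movq xmm, xmm/m64
};

const char* Lookup(uint8_t prefix, uint8_t opcode) {
  for (const SSEEntry& e : kTable) {
    if (e.prefix == prefix && e.opcode == opcode) return e.mnemonic;
  }
  return "?";  // corresponds to UnimplementedInstruction() above
}
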
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 6933d78..57baf77 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -44,6 +44,12 @@
 #define __ ACCESS_MASM(masm_)
 
 
+static unsigned GetPropertyId(Property* property) {
+  if (property->is_synthetic()) return AstNode::kNoNumber;
+  return property->id();
+}
+
+
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm)
@@ -57,14 +63,18 @@
     ASSERT(patch_site_.is_bound() == info_emitted_);
   }
 
-  void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+  void EmitJumpIfNotSmi(Register reg,
+                        Label* target,
+                        Label::Distance near_jump = Label::kFar) {
     __ testb(reg, Immediate(kSmiTagMask));
-    EmitJump(not_carry, target);   // Always taken before patched.
+    EmitJump(not_carry, target, near_jump);   // Always taken before patched.
   }
 
-  void EmitJumpIfSmi(Register reg, NearLabel* target) {
+  void EmitJumpIfSmi(Register reg,
+                     Label* target,
+                     Label::Distance near_jump = Label::kFar) {
     __ testb(reg, Immediate(kSmiTagMask));
-    EmitJump(carry, target);  // Never taken before patched.
+    EmitJump(carry, target, near_jump);  // Never taken before patched.
   }
 
   void EmitPatchInfo() {
@@ -80,11 +90,11 @@
 
  private:
   // jc will be patched with jz, jnc will become jnz.
-  void EmitJump(Condition cc, NearLabel* target) {
+  void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
     ASSERT(!patch_site_.is_bound() && !info_emitted_);
     ASSERT(cc == carry || cc == not_carry);
     __ bind(&patch_site_);
-    __ j(cc, target);
+    __ j(cc, target, near_jump);
   }
 
   MacroAssembler* masm_;
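
JumpPatchSite above records the position of a short conditional jump ("jc will be patched with jz, jnc will become jnz"); the inline cache machinery later flips that condition by rewriting the single opcode byte in place. The Label::kNear distance matters because only the two-byte short form (opcode plus rel8) is patchable this way. A sketch of the byte-level patch, assuming the standard x86 short-jump encodings (0x72 jc, 0x73 jnc, 0x74 jz, 0x75 jnz):

// Sketch: flipping a short conditional jump in place, as the patch sites allow.
// Assumed encodings: 0x72 = jc, 0x73 = jnc, 0x74 = jz, 0x75 = jnz.
#include <cassert>
#include <cstddef>
#include <cstdint>

void PatchJump(uint8_t* code, size_t patch_site_offset) {
  uint8_t* opcode = code + patch_site_offset;
  switch (*opcode) {
    case 0x72: *opcode = 0x74; break;  // jc  -> jz
    case 0x73: *opcode = 0x75; break;  // jnc -> jnz
    default: assert(false);            // not a patchable short jump
  }
  // The rel8 displacement byte that follows stays as it is.
}
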
@@ -120,6 +130,22 @@
     __ int3();
   }
 #endif
+
+  // Strict mode functions need to replace the receiver with undefined
+  // when called as functions (without an explicit receiver
+  // object). rcx is zero for method calls and non-zero for function
+  // calls.
+  if (info->is_strict_mode()) {
+    Label ok;
+    __ testq(rcx, rcx);
+    __ j(zero, &ok, Label::kNear);
+    // +1 for return address.
+    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+    __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+    __ bind(&ok);
+  }
+
   __ push(rbp);  // Caller's frame pointer.
   __ movq(rbp, rsp);
   __ push(rsi);  // Callee's context.
@@ -233,9 +259,9 @@
 
     { Comment cmnt(masm_, "[ Stack check");
       PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
-      NearLabel ok;
+      Label ok;
       __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-      __ j(above_equal, &ok);
+      __ j(above_equal, &ok, Label::kNear);
       StackCheckStub stub;
       __ CallStub(&stub);
       __ bind(&ok);
@@ -264,9 +290,9 @@
 
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
   Comment cmnt(masm_, "[ Stack check");
-  NearLabel ok;
+  Label ok;
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  __ j(above_equal, &ok);
+  __ j(above_equal, &ok, Label::kNear);
   StackCheckStub stub;
   __ CallStub(&stub);
   // Record a mapping of this PC offset to the OSR id.  This is used to find
@@ -479,10 +505,10 @@
 void FullCodeGenerator::AccumulatorValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  NearLabel done;
+  Label done;
   __ bind(materialize_true);
   __ Move(result_register(), isolate()->factory()->true_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(materialize_false);
   __ Move(result_register(), isolate()->factory()->false_value());
   __ bind(&done);
@@ -492,10 +518,10 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
-  NearLabel done;
+  Label done;
   __ bind(materialize_true);
   __ Push(isolate()->factory()->true_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(materialize_false);
   __ Push(isolate()->factory()->false_value());
   __ bind(&done);
@@ -543,25 +569,10 @@
 void FullCodeGenerator::DoTest(Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  // Emit the inlined tests assumed by the stub.
-  __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
-  __ j(equal, if_false);
-  __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
-  __ j(equal, if_true);
-  __ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
-  __ j(equal, if_false);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ Cmp(result_register(), Smi::FromInt(0));
-  __ j(equal, if_false);
-  Condition is_smi = masm_->CheckSmi(result_register());
-  __ j(is_smi, if_true);
-
-  // Call the ToBoolean stub for all other cases.
   ToBooleanStub stub;
   __ push(result_register());
   __ CallStub(&stub);
   __ testq(rax, rax);
-
   // The stub returns nonzero for true.
   Split(not_zero, if_true, if_false, fall_through);
 }
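
DoTest above no longer open-codes the common cases; it always calls ToBooleanStub and branches on a nonzero result. For reference, ECMAScript ToBoolean treats undefined, null, false, plus and minus zero, NaN, and the empty string as false and everything else as true. A sketch of those semantics over a toy tagged value (the types here are stand-ins, not V8's object model):

// Sketch of the ToBoolean semantics the stub implements.
#include <cmath>
#include <string>

struct TaggedValue {
  enum Kind { UNDEFINED, NULL_VALUE, BOOLEAN, NUMBER, STRING, OBJECT } kind;
  bool boolean;
  double number;
  std::string string;
};

bool ToBoolean(const TaggedValue& v) {
  switch (v.kind) {
    case TaggedValue::UNDEFINED:
    case TaggedValue::NULL_VALUE: return false;
    case TaggedValue::BOOLEAN:    return v.boolean;
    case TaggedValue::NUMBER:     return v.number != 0 && !std::isnan(v.number);
    case TaggedValue::STRING:     return !v.string.empty();
    case TaggedValue::OBJECT:     return true;  // all objects are truthy
  }
  return true;
}
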
@@ -632,8 +643,8 @@
   // preparation to avoid preparing with the same AST id twice.
   if (!context()->IsTest() || !info_->IsOptimizable()) return;
 
-  NearLabel skip;
-  if (should_normalize) __ jmp(&skip);
+  Label skip;
+  if (should_normalize) __ jmp(&skip, Label::kNear);
 
   ForwardBailoutStack* current = forward_bailout_stack_;
   while (current != NULL) {
@@ -719,23 +730,20 @@
     }
 
   } else if (prop != NULL) {
-    if (function != NULL || mode == Variable::CONST) {
-      // We are declaring a function or constant that rewrites to a
-      // property.  Use (keyed) IC to set the initial value.  We
-      // cannot visit the rewrite because it's shared and we risk
-      // recording duplicate AST IDs for bailouts from optimized code.
+    // A const declaration aliasing a parameter is an illegal redeclaration.
+    ASSERT(mode != Variable::CONST);
+    if (function != NULL) {
+      // We are declaring a function that rewrites to a property.
+      // Use (keyed) IC to set the initial value.  We cannot visit the
+      // rewrite because it's shared and we risk recording duplicate AST
+      // IDs for bailouts from optimized code.
       ASSERT(prop->obj()->AsVariableProxy() != NULL);
       { AccumulatorValueContext for_object(this);
         EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
       }
-      if (function != NULL) {
-        __ push(rax);
-        VisitForAccumulatorValue(function);
-        __ pop(rdx);
-      } else {
-        __ movq(rdx, rax);
-        __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
-      }
+      __ push(rax);
+      VisitForAccumulatorValue(function);
+      __ pop(rdx);
       ASSERT(prop->key()->AsLiteral() != NULL &&
              prop->key()->AsLiteral()->handle()->IsSmi());
       __ Move(rcx, prop->key()->AsLiteral()->handle());
@@ -743,7 +751,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
     }
   }
 }
@@ -801,10 +809,10 @@
     bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
     JumpPatchSite patch_site(masm_);
     if (inline_smi_code) {
-      NearLabel slow_case;
+      Label slow_case;
       __ movq(rcx, rdx);
       __ or_(rcx, rax);
-      patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+      patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
 
       __ cmpq(rdx, rax);
       __ j(not_equal, &next_test);
@@ -816,7 +824,7 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    EmitCallIC(ic, &patch_site);
+    EmitCallIC(ic, &patch_site, clause->CompareId());
 
     __ testq(rax, rax);
     __ j(not_equal, &next_test);
@@ -901,9 +909,8 @@
   // check for an enum cache.  Leave the map in rbx for the subsequent
   // prototype load.
   __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
-  __ cmpq(rdx, empty_descriptor_array_value);
-  __ j(equal, &call_runtime);
+  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(rdx, &call_runtime);
 
   // Check that there is an enum cache in the non-empty instance
   // descriptors (rdx).  This is the case if the next enumeration
@@ -912,9 +919,9 @@
   __ JumpIfSmi(rdx, &call_runtime);
 
   // For all objects but the receiver, check that the cache is empty.
-  NearLabel check_prototype;
+  Label check_prototype;
   __ cmpq(rcx, rax);
-  __ j(equal, &check_prototype);
+  __ j(equal, &check_prototype, Label::kNear);
   __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
   __ cmpq(rdx, empty_fixed_array_value);
   __ j(not_equal, &call_runtime);
@@ -927,9 +934,9 @@
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
-  NearLabel use_cache;
+  Label use_cache;
   __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
-  __ jmp(&use_cache);
+  __ jmp(&use_cache, Label::kNear);
 
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
@@ -939,14 +946,14 @@
   // If we got a map from the runtime call, we can do a fast
   // modification check. Otherwise, we got a fixed array, and we have
   // to do a slow check.
-  NearLabel fixed_array;
+  Label fixed_array;
   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                  Heap::kMetaMapRootIndex);
-  __ j(not_equal, &fixed_array);
+  __ j(not_equal, &fixed_array, Label::kNear);
 
   // We got a map in register rax. Get the enumeration cache from it.
   __ bind(&use_cache);
-  __ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(rax, rcx);
   __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
   __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
@@ -986,10 +993,10 @@
 
   // Check if the expected map still matches that of the enumerable.
   // If not, we have to filter the key.
-  NearLabel update_each;
+  Label update_each;
   __ movq(rcx, Operand(rsp, 4 * kPointerSize));
   __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ j(equal, &update_each);
+  __ j(equal, &update_each, Label::kNear);
 
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
@@ -1097,7 +1104,7 @@
   if (s != NULL && s->is_eval_scope()) {
     // Loop up the context chain.  There is no frame effect so it is
     // safe to use raw labels here.
-    NearLabel next, fast;
+    Label next, fast;
     if (!context.is(temp)) {
       __ movq(temp, context);
     }
@@ -1106,7 +1113,7 @@
     __ bind(&next);
     // Terminate at global context.
     __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
-    __ j(equal, &fast);
+    __ j(equal, &fast, Label::kNear);
     // Check that extension is NULL.
     __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
     __ j(not_equal, slow);
@@ -1125,7 +1132,7 @@
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  EmitCallIC(ic, mode);
+  EmitCallIC(ic, mode, AstNode::kNoNumber);
 }
 
 
@@ -1206,7 +1213,7 @@
           __ Move(rax, key_literal->handle());
           Handle<Code> ic =
               isolate()->builtins()->KeyedLoadIC_Initialize();
-          EmitCallIC(ic, RelocInfo::CODE_TARGET);
+          EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
           __ jmp(done);
         }
       }
@@ -1229,7 +1236,7 @@
     __ Move(rcx, var->name());
     __ movq(rax, GlobalObjectOperand());
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
     context()->Plug(rax);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1255,11 +1262,11 @@
     if (var->mode() == Variable::CONST) {
       // Constants may be the hole value if they have not been initialized.
       // Unhole them.
-      NearLabel done;
+      Label done;
       MemOperand slot_operand = EmitSlotSearch(slot, rax);
       __ movq(rax, slot_operand);
       __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
-      __ j(not_equal, &done);
+      __ j(not_equal, &done, Label::kNear);
       __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
       __ bind(&done);
       context()->Plug(rax);
@@ -1292,7 +1299,7 @@
 
     // Do a keyed property load.
     Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
     context()->Plug(rax);
   }
 }
@@ -1405,7 +1412,7 @@
             Handle<Code> ic = is_strict_mode()
                 ? isolate()->builtins()->StoreIC_Initialize_Strict()
                 : isolate()->builtins()->StoreIC_Initialize();
-            EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1610,13 +1617,13 @@
     SetSourcePosition(expr->position() + 1);
     AccumulatorValueContext context(this);
     if (ShouldInlineSmiCase(op)) {
-      EmitInlineSmiBinaryOp(expr,
+      EmitInlineSmiBinaryOp(expr->binary_operation(),
                             op,
                             mode,
                             expr->target(),
                             expr->value());
     } else {
-      EmitBinaryOp(op, mode);
+      EmitBinaryOp(expr->binary_operation(), op, mode);
     }
     // Deoptimization point in case the binary operation may have side effects.
     PrepareForBailout(expr->binary_operation(), TOS_REG);
@@ -1650,18 +1657,18 @@
   Literal* key = prop->key()->AsLiteral();
   __ Move(rcx, key->handle());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
                                               Token::Value op,
                                               OverwriteMode mode,
                                               Expression* left,
@@ -1669,18 +1676,18 @@
   // Do combined smi check of the operands. Left operand is on the
   // stack (popped into rdx). Right operand is in rax but moved into
   // rcx to make the shifts easier.
-  NearLabel done, stub_call, smi_case;
+  Label done, stub_call, smi_case;
   __ pop(rdx);
   __ movq(rcx, rax);
   __ or_(rax, rdx);
   JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(rax, &smi_case);
+  patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
 
   __ bind(&stub_call);
   __ movq(rax, rcx);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
-  __ jmp(&done);
+  BinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+  __ jmp(&done, Label::kNear);
 
   __ bind(&smi_case);
   switch (op) {
@@ -1721,11 +1728,13 @@
 }
 
 
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+                                     Token::Value op,
                                      OverwriteMode mode) {
   __ pop(rdx);
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), NULL);  // NULL signals no inlined smi code.
+  BinaryOpStub stub(op, mode);
+  // NULL signals no inlined smi code.
+  EmitCallIC(stub.GetCode(), NULL, expr->id());
   context()->Plug(rax);
 }
 
@@ -1765,7 +1774,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1788,7 +1797,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
       break;
     }
   }
@@ -1814,7 +1823,7 @@
     Handle<Code> ic = is_strict_mode()
         ? isolate()->builtins()->StoreIC_Initialize_Strict()
         : isolate()->builtins()->StoreIC_Initialize();
-    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
 
   } else if (op == Token::INIT_CONST) {
     // Like var declarations, const declarations are hoisted to function
@@ -1917,7 +1926,7 @@
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -1957,7 +1966,7 @@
   Handle<Code> ic = is_strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
-  EmitCallIC(ic, RelocInfo::CODE_TARGET);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2008,8 +2017,8 @@
   // Call the IC initialization code.
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic =
-      ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-  EmitCallIC(ic, mode);
+      ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+  EmitCallIC(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2018,8 +2027,7 @@
 
 
 void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key,
-                                            RelocInfo::Mode mode) {
+                                            Expression* key) {
   // Load the key.
   VisitForAccumulatorValue(key);
 
@@ -2044,7 +2052,7 @@
   Handle<Code> ic =
       ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
   __ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize));  // Key.
-  EmitCallIC(ic, mode);
+  EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2052,7 +2060,7 @@
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2064,7 +2072,7 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arg_count, in_loop, flags);
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2155,7 +2163,7 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_IMPLICIT);
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2193,18 +2201,21 @@
     // function and receiver and have the slow path jump around this
     // code.
     if (done.is_linked()) {
-      NearLabel call;
-      __ jmp(&call);
+      Label call;
+      __ jmp(&call, Label::kNear);
       __ bind(&done);
       // Push function.
       __ push(rax);
       // Push global receiver.
-        __ movq(rbx, GlobalObjectOperand());
-        __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-        __ bind(&call);
+      __ movq(rbx, GlobalObjectOperand());
+      __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+      __ bind(&call);
     }
 
-    EmitCallWithStub(expr);
+    // The receiver is either the global receiver or an object found
+    // by LoadContextSlot. That object could be the hole if the
+    // receiver is implicitly the global object.
+    EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
   } else if (fun->AsProperty() != NULL) {
     // Call to an object property.
     Property* prop = fun->AsProperty();
@@ -2236,18 +2247,18 @@
         SetSourcePosition(prop->position());
 
         Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-        EmitCallIC(ic, RelocInfo::CODE_TARGET);
+        EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
         // Push result (function).
         __ push(rax);
         // Push Global receiver.
         __ movq(rcx, GlobalObjectOperand());
         __ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
-        EmitCallWithStub(expr);
+        EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
       } else {
         { PreservePositionScope scope(masm()->positions_recorder());
           VisitForStackValue(prop->obj());
         }
-        EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
+        EmitKeyedCallWithIC(expr, prop->key());
       }
     }
   } else {
@@ -2258,7 +2269,7 @@
     __ movq(rbx, GlobalObjectOperand());
     __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
     // Emit function call.
-    EmitCallWithStub(expr);
+    EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
   }
 
 #ifdef DEBUG
@@ -2447,7 +2458,7 @@
   // Look for valueOf symbol in the descriptor array, and indicate false if
   // found. The type is not checked, so if it is a transition it is a false
   // negative.
-  __ movq(rbx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+  __ LoadInstanceDescriptors(rbx, rbx);
   __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
   // rbx: descriptor array
   // rcx: length of descriptor array
@@ -2633,7 +2644,7 @@
 void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
 
-  NearLabel exit;
+  Label exit;
   // Get the number of formal parameters.
   __ Move(rax, Smi::FromInt(scope()->num_parameters()));
 
@@ -2641,7 +2652,7 @@
   __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &exit);
+  __ j(not_equal, &exit, Label::kNear);
 
   // Arguments adaptor case: Read the arguments length from the
   // adaptor frame.
@@ -2762,7 +2773,7 @@
   __ movd(xmm1, rcx);
   __ movd(xmm0, rax);
   __ cvtss2sd(xmm1, xmm1);
-  __ xorpd(xmm0, xmm1);
+  __ xorps(xmm0, xmm1);
   __ subsd(xmm0, xmm1);
   __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
 
@@ -3047,17 +3058,17 @@
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
-  int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForStackValue(args->at(0));  // Receiver.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i + 1));
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
   }
-  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
+  VisitForAccumulatorValue(args->last());  // Function.
 
-  // InvokeFunction requires function in rdi. Move it in there.
-  if (!result_register().is(rdi)) __ movq(rdi, result_register());
+  // InvokeFunction requires the function in rdi. Move it in there.
+  __ movq(rdi, result_register());
   ParameterCount count(arg_count);
-  __ InvokeFunction(rdi, count, CALL_FUNCTION);
+  __ InvokeFunction(rdi, count, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   context()->Plug(rax);
 }
@@ -3178,7 +3189,7 @@
   __ movq(cache,
           FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
 
-  NearLabel done, not_found;
+  Label done, not_found;
   // tmp now holds finger offset as a smi.
   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
@@ -3188,12 +3199,12 @@
                             index.reg,
                             index.scale,
                             FixedArray::kHeaderSize));
-  __ j(not_equal, &not_found);
+  __ j(not_equal, &not_found, Label::kNear);
   __ movq(rax, FieldOperand(cache,
                             index.reg,
                             index.scale,
                             FixedArray::kHeaderSize + kPointerSize));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&not_found);
   // Call runtime to perform the lookup.
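
The cache probed above keeps key/value pairs in a flat array plus a "finger" that indexes the most recently hit key; only that one slot is checked inline before falling back to the runtime. A minimal standalone sketch of the same strategy (names and layout are illustrative, not V8's):

    #include <cstddef>
    #include <vector>

    // Finger-based result cache: entries are key/value pairs flattened into
    // one array, and 'finger' indexes the key that hit most recently.
    struct ResultCache {
      std::vector<const void*> entries;  // key0, value0, key1, value1, ...
      size_t finger = 0;                 // index of the last key that hit

      // Returns the cached value, or nullptr to signal "call the runtime".
      const void* Probe(const void* key) const {
        if (finger + 1 < entries.size() && entries[finger] == key) {
          return entries[finger + 1];    // the value lives right after its key
        }
        return nullptr;
      }
    };
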
@@ -3217,25 +3228,25 @@
   VisitForAccumulatorValue(args->at(1));
   __ pop(left);
 
-  NearLabel done, fail, ok;
+  Label done, fail, ok;
   __ cmpq(left, right);
-  __ j(equal, &ok);
+  __ j(equal, &ok, Label::kNear);
   // Fail if either is a non-HeapObject.
   Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
-  __ j(either_smi, &fail);
-  __ j(zero, &fail);
+  __ j(either_smi, &fail, Label::kNear);
+  __ j(zero, &fail, Label::kNear);
   __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
   __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
           Immediate(JS_REGEXP_TYPE));
-  __ j(not_equal, &fail);
+  __ j(not_equal, &fail, Label::kNear);
   __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
-  __ j(not_equal, &fail);
+  __ j(not_equal, &fail, Label::kNear);
   __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
   __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
-  __ j(equal, &ok);
+  __ j(equal, &ok, Label::kNear);
   __ bind(&fail);
   __ Move(rax, isolate()->factory()->false_value());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&ok);
   __ Move(rax, isolate()->factory()->true_value());
   __ bind(&done);
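
The fast path above only reports two values as equivalent regexps when they are the same object, or both are heap objects that share a map, the left one is a JSRegExp, and both point at the same compiled regexp data; anything else yields false. A standalone sketch of that predicate (the struct and constant are illustrative):

    struct HeapValue {
      bool is_smi;
      int instance_type;
      const void* map;
      const void* regexp_data;  // contents of JSRegExp::kDataOffset
    };

    const int kJSRegExpType = 1;  // illustrative stand-in for JS_REGEXP_TYPE

    bool FastRegExpEquivalent(const HeapValue* left, const HeapValue* right) {
      if (left == right) return true;                    // identical objects
      if (left->is_smi || right->is_smi) return false;   // smis cannot be regexps
      if (left->instance_type != kJSRegExpType) return false;
      if (left->map != right->map) return false;         // must share the map
      return left->regexp_data == right->regexp_data;    // and the compiled data
    }
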
@@ -3595,9 +3606,10 @@
     // Call the JS runtime function using a call IC.
     __ Move(rcx, expr->name());
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
-        ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+        ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
+    EmitCallIC(ic, mode, expr->id());
     // Restore context register.
     __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3709,46 +3721,13 @@
       break;
     }
 
-    case Token::SUB: {
-      Comment cmt(masm_, "[ UnaryOperation (SUB)");
-      bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode overwrite =
-          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
-      // GenericUnaryOpStub expects the argument to be in the
-      // accumulator register rax.
-      VisitForAccumulatorValue(expr->expression());
-      __ CallStub(&stub);
-      context()->Plug(rax);
+    case Token::SUB:
+      EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
       break;
-    }
 
-    case Token::BIT_NOT: {
-      Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
-      // The generic unary operation stub expects the argument to be
-      // in the accumulator register rax.
-      VisitForAccumulatorValue(expr->expression());
-      Label done;
-      bool inline_smi_case = ShouldInlineSmiCase(expr->op());
-      if (inline_smi_case) {
-        Label call_stub;
-        __ JumpIfNotSmi(rax, &call_stub);
-        __ SmiNot(rax, rax);
-        __ jmp(&done);
-        __ bind(&call_stub);
-      }
-      bool overwrite = expr->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode mode =
-          overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      UnaryOpFlags flags = inline_smi_case
-          ? NO_UNARY_SMI_CODE_IN_STUB
-          : NO_UNARY_FLAGS;
-      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
-      __ CallStub(&stub);
-      __ bind(&done);
-      context()->Plug(rax);
+    case Token::BIT_NOT:
+      EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
       break;
-    }
 
     default:
       UNREACHABLE();
@@ -3756,6 +3735,23 @@
 }
 
 
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+                                           const char* comment) {
+  // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+  Comment cmt(masm_, comment);
+  bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+  UnaryOverwriteMode overwrite =
+      can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+  UnaryOpStub stub(expr->op(), overwrite);
+  // UnaryOpStub expects the argument to be in the
+  // accumulator register rax.
+  VisitForAccumulatorValue(expr->expression());
+  SetSourcePosition(expr->position());
+  EmitCallIC(stub.GetCode(), NULL, expr->id());
+  context()->Plug(rax);
+}
+
+
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -3819,10 +3815,10 @@
   }
 
   // Call ToNumber only if operand is not a smi.
-  NearLabel no_conversion;
+  Label no_conversion;
   Condition is_smi;
   is_smi = masm_->CheckSmi(rax);
-  __ j(is_smi, &no_conversion);
+  __ j(is_smi, &no_conversion, Label::kNear);
   ToNumberStub convert_stub;
   __ CallStub(&convert_stub);
   __ bind(&no_conversion);
@@ -3848,7 +3844,7 @@
   }
 
   // Inline smi case if we are in a loop.
-  NearLabel stub_call, done;
+  Label done, stub_call;
   JumpPatchSite patch_site(masm_);
 
   if (ShouldInlineSmiCase(expr->op())) {
@@ -3857,10 +3853,10 @@
     } else {
       __ SmiSubConstant(rax, rax, Smi::FromInt(1));
     }
-    __ j(overflow, &stub_call);
+    __ j(overflow, &stub_call, Label::kNear);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    patch_site.EmitJumpIfSmi(rax, &done);
+    patch_site.EmitJumpIfSmi(rax, &done, Label::kNear);
 
     __ bind(&stub_call);
     // Call stub. Undo operation first.
@@ -3875,14 +3871,14 @@
   SetSourcePosition(expr->position());
 
   // Call stub for +1/-1.
-  TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+  BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
   if (expr->op() == Token::INC) {
     __ Move(rdx, Smi::FromInt(1));
   } else {
     __ movq(rdx, rax);
     __ Move(rax, Smi::FromInt(1));
   }
-  EmitCallIC(stub.GetCode(), &patch_site);
+  EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
   __ bind(&done);
 
   // Store the value returned in rax.
@@ -3915,7 +3911,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->StoreIC_Initialize_Strict()
           : isolate()->builtins()->StoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3932,7 +3928,7 @@
       Handle<Code> ic = is_strict_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
           : isolate()->builtins()->KeyedStoreIC_Initialize();
-      EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -3959,7 +3955,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(rax);
   } else if (proxy != NULL &&
@@ -4145,10 +4141,10 @@
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
-        NearLabel slow_case;
+        Label slow_case;
         __ movq(rcx, rdx);
         __ or_(rcx, rax);
-        patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+        patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
         __ cmpq(rdx, rax);
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
@@ -4157,7 +4153,7 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      EmitCallIC(ic, &patch_site);
+      EmitCallIC(ic, &patch_site, expr->id());
 
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ testq(rax, rax);
@@ -4217,7 +4213,9 @@
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+                                   RelocInfo::Mode mode,
+                                   unsigned ast_id) {
   ASSERT(mode == RelocInfo::CODE_TARGET ||
          mode == RelocInfo::CODE_TARGET_CONTEXT);
   Counters* counters = isolate()->counters();
@@ -4236,34 +4234,13 @@
     default:
       break;
   }
-
-  __ call(ic, mode);
-
-  // Crankshaft doesn't need patching of inlined loads and stores.
-  // When compiling the snapshot we need to produce code that works
-  // with and without Crankshaft.
-  if (V8::UseCrankshaft() && !Serializer::enabled()) {
-    return;
-  }
-
-  // If we're calling a (keyed) load or store stub, we have to mark
-  // the call as containing no inlined code so we will not attempt to
-  // patch it.
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-    case Code::KEYED_LOAD_IC:
-    case Code::STORE_IC:
-    case Code::KEYED_STORE_IC:
-      __ nop();  // Signals no inlined code.
-      break;
-    default:
-      // Do nothing.
-      break;
-  }
+  __ call(ic, mode, ast_id);
 }
 
 
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+                                   JumpPatchSite* patch_site,
+                                   unsigned ast_id) {
   Counters* counters = isolate()->counters();
   switch (ic->kind()) {
     case Code::LOAD_IC:
@@ -4280,8 +4257,7 @@
     default:
       break;
   }
-
-  __ call(ic, RelocInfo::CODE_TARGET);
+  __ call(ic, RelocInfo::CODE_TARGET, ast_id);
   if (patch_site != NULL && patch_site->is_bound()) {
     patch_site->EmitPatchInfo();
   } else {
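
One change that recurs throughout this file (and the two below) is replacing NearLabel with a plain Label plus an explicit distance hint at each jump. The pattern, as it appears in the hunks above:

    Label done;                              // was: NearLabel done;
    __ j(not_equal, &done, Label::kNear);    // short (8-bit displacement) jump
    // ...
    __ jmp(&done, Label::kNear);
    __ bind(&done);

The Label::kNear hint lets the assembler keep the two-byte short-jump encoding that NearLabel used to guarantee; without the hint the jump falls back to the 32-bit displacement form.
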
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 5ca56ac..cec8894 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -97,58 +97,6 @@
 }
 
 
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r1|. Jump to the |miss| label
-// otherwise.
-static void GenerateStringDictionaryProbes(MacroAssembler* masm,
-                                           Label* miss,
-                                           Label* done,
-                                           Register elements,
-                                           Register name,
-                                           Register r0,
-                                           Register r1) {
-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
-
-  // Compute the capacity mask.
-  const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
-  __ decl(r0);
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  static const int kProbes = 4;
-  const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-  for (int i = 0; i < kProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
-    __ shrl(r1, Immediate(String::kHashShift));
-    if (i > 0) {
-      __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
-    }
-    __ and_(r1, r0);
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
-
-    // Check if the key is identical to the name.
-    __ cmpq(name, Operand(elements, r1, times_pointer_size,
-                          kElementsStartOffset - kHeapObjectTag));
-    if (i != kProbes - 1) {
-      __ j(equal, done);
-    } else {
-      __ j(not_equal, miss);
-    }
-  }
-}
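
The helper deleted here (its job now done by StringDictionaryLookupStub::GeneratePositiveLookup) probed the dictionary with the masked quadratic sequence (hash + i + i*i) & mask, scaling each index by the three-slot entry size. A standalone sketch of that probe loop, with illustrative types:

    #include <cstdint>

    static const int kProbes = 4;     // unrolled probe count, as in the old helper
    static const int kEntrySize = 3;  // each dictionary entry spans three slots

    // Returns the entry index holding 'name', or -1 after kProbes misses.
    // 'capacity' must be a power of two so capacity - 1 works as a mask.
    int ProbeStringDictionary(const void* const* elements, uint32_t capacity,
                              uint32_t hash, const void* name) {
      uint32_t mask = capacity - 1;
      for (int i = 0; i < kProbes; i++) {
        uint32_t index = (hash + i + i * i) & mask;  // masked quadratic probe
        if (elements[index * kEntrySize] == name) return static_cast<int>(index);
      }
      return -1;  // give up: the generated code jumps to the miss label instead
    }
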
-
 
 // Helper function used to load a property from a dictionary backing storage.
 // This function may return false negatives, so miss_label
@@ -179,13 +127,13 @@
   Label done;
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss_label,
-                                 &done,
-                                 elements,
-                                 name,
-                                 r0,
-                                 r1);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     r0,
+                                                     r1);
 
   // If probing finds an entry in the dictionary, r0 contains the
   // index into the dictionary. Check that the value is a normal
@@ -237,13 +185,13 @@
   Label done;
 
   // Probe the dictionary.
-  GenerateStringDictionaryProbes(masm,
-                                 miss_label,
-                                 &done,
-                                 elements,
-                                 name,
-                                 scratch0,
-                                 scratch1);
+  StringDictionaryLookupStub::GeneratePositiveLookup(masm,
+                                                     miss_label,
+                                                     &done,
+                                                     elements,
+                                                     name,
+                                                     scratch0,
+                                                     scratch1);
 
   // If probing finds an entry in the dictionary, scratch0 contains the
   // index into the dictionary. Check that the value is a normal
@@ -381,11 +329,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the inlined
-// load instruction.
-const int LoadIC::kOffsetToLoadInstruction = 20;
-
-
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
@@ -715,7 +658,7 @@
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
@@ -758,7 +701,7 @@
       1);
 
   __ bind(&slow);
-  GenerateMiss(masm);
+  GenerateMiss(masm, false);
 }
 
 
@@ -852,10 +795,10 @@
   // rax: value
   // rbx: receiver's elements array (a FixedArray)
   // rcx: index
-  NearLabel non_smi_value;
+  Label non_smi_value;
   __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
           rax);
-  __ JumpIfNotSmi(rax, &non_smi_value);
+  __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
   __ ret(0);
   __ bind(&non_smi_value);
   // Slow case that needs to retain rcx for use by RecordWrite.
@@ -870,7 +813,8 @@
 // The generated code falls through if both probes miss.
 static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
                                           int argc,
-                                          Code::Kind kind) {
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rdx                      : receiver
@@ -881,7 +825,7 @@
   Code::Flags flags = Code::ComputeFlags(kind,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC,
-                                         Code::kNoExtraICState,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
@@ -948,7 +892,8 @@
 
   // Invoke the function.
   ParameterCount actual(argc);
-  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+  __ InvokeFunction(rdi, actual, JUMP_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 }
 
 
@@ -980,7 +925,10 @@
 }
 
 
-static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rsp[0]                   : return address
@@ -1037,12 +985,21 @@
   }
 
   // Invoke the function.
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   ParameterCount actual(argc);
-  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+  __ InvokeFunction(rdi,
+                    actual,
+                    JUMP_FUNCTION,
+                    NullCallWrapper(),
+                    call_kind);
 }
 
 
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMegamorphic(MacroAssembler* masm,
+                                 int argc,
+                                 Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rsp[0]                   : return address
@@ -1055,8 +1012,8 @@
 
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
-  GenerateMiss(masm, argc);
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
+  GenerateMiss(masm, argc, extra_ic_state);
 }
 
 
@@ -1072,11 +1029,13 @@
   // -----------------------------------
 
   GenerateCallNormal(masm, argc);
-  GenerateMiss(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
 }
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rsp[0]                   : return address
@@ -1087,7 +1046,7 @@
   // rsp[(argc + 1) * 8]      : argument 0 = receiver
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
 }
 
 
@@ -1178,7 +1137,10 @@
 
   __ bind(&lookup_monomorphic_cache);
   __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
-  GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+  GenerateMonomorphicCacheProbe(masm,
+                                argc,
+                                Code::KEYED_CALL_IC,
+                                Code::kNoExtraICState);
   // Fall through on miss.
 
   __ bind(&slow_call);
@@ -1231,7 +1193,7 @@
   // rsp[(argc + 1) * 8]      : argument 0 = receiver
   // -----------------------------------
 
-  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
 }
 
 
@@ -1297,131 +1259,7 @@
 }
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a test rax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  Address delta_address = test_instruction_address + 1;
-  // The delta to the start of the map check instruction.
-  int delta = *reinterpret_cast<int*>(delta_address);
-
-  // The map address is the last 8 bytes of the 10-byte
-  // immediate move instruction, so we add 2 to get the
-  // offset to the last 8 bytes.
-  Address map_address = test_instruction_address + delta + 2;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // The offset is in the 32-bit displacement of a seven byte
-  // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
-  // so we add 3 to get the offset of the displacement.
-  Address offset_address =
-      test_instruction_address + delta + kOffsetToLoadInstruction + 3;
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-  return true;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  // TODO(<bug#>): implement this.
-  return false;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test rax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Extract the encoded deltas from the test rax instruction.
-  Address encoded_offsets_address = test_instruction_address + 1;
-  int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
-  int delta_to_map_check = -(encoded_offsets & 0xFFFF);
-  int delta_to_record_write = encoded_offsets >> 16;
-
-  // Patch the map to check. The map address is the last 8 bytes of
-  // the 10-byte immediate move instruction.
-  Address map_check_address = test_instruction_address + delta_to_map_check;
-  Address map_address = map_check_address + 2;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // Patch the offset in the store instruction. The offset is in the
-  // last 4 bytes of a 7 byte register-to-memory move instruction.
-  Address offset_address =
-      map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
-  // The offset should have initial value (kMaxInt - 1), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  // Patch the offset in the write-barrier code. The offset is the
-  // last 4 bytes of a 7 byte lea instruction.
-  offset_address = map_check_address + delta_to_record_write + 3;
-  // The offset should have initial value (kMaxInt), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Arguments are address of start of call sequence that called
-  // the IC,
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // The keyed load has a fast inlined case if the IC call instruction
-  // is immediately followed by a test instruction.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Fetch the offset from the test instruction to the map compare
-  // instructions (starting with the 64-bit immediate mov of the map
-  // address). This offset is stored in the last 4 bytes of the 5
-  // byte test instruction.
-  Address delta_address = test_instruction_address + 1;
-  int delta = *reinterpret_cast<int*>(delta_address);
-  // Compute the map address.  The map address is in the last 8 bytes
-  // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
-  // to the offset to get the map address.
-  Address map_address = test_instruction_address + delta + 2;
-  // Patch the map check.
-  *(reinterpret_cast<Object**>(map_address)) = map;
-  return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -1437,8 +1275,10 @@
   __ push(rbx);  // return address
 
   // Perform tail call to the entry.
-  ExternalReference ref
-      = ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+  ExternalReference ref = force_generic
+      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
+                          masm->isolate())
+      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 2, 1);
 }
 
@@ -1503,11 +1343,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the inlined
-// store instruction.
-const int StoreIC::kOffsetToStoreInstruction = 20;
-
-
 void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : value
@@ -1627,7 +1462,7 @@
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax     : value
   //  -- rcx     : key
@@ -1642,8 +1477,30 @@
   __ push(rbx);  // return address
 
   // Do tail-call to runtime routine.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+  // ----------- S t a t e -------------
+  //  -- rax     : value
+  //  -- rcx     : key
+  //  -- rdx     : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+
+  __ pop(rbx);
+  __ push(rdx);  // receiver
+  __ push(rcx);  // key
+  __ push(rax);  // value
+  __ push(rbx);  // return address
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref = force_generic
+    ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
+                        masm->isolate())
+    : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }
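
The StoreIC::PatchInlinedStore code removed above located its two patch targets through one 32-bit immediate: the low 16 bits held the (negated) distance back to the map check, the high 16 bits the distance forward to the write-barrier code. A standalone sketch of that packing, with decoding that mirrors the removed code:

    #include <cassert>
    #include <cstdint>

    // Pack the two patch-site deltas the way the removed code expected them:
    // the map check precedes the marker (negative delta, low 16 bits) and the
    // record-write code follows it (positive delta, high 16 bits).
    int32_t EncodeOffsets(int delta_to_map_check, int delta_to_record_write) {
      assert(delta_to_map_check <= 0 && -delta_to_map_check < 0x10000);
      assert(delta_to_record_write >= 0 && delta_to_record_write < 0x8000);
      return (delta_to_record_write << 16) | (-delta_to_map_check & 0xFFFF);
    }

    // Mirrors the decoding in the removed StoreIC::PatchInlinedStore.
    void DecodeOffsets(int32_t encoded,
                       int* delta_to_map_check,
                       int* delta_to_record_write) {
      *delta_to_map_check = -(encoded & 0xFFFF);
      *delta_to_record_write = encoded >> 16;
    }
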
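A usage note on the sketch above: the round trip DecodeOffsets(EncodeOffsets(d1, d2)) recovers d1 and d2 exactly as long as the deltas stay within the 16-bit ranges asserted, which is why the removed code could hide both distances inside the immediate of a single test instruction.
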
 
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 822295e..9fa11fa 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -49,7 +49,7 @@
         deoptimization_index_(deoptimization_index) { }
   virtual ~SafepointGenerator() { }
 
-  virtual void BeforeCall(int call_size) {
+  virtual void BeforeCall(int call_size) const {
     ASSERT(call_size >= 0);
     // Ensure that we have enough space after the previous safepoint position
     // for the jump generated there.
@@ -62,7 +62,7 @@
     }
   }
 
-  virtual void AfterCall() {
+  virtual void AfterCall() const {
     codegen_->RecordSafepoint(pointers_, deoptimization_index_);
   }
 
@@ -91,7 +91,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
-  code->set_stack_slots(StackSlotCount());
+  code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
   Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -140,13 +140,28 @@
   }
 #endif
 
+  // Strict mode functions need to replace the receiver with undefined
+  // when called as functions (without an explicit receiver
+  // object). rcx is zero for method calls and non-zero for function
+  // calls.
+  if (info_->is_strict_mode()) {
+    Label ok;
+    __ testq(rcx, rcx);
+    __ j(zero, &ok, Label::kNear);
+    // +1 for return address.
+    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+    __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+    __ bind(&ok);
+  }
+
   __ push(rbp);  // Caller's frame pointer.
   __ movq(rbp, rsp);
   __ push(rsi);  // Callee's context.
   __ push(rdi);  // Callee's JS function.
 
   // Reserve space for the stack slots needed by the code.
-  int slots = StackSlotCount();
+  int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
       __ Set(rax, slots);
@@ -290,7 +305,7 @@
   while (byte_count-- > 0) {
     __ int3();
   }
-  safepoints_.Emit(masm(), StackSlotCount());
+  safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
@@ -418,7 +433,7 @@
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsArgument()) {
     ASSERT(is_tagged);
-    int src_index = StackSlotCount() + op->index();
+    int src_index = GetStackSlotCount() + op->index();
     translation->StoreStackSlot(src_index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -453,7 +468,7 @@
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
-  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+  if (code->kind() == Code::BINARY_OP_IC ||
       code->kind() == Code::COMPARE_IC) {
     __ nop();
   }
@@ -690,7 +705,7 @@
   }
   __ bind(label->label());
   current_block_ = label->block_id();
-  LCodeGen::DoGap(label);
+  DoGap(label);
 }
 
 
@@ -716,6 +731,11 @@
 }
 
 
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
 void LCodeGen::DoParameter(LParameter* instr) {
   // Nothing to do.
 }
@@ -780,27 +800,29 @@
 
     if (divisor < 0) divisor = -divisor;
 
-    NearLabel positive_dividend, done;
+    Label positive_dividend, done;
     __ testl(dividend, dividend);
-    __ j(not_sign, &positive_dividend);
+    __ j(not_sign, &positive_dividend, Label::kNear);
     __ negl(dividend);
     __ andl(dividend, Immediate(divisor - 1));
     __ negl(dividend);
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ j(not_zero, &done);
+      __ j(not_zero, &done, Label::kNear);
       DeoptimizeIf(no_condition, instr->environment());
     } else {
-      __ jmp(&done);
+      __ jmp(&done, Label::kNear);
     }
     __ bind(&positive_dividend);
     __ andl(dividend, Immediate(divisor - 1));
     __ bind(&done);
   } else {
-    LOperand* right = instr->InputAt(1);
-    Register right_reg = ToRegister(right);
+    Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+    Register left_reg = ToRegister(instr->InputAt(0));
+    Register right_reg = ToRegister(instr->InputAt(1));
+    Register result_reg = ToRegister(instr->result());
 
-    ASSERT(ToRegister(instr->result()).is(rdx));
-    ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+    ASSERT(left_reg.is(rax));
+    ASSERT(result_reg.is(rdx));
     ASSERT(!right_reg.is(rax));
     ASSERT(!right_reg.is(rdx));
 
@@ -810,21 +832,60 @@
       DeoptimizeIf(zero, instr->environment());
     }
 
+    __ testl(left_reg, left_reg);
+    __ j(zero, &remainder_eq_dividend, Label::kNear);
+    __ j(sign, &slow, Label::kNear);
+
+    __ testl(right_reg, right_reg);
+    __ j(not_sign, &both_positive, Label::kNear);
+    // The sign of the divisor doesn't matter.
+    __ neg(right_reg);
+
+    __ bind(&both_positive);
+    // If the dividend is smaller than the nonnegative
+    // divisor, the dividend is the result.
+    __ cmpl(left_reg, right_reg);
+    __ j(less, &remainder_eq_dividend, Label::kNear);
+
+    // Check if the divisor is a PowerOfTwo integer.
+    Register scratch = ToRegister(instr->TempAt(0));
+    __ movl(scratch, right_reg);
+    __ subl(scratch, Immediate(1));
+    __ testl(scratch, right_reg);
+    __ j(not_zero, &do_subtraction, Label::kNear);
+    __ andl(left_reg, scratch);
+    __ jmp(&remainder_eq_dividend, Label::kNear);
+
+    __ bind(&do_subtraction);
+    const int kUnfolds = 3;
+    // Try a few subtractions of the dividend.
+    __ movl(scratch, left_reg);
+    for (int i = 0; i < kUnfolds; i++) {
+      // Reduce the dividend by the divisor.
+      __ subl(left_reg, right_reg);
+      // Check if the dividend is less than the divisor.
+      __ cmpl(left_reg, right_reg);
+      __ j(less, &remainder_eq_dividend, Label::kNear);
+    }
+    __ movl(left_reg, scratch);
+
+    // Slow case, using idiv instruction.
+    __ bind(&slow);
     // Sign extend eax to edx.
     // (We are using only the low 32 bits of the values.)
     __ cdq();
 
     // Check for (0 % -x) that will produce negative zero.
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      NearLabel positive_left;
-      NearLabel done;
-      __ testl(rax, rax);
-      __ j(not_sign, &positive_left);
+      Label positive_left;
+      Label done;
+      __ testl(left_reg, left_reg);
+      __ j(not_sign, &positive_left, Label::kNear);
       __ idivl(right_reg);
 
       // Test the remainder for 0, because then the result would be -0.
-      __ testl(rdx, rdx);
-      __ j(not_zero, &done);
+      __ testl(result_reg, result_reg);
+      __ j(not_zero, &done, Label::kNear);
 
       DeoptimizeIf(no_condition, instr->environment());
       __ bind(&positive_left);
@@ -833,6 +894,12 @@
     } else {
       __ idivl(right_reg);
     }
+    __ jmp(&done, Label::kNear);
+
+    __ bind(&remainder_eq_dividend);
+    __ movl(result_reg, left_reg);
+
+    __ bind(&done);
   }
 }
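
DoModI now short-circuits the expensive idiv: a zero or already-small dividend is its own remainder, a power-of-two divisor reduces to a mask, and a few unrolled subtractions handle small quotients before falling back to the division instruction. A standalone sketch of the same strategy for non-negative operands (the generated code takes the slow path for a negative dividend, flips the sign of a negative divisor, and deoptimizes on a zero divisor):

    #include <cstdint>

    int32_t FastModulo(int32_t dividend, int32_t divisor) {
      if (dividend == 0 || dividend < divisor) return dividend;  // remainder == dividend
      if ((divisor & (divisor - 1)) == 0) {                      // power-of-two divisor
        return dividend & (divisor - 1);
      }
      int32_t reduced = dividend;
      for (int i = 0; i < 3; i++) {                              // kUnfolds == 3
        reduced -= divisor;
        if (reduced < divisor) return reduced;                   // small quotient
      }
      return dividend % divisor;                                 // slow path: idiv
    }
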
 
@@ -855,9 +922,9 @@
 
   // Check for (0 / -x) that will produce negative zero.
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    NearLabel left_not_zero;
+    Label left_not_zero;
     __ testl(left_reg, left_reg);
-    __ j(not_zero, &left_not_zero);
+    __ j(not_zero, &left_not_zero, Label::kNear);
     __ testl(right_reg, right_reg);
     DeoptimizeIf(sign, instr->environment());
     __ bind(&left_not_zero);
@@ -865,9 +932,9 @@
 
   // Check for (-kMinInt / -1).
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    NearLabel left_not_min_int;
+    Label left_not_min_int;
     __ cmpl(left_reg, Immediate(kMinInt));
-    __ j(not_zero, &left_not_min_int);
+    __ j(not_zero, &left_not_min_int, Label::kNear);
     __ cmpl(right_reg, Immediate(-1));
     DeoptimizeIf(zero, instr->environment());
     __ bind(&left_not_min_int);
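
Both guards above exist because the untagged 32-bit result cannot represent what JavaScript requires: 0 divided by a negative number is -0, and kMinInt / -1 overflows (idiv faults on it), so each case deoptimizes. A small standalone illustration:

    #include <climits>
    #include <cstdio>

    int main() {
      // JavaScript requires 0 / -5 to be -0, which an untagged 32-bit integer
      // cannot represent, hence the minus-zero bailout above.
      printf("0 / -5 as int32: %d (JS result is -0)\n", 0 / -5);

      // INT_MIN / -1 would be 2147483648, one past INT_MAX; the idiv
      // instruction raises #DE for it, hence the kMinInt / -1 check.
      printf("INT_MIN = %d, so -INT_MIN does not fit in int32\n", INT_MIN);
      return 0;
    }
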
@@ -946,9 +1013,9 @@
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // Bail out if the result is supposed to be negative zero.
-    NearLabel done;
+    Label done;
     __ testl(left, left);
-    __ j(not_zero, &done);
+    __ j(not_zero, &done, Label::kNear);
     if (right->IsConstantOperand()) {
       if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
         DeoptimizeIf(no_condition, instr->environment());
@@ -1113,7 +1180,7 @@
   // Use xor to produce +0.0 in a fast and compact way, but avoid to
   // do so if the constant is -0.0.
   if (int_val == 0) {
-    __ xorpd(res, res);
+    __ xorps(res, res);
   } else {
     Register tmp = ToRegister(instr->TempAt(0));
     __ Set(tmp, int_val);
@@ -1153,13 +1220,13 @@
   Register input = ToRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
   ASSERT(input.is(result));
-  NearLabel done;
+  Label done;
   // If the object is a smi return the object.
-  __ JumpIfSmi(input, &done);
+  __ JumpIfSmi(input, &done, Label::kNear);
 
   // If the object is not a value type, return the object.
   __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
   __ movq(result, FieldOperand(input, JSValue::kValueOffset));
 
   __ bind(&done);
@@ -1225,12 +1292,12 @@
       break;
     case Token::MOD:
       __ PrepareCallCFunction(2);
-      __ movsd(xmm0, left);
+      __ movaps(xmm0, left);
       ASSERT(right.is(xmm1));
       __ CallCFunction(
           ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
       __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-      __ movsd(result, xmm0);
+      __ movaps(result, xmm0);
       break;
     default:
       UNREACHABLE();
@@ -1244,7 +1311,7 @@
   ASSERT(ToRegister(instr->InputAt(1)).is(rax));
   ASSERT(ToRegister(instr->result()).is(rax));
 
-  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ nop();  // Signals no inlined code.
 }
@@ -1290,7 +1357,7 @@
     EmitBranch(true_block, false_block, not_zero);
   } else if (r.IsDouble()) {
     XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
-    __ xorpd(xmm0, xmm0);
+    __ xorps(xmm0, xmm0);
     __ ucomisd(reg, xmm0);
     EmitBranch(true_block, false_block, not_equal);
   } else {
@@ -1318,14 +1385,14 @@
       __ JumpIfSmi(reg, true_label);
 
       // Test for double values. Plus/minus zero and NaN are false.
-      NearLabel call_stub;
+      Label call_stub;
       __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
                      Heap::kHeapNumberMapRootIndex);
-      __ j(not_equal, &call_stub);
+      __ j(not_equal, &call_stub, Label::kNear);
 
       // HeapNumber => false iff +0, -0, or NaN. These three cases set the
       // zero flag when compared to zero using ucomisd.
-      __ xorpd(xmm0, xmm0);
+      __ xorps(xmm0, xmm0);
       __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
       __ j(zero, false_label);
       __ jmp(true_label);
@@ -1435,20 +1502,20 @@
   LOperand* right = instr->InputAt(1);
   LOperand* result = instr->result();
 
-  NearLabel unordered;
+  Label unordered;
   if (instr->is_double()) {
     // Don't base result on EFLAGS when a NaN is involved. Instead
     // jump to the unordered case, which produces a false value.
     __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-    __ j(parity_even, &unordered);
+    __ j(parity_even, &unordered, Label::kNear);
   } else {
     EmitCmpI(left, right);
   }
 
-  NearLabel done;
+  Label done;
   Condition cc = TokenToCondition(instr->op(), instr->is_double());
   __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
-  __ j(cc, &done);
+  __ j(cc, &done, Label::kNear);
 
   __ bind(&unordered);
   __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
@@ -1481,11 +1548,11 @@
   Register right = ToRegister(instr->InputAt(1));
   Register result = ToRegister(instr->result());
 
-  NearLabel different, done;
+  Label different, done;
   __ cmpq(left, right);
-  __ j(not_equal, &different);
+  __ j(not_equal, &different, Label::kNear);
   __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&different);
   __ LoadRoot(result, Heap::kFalseValueRootIndex);
   __ bind(&done);
@@ -1503,6 +1570,31 @@
 }
 
 
+void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  Register right = ToRegister(instr->InputAt(1));
+  Register result = ToRegister(instr->result());
+
+  Label done;
+  __ cmpq(left, right);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ j(not_equal, &done, Label::kNear);
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
+  Register left = ToRegister(instr->InputAt(0));
+  Register right = ToRegister(instr->InputAt(1));
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  __ cmpq(left, right);
+  EmitBranch(true_block, false_block, equal);
+}
+
+
 void LCodeGen::DoIsNull(LIsNull* instr) {
   Register reg = ToRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
@@ -1519,27 +1611,27 @@
   if (instr->is_strict()) {
     ASSERT(Heap::kTrueValueRootIndex >= 0);
     __ movl(result, Immediate(Heap::kTrueValueRootIndex));
-    NearLabel load;
-    __ j(equal, &load);
+    Label load;
+    __ j(equal, &load, Label::kNear);
     __ Set(result, Heap::kFalseValueRootIndex);
     __ bind(&load);
     __ LoadRootIndexed(result, result, 0);
   } else {
-    NearLabel true_value, false_value, done;
-    __ j(equal, &true_value);
+    Label false_value, true_value, done;
+    __ j(equal, &true_value, Label::kNear);
     __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
-    __ j(equal, &true_value);
-    __ JumpIfSmi(reg, &false_value);
+    __ j(equal, &true_value, Label::kNear);
+    __ JumpIfSmi(reg, &false_value, Label::kNear);
     // Check for undetectable objects by looking in the bit field in
     // the map. The object has already been smi checked.
     Register scratch = result;
     __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
     __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
-    __ j(not_zero, &true_value);
+    __ j(not_zero, &true_value, Label::kNear);
     __ bind(&false_value);
     __ LoadRoot(result, Heap::kFalseValueRootIndex);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
     __ bind(&true_value);
     __ LoadRoot(result, Heap::kTrueValueRootIndex);
     __ bind(&done);
@@ -1674,6 +1766,40 @@
 }
 
 
+void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  Label false_label, done;
+  __ JumpIfSmi(input, &false_label);
+  __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
+  __ testb(FieldOperand(result, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsUndetectable));
+  __ j(zero, &false_label);
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  __ jmp(&done);
+  __ bind(&false_label);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
+  __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsUndetectable));
+  EmitBranch(true_block, false_block, not_zero);
+}
+
+
 static InstanceType TestType(HHasInstanceType* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1700,12 +1826,13 @@
 
   ASSERT(instr->hydrogen()->value()->representation().IsTagged());
   __ testl(input, Immediate(kSmiTagMask));
-  NearLabel done, is_false;
+  Label done, is_false;
   __ j(zero, &is_false);
   __ CmpObjectType(input, TestType(instr->hydrogen()), result);
-  __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
+  __ j(NegateCondition(BranchCondition(instr->hydrogen())),
+       &is_false, Label::kNear);
   __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&is_false);
   __ LoadRoot(result, Heap::kFalseValueRootIndex);
   __ bind(&done);
@@ -1749,8 +1876,8 @@
   __ LoadRoot(result, Heap::kTrueValueRootIndex);
   __ testl(FieldOperand(input, String::kHashFieldOffset),
            Immediate(String::kContainsCachedArrayIndexMask));
-  NearLabel done;
-  __ j(zero, &done);
+  Label done;
+  __ j(zero, &done, Label::kNear);
   __ LoadRoot(result, Heap::kFalseValueRootIndex);
   __ bind(&done);
 }
@@ -1829,7 +1956,7 @@
   ASSERT(input.is(result));
   Register temp = ToRegister(instr->TempAt(0));
   Handle<String> class_name = instr->hydrogen()->class_name();
-  NearLabel done;
+  Label done;
   Label is_true, is_false;
 
   EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
@@ -1838,7 +1965,7 @@
 
   __ bind(&is_true);
   __ LoadRoot(result, Heap::kTrueValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&is_false);
   __ LoadRoot(result, Heap::kFalseValueRootIndex);
@@ -1878,11 +2005,11 @@
   __ push(ToRegister(instr->InputAt(0)));
   __ push(ToRegister(instr->InputAt(1)));
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  NearLabel true_value, done;
+  Label true_value, done;
   __ testq(rax, rax);
-  __ j(zero, &true_value);
+  __ j(zero, &true_value, Label::kNear);
   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&true_value);
   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
   __ bind(&done);
@@ -1932,7 +2059,7 @@
   // This is the inlined call site instanceof cache. The two occurrences of the
   // hole value will be patched to the last map/result pair generated by the
   // instanceof stub.
-  NearLabel cache_miss;
+  Label cache_miss;
   // Use a temp register to avoid memory operands with variable lengths.
   Register map = ToRegister(instr->TempAt(0));
   __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
@@ -1940,7 +2067,7 @@
   __ movq(kScratchRegister, factory()->the_hole_value(),
           RelocInfo::EMBEDDED_OBJECT);
   __ cmpq(map, kScratchRegister);  // Patched to cached map.
-  __ j(not_equal, &cache_miss);
+  __ j(not_equal, &cache_miss, Label::kNear);
   // Patched to load either true or false.
   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
 #ifdef DEBUG
@@ -1955,7 +2082,7 @@
   // before calling the deferred code.
   __ bind(&cache_miss);  // Null is not an instance of anything.
   __ CompareRoot(object, Heap::kNullValueRootIndex);
-  __ j(equal, &false_result);
+  __ j(equal, &false_result, Label::kNear);
 
   // String values are not instances of anything.
   __ JumpIfNotString(object, kScratchRegister, deferred->entry());
@@ -2022,11 +2149,11 @@
   if (op == Token::GT || op == Token::LTE) {
     condition = ReverseCondition(condition);
   }
-  NearLabel true_value, done;
+  Label true_value, done;
   __ testq(rax, rax);
-  __ j(condition, &true_value);
+  __ j(condition, &true_value, Label::kNear);
   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&true_value);
   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
   __ bind(&done);
@@ -2061,7 +2188,7 @@
   }
   __ movq(rsp, rbp);
   __ pop(rbp);
-  __ Ret((ParameterCount() + 1) * kPointerSize, rcx);
+  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
 }
 
 
@@ -2159,23 +2286,29 @@
 }
 
 
-void LCodeGen::EmitLoadField(Register result,
-                             Register object,
-                             Handle<Map> type,
-                             Handle<String> name) {
+void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
+                                               Register object,
+                                               Handle<Map> type,
+                                               Handle<String> name) {
   LookupResult lookup;
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsProperty() && lookup.type() == FIELD);
-  int index = lookup.GetLocalFieldIndexFromMap(*type);
-  int offset = index * kPointerSize;
-  if (index < 0) {
-    // Negative property indices are in-object properties, indexed
-    // from the end of the fixed part of the object.
-    __ movq(result, FieldOperand(object, offset + type->instance_size()));
+  ASSERT(lookup.IsProperty() &&
+         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
+  if (lookup.type() == FIELD) {
+    int index = lookup.GetLocalFieldIndexFromMap(*type);
+    int offset = index * kPointerSize;
+    if (index < 0) {
+      // Negative property indices are in-object properties, indexed
+      // from the end of the fixed part of the object.
+      __ movq(result, FieldOperand(object, offset + type->instance_size()));
+    } else {
+      // Non-negative property indices are in the properties array.
+      __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
+      __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+    }
   } else {
-    // Non-negative property indices are in the properties array.
-    __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
-    __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
+    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
   }
 }
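
The helper above (now also handling constant-function properties) keeps the old field-offset rule: a negative descriptor index addresses an in-object slot counted back from the end of the fixed part of the object, while a non-negative index addresses the external properties array. A standalone sketch of that offset computation (constants are illustrative):

    const int kPointerSize = 8;            // x64
    const int kFixedArrayHeaderSize = 16;  // illustrative backing-store header

    struct FieldLocation {
      bool in_object;   // true: offset is relative to the object itself
      int byte_offset;  // otherwise relative to the properties backing store
    };

    FieldLocation LocateField(int index, int instance_size) {
      FieldLocation loc;
      if (index < 0) {
        // In-object: counted back from the end of the fixed part of the object.
        loc.in_object = true;
        loc.byte_offset = instance_size + index * kPointerSize;
      } else {
        // Out-of-object: slot in the properties array, after its header.
        loc.in_object = false;
        loc.byte_offset = kFixedArrayHeaderSize + index * kPointerSize;
      }
      return loc;
    }
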
 
@@ -2193,30 +2326,30 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     CallCode(ic, RelocInfo::CODE_TARGET, instr);
   } else {
-    NearLabel done;
+    Label done;
     for (int i = 0; i < map_count - 1; ++i) {
       Handle<Map> map = instr->hydrogen()->types()->at(i);
-      NearLabel next;
+      Label next;
       __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-      __ j(not_equal, &next);
-      EmitLoadField(result, object, map, name);
-      __ jmp(&done);
+      __ j(not_equal, &next, Label::kNear);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
     Handle<Map> map = instr->hydrogen()->types()->last();
     __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
     if (instr->hydrogen()->need_generic()) {
-      NearLabel generic;
-      __ j(not_equal, &generic);
-      EmitLoadField(result, object, map, name);
-      __ jmp(&done);
+      Label generic;
+      __ j(not_equal, &generic, Label::kNear);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ jmp(&done, Label::kNear);
       __ bind(&generic);
       __ Move(rcx, instr->hydrogen()->name());
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
       CallCode(ic, RelocInfo::CODE_TARGET, instr);
     } else {
       DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadField(result, object, map, name);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
     }
     __ bind(&done);
   }
@@ -2242,10 +2375,10 @@
   DeoptimizeIf(not_equal, instr->environment());
 
   // Check whether the function has an instance prototype.
-  NearLabel non_instance;
+  Label non_instance;
   __ testb(FieldOperand(result, Map::kBitFieldOffset),
            Immediate(1 << Map::kHasNonInstancePrototype));
-  __ j(not_zero, &non_instance);
+  __ j(not_zero, &non_instance, Label::kNear);
 
   // Get the prototype or initial map from the function.
   __ movq(result,
@@ -2256,13 +2389,13 @@
   DeoptimizeIf(equal, instr->environment());
 
   // If the function does not have an initial map, we're done.
-  NearLabel done;
+  Label done;
   __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
-  __ j(not_equal, &done);
+  __ j(not_equal, &done, Label::kNear);
 
   // Get the prototype from the initial map.
   __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Non-instance prototype: Fetch prototype from constructor field
   // in the function's map.
@@ -2279,13 +2412,13 @@
   Register input = ToRegister(instr->InputAt(0));
   __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
   if (FLAG_debug_code) {
-    NearLabel done;
+    Label done;
     __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
                    Heap::kFixedArrayMapRootIndex);
-    __ j(equal, &done);
+    __ j(equal, &done, Label::kNear);
     __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
                    Heap::kFixedCOWArrayMapRootIndex);
-    __ j(equal, &done);
+    __ j(equal, &done, Label::kNear);
     Register temp((result.is(rax)) ? rbx : rax);
     __ push(temp);
     __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
@@ -2339,41 +2472,63 @@
                                FixedArray::kHeaderSize));
 
   // Check for the hole value.
-  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-  DeoptimizeIf(equal, instr->environment());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(equal, instr->environment());
+  }
+}
+
+
+Operand LCodeGen::BuildExternalArrayOperand(LOperand* external_pointer,
+                                            LOperand* key,
+                                            ExternalArrayType array_type) {
+  Register external_pointer_reg = ToRegister(external_pointer);
+  int shift_size = ExternalArrayTypeToShiftSize(array_type);
+  if (key->IsConstantOperand()) {
+    int constant_value = ToInteger32(LConstantOperand::cast(key));
+    if (constant_value & 0xF0000000) {
+      Abort("array index constant value too big");
+    }
+    return Operand(external_pointer_reg, constant_value * (1 << shift_size));
+  } else {
+    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+    return Operand(external_pointer_reg, ToRegister(key), scale_factor, 0);
+  }
 }
 
 
 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
-  Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
   ExternalArrayType array_type = instr->array_type();
+  Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
+                                            instr->key(), array_type));
   if (array_type == kExternalFloatArray) {
     XMMRegister result(ToDoubleRegister(instr->result()));
-    __ movss(result, Operand(external_pointer, key, times_4, 0));
+    __ movss(result, operand);
     __ cvtss2sd(result, result);
+  } else if (array_type == kExternalDoubleArray) {
+    __ movsd(ToDoubleRegister(instr->result()), operand);
   } else {
     Register result(ToRegister(instr->result()));
     switch (array_type) {
       case kExternalByteArray:
-        __ movsxbq(result, Operand(external_pointer, key, times_1, 0));
+        __ movsxbq(result, operand);
         break;
       case kExternalUnsignedByteArray:
       case kExternalPixelArray:
-        __ movzxbq(result, Operand(external_pointer, key, times_1, 0));
+        __ movzxbq(result, operand);
         break;
       case kExternalShortArray:
-        __ movsxwq(result, Operand(external_pointer, key, times_2, 0));
+        __ movsxwq(result, operand);
         break;
       case kExternalUnsignedShortArray:
-        __ movzxwq(result, Operand(external_pointer, key, times_2, 0));
+        __ movzxwq(result, operand);
         break;
       case kExternalIntArray:
-        __ movsxlq(result, Operand(external_pointer, key, times_4, 0));
+        __ movsxlq(result, operand);
         break;
       case kExternalUnsignedIntArray:
-        __ movl(result, Operand(external_pointer, key, times_4, 0));
+        __ movl(result, operand);
         __ testl(result, result);
         // TODO(danno): we could be more clever here, perhaps having a special
         // version of the stub that detects if the overflow case actually
@@ -2381,6 +2536,7 @@
         DeoptimizeIf(negative, instr->environment());
         break;
       case kExternalFloatArray:
+      case kExternalDoubleArray:
         UNREACHABLE();
         break;
     }
@@ -2401,15 +2557,15 @@
   Register result = ToRegister(instr->result());
 
   // Check for arguments adapter frame.
-  NearLabel done, adapted;
+  Label done, adapted;
   __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(equal, &adapted);
+  __ j(equal, &adapted, Label::kNear);
 
   // No arguments adaptor frame.
   __ movq(result, rbp);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Arguments adaptor frame present.
   __ bind(&adapted);
@@ -2424,7 +2580,7 @@
 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   Register result = ToRegister(instr->result());
 
-  NearLabel done;
+  Label done;
 
   // If no arguments adaptor frame the number of arguments is fixed.
   if (instr->InputAt(0)->IsRegister()) {
@@ -2433,7 +2589,7 @@
     __ cmpq(rbp, ToOperand(instr->InputAt(0)));
   }
   __ movl(result, Immediate(scope()->num_parameters()));
-  __ j(equal, &done);
+  __ j(equal, &done, Label::kNear);
 
   // Arguments adaptor frame present. Get argument length from there.
   __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@@ -2455,27 +2611,31 @@
   ASSERT(function.is(rdi));  // Required by InvokeFunction.
   ASSERT(ToRegister(instr->result()).is(rax));
 
+  // TODO(1412): This is not correct if the called function is a
+  // strict mode function or a native.
+  //
   // If the receiver is null or undefined, we have to pass the global object
   // as a receiver.
-  NearLabel global_object, receiver_ok;
+  Label global_object, receiver_ok;
   __ CompareRoot(receiver, Heap::kNullValueRootIndex);
-  __ j(equal, &global_object);
+  __ j(equal, &global_object, Label::kNear);
   __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &global_object);
+  __ j(equal, &global_object, Label::kNear);
 
   // The receiver should be a JS object.
   Condition is_smi = __ CheckSmi(receiver);
   DeoptimizeIf(is_smi, instr->environment());
   __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister);
   DeoptimizeIf(below, instr->environment());
-  __ jmp(&receiver_ok);
+  __ jmp(&receiver_ok, Label::kNear);
 
   __ bind(&global_object);
   // TODO(kmillikin): We have a hydrogen value for the global object.  See
   // if it's better to use it than to explicitly fetch it from the context
   // here.
-  __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+  __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_INDEX));
+  __ movq(receiver,
+          FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
 
   // Copy the arguments to this function possibly from the
@@ -2489,10 +2649,10 @@
 
   // Loop through the arguments pushing them onto the execution
   // stack.
-  NearLabel invoke, loop;
+  Label invoke, loop;
   // length is a small non-negative integer, due to the test above.
   __ testl(length, length);
-  __ j(zero, &invoke);
+  __ j(zero, &invoke, Label::kNear);
   __ bind(&loop);
   __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
   __ decl(length);
@@ -2509,26 +2669,21 @@
                                          pointers,
                                          env->deoptimization_index());
   v8::internal::ParameterCount actual(rax);
-  __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+  __ InvokeFunction(function, actual, CALL_FUNCTION,
+                    safepoint_generator, CALL_AS_METHOD);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   LOperand* argument = instr->InputAt(0);
-  if (argument->IsConstantOperand()) {
-    EmitPushConstantOperand(argument);
-  } else if (argument->IsRegister()) {
-    __ push(ToRegister(argument));
-  } else {
-    ASSERT(!argument->IsDoubleRegister());
-    __ push(ToOperand(argument));
-  }
+  EmitPushTaggedOperand(argument);
 }
 
 
 void LCodeGen::DoContext(LContext* instr) {
   Register result = ToRegister(instr->result());
-  __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ movq(result, rsi);
 }
 
 
@@ -2556,7 +2711,8 @@
 
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
-                                 LInstruction* instr) {
+                                 LInstruction* instr,
+                                 CallKind call_kind) {
   // Change context if needed.
   bool change_context =
       (info()->closure()->context() != function->context()) ||
@@ -2576,6 +2732,7 @@
   RecordPosition(pointers->position());
 
   // Invoke function.
+  __ SetCallKind(rcx, call_kind);
   if (*function == *info()->closure()) {
     __ CallSelf();
   } else {
@@ -2593,7 +2750,10 @@
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
   __ Move(rdi, instr->function());
-  CallKnownFunction(instr->function(), instr->arity(), instr);
+  CallKnownFunction(instr->function(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_METHOD);
 }
 
 
@@ -2680,7 +2840,7 @@
   if (r.IsDouble()) {
     XMMRegister scratch = xmm0;
     XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-    __ xorpd(scratch, scratch);
+    __ xorps(scratch, scratch);
     __ subsd(scratch, input_reg);
     __ andpd(input_reg, scratch);
   } else if (r.IsInteger32()) {
@@ -2703,21 +2863,36 @@
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-  __ ucomisd(input_reg, xmm_scratch);
 
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(below_equal, instr->environment());
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatures::Scope scope(SSE4_1);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Deoptimize if minus zero.
+      __ movq(output_reg, input_reg);
+      __ subq(output_reg, Immediate(1));
+      DeoptimizeIf(overflow, instr->environment());
+    }
+    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+    __ cvttsd2si(output_reg, xmm_scratch);
+    __ cmpl(output_reg, Immediate(0x80000000));
+    DeoptimizeIf(equal, instr->environment());
   } else {
-    DeoptimizeIf(below, instr->environment());
+    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
+    __ ucomisd(input_reg, xmm_scratch);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(below_equal, instr->environment());
+    } else {
+      DeoptimizeIf(below, instr->environment());
+    }
+
+    // Use truncating instruction (OK because input is positive).
+    __ cvttsd2si(output_reg, input_reg);
+
+    // Overflow is signalled with minint.
+    __ cmpl(output_reg, Immediate(0x80000000));
+    DeoptimizeIf(equal, instr->environment());
   }
-
-  // Use truncating instruction (OK because input is positive).
-  __ cvttsd2si(output_reg, input_reg);
-
-  // Overflow is signalled with minint.
-  __ cmpl(output_reg, Immediate(0x80000000));
-  DeoptimizeIf(equal, instr->environment());
 }
 
 
@@ -2726,33 +2901,45 @@
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
 
+  Label done;
   // xmm_scratch = 0.5
   __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
   __ movq(xmm_scratch, kScratchRegister);
-
+  Label below_half;
+  __ ucomisd(xmm_scratch, input_reg);
+  // If input_reg is NaN, this doesn't jump.
+  __ j(above, &below_half, Label::kNear);
   // input = input + 0.5
+  // This addition might give a result that isn't correct for
+  // rounding, due to loss of precision, but only for a number that's
+  // so big that the conversion below will overflow anyway.
   __ addsd(input_reg, xmm_scratch);
-
-  // We need to return -0 for the input range [-0.5, 0[, otherwise
-  // compute Math.floor(value + 0.5).
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below_equal, instr->environment());
-  } else {
-    // If we don't need to bailout on -0, we check only bailout
-    // on negative inputs.
-    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below, instr->environment());
-  }
-
-  // Compute Math.floor(value + 0.5).
+  // Compute Math.floor(input).
   // Use truncating instruction (OK because input is positive).
   __ cvttsd2si(output_reg, input_reg);
-
   // Overflow is signalled with minint.
   __ cmpl(output_reg, Immediate(0x80000000));
   DeoptimizeIf(equal, instr->environment());
+  __ jmp(&done);
+
+  __ bind(&below_half);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if negative (including -0).
+    __ movq(output_reg, input_reg);
+    __ testq(output_reg, output_reg);
+    DeoptimizeIf(negative, instr->environment());
+  } else {
+    // Bail out if below -0.5, otherwise round to (positive) zero, even
+    // if negative.
+    // xmm_scratch = -0.5
+    __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
+    __ movq(xmm_scratch, kScratchRegister);
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below, instr->environment());
+  }
+  __ xorl(output_reg, output_reg);
+
+  __ bind(&done);
 }
 
 
@@ -2767,7 +2954,7 @@
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-  __ xorpd(xmm_scratch, xmm_scratch);
+  __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
 }
@@ -2783,7 +2970,7 @@
   if (exponent_type.IsDouble()) {
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
     ASSERT(ToDoubleRegister(right).is(xmm1));
     __ CallCFunction(
         ExternalReference::power_double_double_function(isolate()), 2);
@@ -2791,7 +2978,7 @@
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers: xmm0 and edi (not rdi).
     // On Windows, the registers are xmm0 and edx.
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
 #ifdef _WIN64
     ASSERT(ToRegister(right).is(rdx));
 #else
@@ -2817,13 +3004,13 @@
     __ bind(&call);
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers xmm0 and xmm1.
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
     // Right argument is already in xmm1.
     __ CallCFunction(
         ExternalReference::power_double_double_function(isolate()), 2);
   }
   // Return value is in xmm0.
-  __ movsd(result_reg, xmm0);
+  __ movaps(result_reg, xmm0);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
@@ -2886,6 +3073,21 @@
 }
 
 
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(rdi));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator generator(this, pointers, env->deoptimization_index());
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
+
+
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->result()).is(rax));
@@ -2902,10 +3104,11 @@
   ASSERT(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
-      arity, NOT_IN_LOOP);
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
   __ Move(rcx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, mode, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
@@ -2914,7 +3117,7 @@
   ASSERT(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   __ Drop(1);
@@ -2924,10 +3127,11 @@
 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
   int arity = instr->arity();
-  Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
-      arity, NOT_IN_LOOP);
+  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic =
+      isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
   __ Move(rcx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+  CallCode(ic, mode, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
@@ -2935,7 +3139,7 @@
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
   __ Move(rdi, instr->target());
-  CallKnownFunction(instr->target(), instr->arity(), instr);
+  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
 
@@ -2998,40 +3202,33 @@
 
 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
-  Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = ToRegister(instr->key());
   ExternalArrayType array_type = instr->array_type();
+  Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
+                                            instr->key(), array_type));
   if (array_type == kExternalFloatArray) {
     XMMRegister value(ToDoubleRegister(instr->value()));
     __ cvtsd2ss(value, value);
-    __ movss(Operand(external_pointer, key, times_4, 0), value);
+    __ movss(operand, value);
+  } else if (array_type == kExternalDoubleArray) {
+    __ movsd(operand, ToDoubleRegister(instr->value()));
   } else {
     Register value(ToRegister(instr->value()));
     switch (array_type) {
       case kExternalPixelArray:
-        {  // Clamp the value to [0..255].
-          NearLabel done;
-          __ testl(value, Immediate(0xFFFFFF00));
-          __ j(zero, &done);
-          __ setcc(negative, value);  // 1 if negative, 0 if positive.
-          __ decb(value);  // 0 if negative, 255 if positive.
-          __ bind(&done);
-          __ movb(Operand(external_pointer, key, times_1, 0), value);
-        }
-        break;
       case kExternalByteArray:
       case kExternalUnsignedByteArray:
-        __ movb(Operand(external_pointer, key, times_1, 0), value);
+        __ movb(operand, value);
         break;
       case kExternalShortArray:
       case kExternalUnsignedShortArray:
-        __ movw(Operand(external_pointer, key, times_2, 0), value);
+        __ movw(operand, value);
         break;
       case kExternalIntArray:
       case kExternalUnsignedIntArray:
-        __ movl(Operand(external_pointer, key, times_4, 0), value);
+        __ movl(operand, value);
         break;
       case kExternalFloatArray:
+      case kExternalDoubleArray:
         UNREACHABLE();
         break;
     }
@@ -3092,6 +3289,14 @@
 }
 
 
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  EmitPushTaggedOperand(instr->left());
+  EmitPushTaggedOperand(instr->right());
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
@@ -3126,7 +3331,7 @@
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  NearLabel flat_string, ascii_string, done;
+  Label flat_string, ascii_string, done;
 
   // Fetch the instance type of the receiver into result register.
   __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
@@ -3135,7 +3340,7 @@
   // We need special handling for non-sequential strings.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ testb(result, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string);
+  __ j(zero, &flat_string, Label::kNear);
 
   // Handle cons strings and go to deferred code for the rest.
   __ testb(result, Immediate(kIsConsStringMask));
@@ -3162,7 +3367,7 @@
   __ bind(&flat_string);
   STATIC_ASSERT(kAsciiStringTag != 0);
   __ testb(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string);
+  __ j(not_zero, &ascii_string, Label::kNear);
 
   // Two-byte string.
   // Load the two-byte character code into the result register.
@@ -3178,7 +3383,7 @@
                                     times_2,
                                     SeqTwoByteString::kHeaderSize));
   }
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // ASCII string.
   // Load the byte into the result register.
@@ -3369,10 +3574,10 @@
                                 XMMRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 LEnvironment* env) {
-  NearLabel load_smi, done;
+  Label load_smi, done;
 
   // Smi check.
-  __ JumpIfSmi(input_reg, &load_smi);
+  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
 
   // Heap number map check.
   __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3380,21 +3585,22 @@
   if (deoptimize_on_undefined) {
     DeoptimizeIf(not_equal, env);
   } else {
-    NearLabel heap_number;
-    __ j(equal, &heap_number);
+    Label heap_number;
+    __ j(equal, &heap_number, Label::kNear);
+
     __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
     DeoptimizeIf(not_equal, env);
 
     // Convert undefined to NaN. Compute NaN as 0/0.
-    __ xorpd(result_reg, result_reg);
+    __ xorps(result_reg, result_reg);
     __ divsd(result_reg, result_reg);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     __ bind(&heap_number);
   }
   // Heap number to XMM conversion.
   __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // Smi to XMM conversion
   __ bind(&load_smi);
@@ -3415,7 +3621,7 @@
 
 
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
-  NearLabel done, heap_number;
+  Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
 
   // Heap number map check.
@@ -3423,13 +3629,13 @@
                  Heap::kHeapNumberMapRootIndex);
 
   if (instr->truncating()) {
-    __ j(equal, &heap_number);
+    __ j(equal, &heap_number, Label::kNear);
     // Check for undefined. Undefined is converted to zero for truncating
     // conversions.
     __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
     DeoptimizeIf(not_equal, instr->environment());
     __ Set(input_reg, 0);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
 
     __ bind(&heap_number);
 
@@ -3504,7 +3710,7 @@
     __ cvttsd2siq(result_reg, input_reg);
     __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
     __ cmpq(result_reg, kScratchRegister);
-      DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr->environment());
   } else {
     __ cvttsd2si(result_reg, input_reg);
     __ cvtlsi2sd(xmm0, result_reg);
@@ -3512,11 +3718,11 @@
     DeoptimizeIf(not_equal, instr->environment());
     DeoptimizeIf(parity_even, instr->environment());  // NaN.
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      NearLabel done;
+      Label done;
       // The integer converted back is equal to the original. We
       // only have to test if we got -0 as an input.
       __ testl(result_reg, result_reg);
-      __ j(not_zero, &done);
+      __ j(not_zero, &done, Label::kNear);
       __ movmskpd(result_reg, input_reg);
       // Bit 0 contains the sign of the double in input_reg.
       // If input was positive, we are ok and return 0, otherwise
@@ -3545,30 +3751,45 @@
 
 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   Register input = ToRegister(instr->InputAt(0));
-  InstanceType first = instr->hydrogen()->first();
-  InstanceType last = instr->hydrogen()->last();
 
   __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
 
-  // If there is only one type in the interval check for equality.
-  if (first == last) {
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
     __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
             Immediate(static_cast<int8_t>(first)));
-    DeoptimizeIf(not_equal, instr->environment());
-  } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
-    // String has a dedicated bit in instance type.
-    __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
-             Immediate(kIsNotStringMask));
-    DeoptimizeIf(not_zero, instr->environment());
+
+    // If there is only one type in the interval check for equality.
+    if (first == last) {
+      DeoptimizeIf(not_equal, instr->environment());
+    } else {
+      DeoptimizeIf(below, instr->environment());
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+                Immediate(static_cast<int8_t>(last)));
+        DeoptimizeIf(above, instr->environment());
+      }
+    }
   } else {
-    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
-            Immediate(static_cast<int8_t>(first)));
-    DeoptimizeIf(below, instr->environment());
-    // Omit check for the last type.
-    if (last != LAST_TYPE) {
-      __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
-              Immediate(static_cast<int8_t>(last)));
-      DeoptimizeIf(above, instr->environment());
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (IsPowerOf2(mask)) {
+      ASSERT(tag == 0 || IsPowerOf2(tag));
+      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+               Immediate(mask));
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+    } else {
+      __ movzxbl(kScratchRegister,
+                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+      __ andb(kScratchRegister, Immediate(mask));
+      __ cmpb(kScratchRegister, Immediate(tag));
+      DeoptimizeIf(not_equal, instr->environment());
     }
   }
 }
@@ -3592,6 +3813,57 @@
 }
 
 
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  Register temp_reg = ToRegister(instr->TempAt(0));
+  __ ClampDoubleToUint8(value_reg, xmm0, result_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  ASSERT(instr->unclamped()->Equals(instr->result()));
+  Register value_reg = ToRegister(instr->result());
+  __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  ASSERT(instr->unclamped()->Equals(instr->result()));
+  Register input_reg = ToRegister(instr->unclamped());
+  Register temp_reg = ToRegister(instr->TempAt(0));
+  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->TempAt(1));
+  Label is_smi, done, heap_number;
+
+  __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for heap number
+  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         factory()->heap_number_map());
+  __ j(equal, &heap_number, Label::kNear);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ Cmp(input_reg, factory()->undefined_value());
+  DeoptimizeIf(not_equal, instr->environment());
+  __ movq(input_reg, Immediate(0));
+  __ jmp(&done, Label::kNear);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg, temp_reg);
+  __ jmp(&done, Label::kNear);
+
+  // smi
+  __ bind(&is_smi);
+  __ SmiToInteger32(input_reg, input_reg);
+  __ ClampUint8(input_reg);
+
+  __ bind(&done);
+}
+
+
 void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
   if (heap()->InNewSpace(*object)) {
     Handle<JSGlobalPropertyCell> cell =
@@ -3684,7 +3956,7 @@
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  NearLabel materialized;
+  Label materialized;
   // Registers will be used as follows:
   // rdi = JS function.
   // rcx = literals array.
@@ -3696,7 +3968,7 @@
       instr->hydrogen()->literal_index() * kPointerSize;
   __ movq(rbx, FieldOperand(rcx, literal_offset));
   __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &materialized);
+  __ j(not_equal, &materialized, Label::kNear);
 
   // Create regexp literal using runtime function
   // Result will be in rax.
@@ -3758,14 +4030,7 @@
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
   LOperand* input = instr->InputAt(0);
-  if (input->IsConstantOperand()) {
-    __ Push(ToHandle(LConstantOperand::cast(input)));
-  } else if (input->IsRegister()) {
-    __ push(ToRegister(input));
-  } else {
-    ASSERT(input->IsStackSlot());
-    __ push(ToOperand(input));
-  }
+  EmitPushTaggedOperand(input);
   CallRuntime(Runtime::kTypeof, 1, instr);
 }
 
@@ -3775,7 +4040,7 @@
   Register result = ToRegister(instr->result());
   Label true_label;
   Label false_label;
-  NearLabel done;
+  Label done;
 
   Condition final_branch_condition = EmitTypeofIs(&true_label,
                                                   &false_label,
@@ -3784,7 +4049,7 @@
   __ j(final_branch_condition, &true_label);
   __ bind(&false_label);
   __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&true_label);
   __ LoadRoot(result, Heap::kTrueValueRootIndex);
@@ -3793,19 +4058,14 @@
 }
 
 
-void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
-  ASSERT(operand->IsConstantOperand());
-  LConstantOperand* const_op = LConstantOperand::cast(operand);
-  Handle<Object> literal = chunk_->LookupLiteral(const_op);
-  Representation r = chunk_->LookupLiteralRepresentation(const_op);
-  if (r.IsInteger32()) {
-    ASSERT(literal->IsNumber());
-    __ push(Immediate(static_cast<int32_t>(literal->Number())));
-  } else if (r.IsDouble()) {
-    Abort("unsupported double immediate");
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+  ASSERT(!operand->IsDoubleRegister());
+  if (operand->IsConstantOperand()) {
+    __ Push(ToHandle(LConstantOperand::cast(operand)));
+  } else if (operand->IsRegister()) {
+    __ push(ToRegister(operand));
   } else {
-    ASSERT(r.IsTagged());
-    __ Push(literal);
+    __ push(ToOperand(operand));
   }
 }
 
@@ -3891,15 +4151,14 @@
 
 void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
   Register result = ToRegister(instr->result());
-  NearLabel true_label;
-  NearLabel false_label;
-  NearLabel done;
+  Label true_label;
+  Label done;
 
   EmitIsConstructCall(result);
-  __ j(equal, &true_label);
+  __ j(equal, &true_label, Label::kNear);
 
   __ LoadRoot(result, Heap::kFalseValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&true_label);
   __ LoadRoot(result, Heap::kTrueValueRootIndex);
@@ -3924,10 +4183,10 @@
   __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
 
   // Skip the arguments adaptor frame if it exists.
-  NearLabel check_frame_marker;
+  Label check_frame_marker;
   __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &check_frame_marker);
+  __ j(not_equal, &check_frame_marker, Label::kNear);
   __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
 
   // Check the marker in the calling frame.
@@ -3951,20 +4210,8 @@
 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   LOperand* obj = instr->object();
   LOperand* key = instr->key();
-  // Push object.
-  if (obj->IsRegister()) {
-    __ push(ToRegister(obj));
-  } else {
-    __ push(ToOperand(obj));
-  }
-  // Push key.
-  if (key->IsConstantOperand()) {
-    EmitPushConstantOperand(key);
-  } else if (key->IsRegister()) {
-    __ push(ToRegister(key));
-  } else {
-    __ push(ToOperand(key));
-  }
+  EmitPushTaggedOperand(obj);
+  EmitPushTaggedOperand(key);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   LEnvironment* env = instr->deoptimization_environment();
@@ -3977,15 +4224,35 @@
                                          pointers,
                                          env->deoptimization_index());
   __ Push(Smi::FromInt(strict_mode_flag()));
-  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoIn(LIn* instr) {
+  LOperand* obj = instr->object();
+  LOperand* key = instr->key();
+  EmitPushTaggedOperand(key);
+  EmitPushTaggedOperand(obj);
+  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  // Create safepoint generator that will also ensure enough space in the
+  // reloc info for patching in deoptimization (since this is invoking a
+  // builtin)
+  SafepointGenerator safepoint_generator(this,
+                                         pointers,
+                                         env->deoptimization_index());
+  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
 }
 
 
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
   // Perform stack overflow check.
-  NearLabel done;
+  Label done;
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  __ j(above_equal, &done);
+  __ j(above_equal, &done, Label::kNear);
 
   StackCheckStub stub;
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index d95ab21..7c9f2a0 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -102,6 +102,7 @@
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
 
   // Emit frame translation commands for an environment.
   void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -141,8 +142,8 @@
                        Register input,
                        Register temporary);
 
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -193,7 +194,8 @@
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
-                         LInstruction* instr);
+                         LInstruction* instr,
+                         CallKind call_kind);
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
@@ -213,6 +215,9 @@
 
   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
+  Operand BuildExternalArrayOperand(LOperand* external_pointer,
+                                    LOperand* key,
+                                    ExternalArrayType array_type);
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -266,13 +271,14 @@
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
 
-  void EmitLoadField(Register result,
-                     Register object,
-                     Handle<Map> type,
-                     Handle<String> name);
+  void EmitLoadFieldOrConstantFunction(Register result,
+                                       Register object,
+                                       Handle<Map> type,
+                                       Handle<String> name);
 
-  // Emits code for pushing a constant operand.
-  void EmitPushConstantOperand(LOperand* operand);
+  // Emits code for pushing either a tagged constant, a (non-double)
+  // register, or a stack slot operand.
+  void EmitPushTaggedOperand(LOperand* operand);
 
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index cedd025..c3c617c 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -214,7 +214,7 @@
   } else if (source->IsDoubleRegister()) {
     XMMRegister src = cgen_->ToDoubleRegister(source);
     if (destination->IsDoubleRegister()) {
-      __ movsd(cgen_->ToDoubleRegister(destination), src);
+      __ movaps(cgen_->ToDoubleRegister(destination), src);
     } else {
       ASSERT(destination->IsDoubleStackSlot());
       __ movsd(cgen_->ToOperand(destination), src);
@@ -273,9 +273,9 @@
     // Swap two double registers.
     XMMRegister source_reg = cgen_->ToDoubleRegister(source);
     XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
-    __ movsd(xmm0, source_reg);
-    __ movsd(source_reg, destination_reg);
-    __ movsd(destination_reg, xmm0);
+    __ movaps(xmm0, source_reg);
+    __ movaps(source_reg, destination_reg);
+    __ movaps(destination_reg, xmm0);
 
   } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
     // Swap a double register and a double stack slot.
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 4601cd9..569fa3e 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -71,22 +71,21 @@
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs must use a fixed register.
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
   }
   for (TempIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -240,6 +239,13 @@
 }
 
 
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -303,6 +309,13 @@
 }
 
 
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[rcx] #%d / ", arity());
 }
@@ -442,7 +455,7 @@
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LGap* gap = new LGap(block);
+  LInstructionGap* gap = new LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -845,24 +858,22 @@
     right = UseFixed(right_value, rcx);
   }
 
-  // Shift operations can only deoptimize if we do a logical shift
-  // by 0 and the result cannot be truncated to int32.
-  bool can_deopt = (op == Token::SHR && constant_value == 0);
-  if (can_deopt) {
-    bool can_truncate = true;
-    for (int i = 0; i < instr->uses()->length(); i++) {
-      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
-        can_truncate = false;
+  // Shift operations can only deoptimize if we do a logical shift by 0 and
+  // the result cannot be truncated to int32.
+  bool may_deopt = (op == Token::SHR && constant_value == 0);
+  bool does_deopt = false;
+  if (may_deopt) {
+    for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+      if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
+        does_deopt = true;
         break;
       }
     }
-    can_deopt = !can_truncate;
   }
 
-  LShiftI* result = new LShiftI(op, left, right, can_deopt);
-  return can_deopt
-      ? AssignEnvironment(DefineSameAsFirst(result))
-      : DefineSameAsFirst(result);
+  LInstruction* result =
+      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+  return does_deopt ? AssignEnvironment(result) : result;
 }
 
 
@@ -1006,6 +1017,8 @@
                                           outer);
   int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
     HValue* value = hydrogen_env->values()->at(i);
     LOperand* op = NULL;
     if (value->IsArgumentsObject()) {
@@ -1033,98 +1046,94 @@
 
 LInstruction* LChunkBuilder::DoTest(HTest* instr) {
   HValue* v = instr->value();
-  if (v->EmitAtUses()) {
-    if (v->IsClassOfTest()) {
-      HClassOfTest* compare = HClassOfTest::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
-                                       TempRegister());
-    } else if (v->IsCompare()) {
-      HCompare* compare = HCompare::cast(v);
-      Token::Value op = compare->token();
-      HValue* left = compare->left();
-      HValue* right = compare->right();
-      Representation r = compare->GetInputRepresentation();
-      if (r.IsInteger32()) {
-        ASSERT(left->representation().IsInteger32());
-        ASSERT(right->representation().IsInteger32());
-
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseOrConstantAtStart(right));
-      } else if (r.IsDouble()) {
-        ASSERT(left->representation().IsDouble());
-        ASSERT(right->representation().IsDouble());
-
-        return new LCmpIDAndBranch(UseRegisterAtStart(left),
-                                   UseRegisterAtStart(right));
-      } else {
-        ASSERT(left->representation().IsTagged());
-        ASSERT(right->representation().IsTagged());
-        bool reversed = op == Token::GT || op == Token::LTE;
-        LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
-        LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
-        LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
-                                                    right_operand);
-        return MarkAsCall(result, instr);
-      }
-    } else if (v->IsIsSmi()) {
-      HIsSmi* compare = HIsSmi::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LIsSmiAndBranch(Use(compare->value()));
-    } else if (v->IsHasInstanceType()) {
-      HHasInstanceType* compare = HHasInstanceType::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LHasInstanceTypeAndBranch(
-          UseRegisterAtStart(compare->value()));
-    } else if (v->IsHasCachedArrayIndex()) {
-      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      return new LHasCachedArrayIndexAndBranch(
-          UseRegisterAtStart(compare->value()));
-    } else if (v->IsIsNull()) {
-      HIsNull* compare = HIsNull::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-
-      // We only need a temp register for non-strict compare.
-      LOperand* temp = compare->is_strict() ? NULL : TempRegister();
-      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
-                                  temp);
-    } else if (v->IsIsObject()) {
-      HIsObject* compare = HIsObject::cast(v);
-      ASSERT(compare->value()->representation().IsTagged());
-      return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
-    } else if (v->IsCompareJSObjectEq()) {
-      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
-      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
-                                         UseRegisterAtStart(compare->right()));
-    } else if (v->IsInstanceOf()) {
-      HInstanceOf* instance_of = HInstanceOf::cast(v);
-      LInstanceOfAndBranch* result =
-          new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
-                                   UseFixed(instance_of->right(), rdx));
-      return MarkAsCall(result, instr);
-    } else if (v->IsTypeofIs()) {
-      HTypeofIs* typeof_is = HTypeofIs::cast(v);
-      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
-    } else if (v->IsIsConstructCall()) {
-      return new LIsConstructCallAndBranch(TempRegister());
+  if (!v->EmitAtUses()) {
+    return new LBranch(UseRegisterAtStart(v));
+  } else if (v->IsClassOfTest()) {
+    HClassOfTest* compare = HClassOfTest::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+                                     TempRegister());
+  } else if (v->IsCompare()) {
+    HCompare* compare = HCompare::cast(v);
+    Token::Value op = compare->token();
+    HValue* left = compare->left();
+    HValue* right = compare->right();
+    Representation r = compare->GetInputRepresentation();
+    if (r.IsInteger32()) {
+      ASSERT(left->representation().IsInteger32());
+      ASSERT(right->representation().IsInteger32());
+      return new LCmpIDAndBranch(UseRegisterAtStart(left),
+                                 UseOrConstantAtStart(right));
+    } else if (r.IsDouble()) {
+      ASSERT(left->representation().IsDouble());
+      ASSERT(right->representation().IsDouble());
+      return new LCmpIDAndBranch(UseRegisterAtStart(left),
+                                 UseRegisterAtStart(right));
     } else {
-      if (v->IsConstant()) {
-        if (HConstant::cast(v)->ToBoolean()) {
-          return new LGoto(instr->FirstSuccessor()->block_id());
-        } else {
-          return new LGoto(instr->SecondSuccessor()->block_id());
-        }
-      }
-      Abort("Undefined compare before branch");
-      return NULL;
+      ASSERT(left->representation().IsTagged());
+      ASSERT(right->representation().IsTagged());
+      bool reversed = op == Token::GT || op == Token::LTE;
+      LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
+      LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
+      LCmpTAndBranch* result = new LCmpTAndBranch(left_operand, right_operand);
+      return MarkAsCall(result, instr);
     }
+  } else if (v->IsIsSmi()) {
+    HIsSmi* compare = HIsSmi::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LIsSmiAndBranch(Use(compare->value()));
+  } else if (v->IsIsUndetectable()) {
+    HIsUndetectable* compare = HIsUndetectable::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
+                                        TempRegister());
+  } else if (v->IsHasInstanceType()) {
+    HHasInstanceType* compare = HHasInstanceType::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
+  } else if (v->IsHasCachedArrayIndex()) {
+    HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LHasCachedArrayIndexAndBranch(
+        UseRegisterAtStart(compare->value()));
+  } else if (v->IsIsNull()) {
+    HIsNull* compare = HIsNull::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    // We only need a temp register for non-strict compare.
+    LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+    return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
+  } else if (v->IsIsObject()) {
+    HIsObject* compare = HIsObject::cast(v);
+    ASSERT(compare->value()->representation().IsTagged());
+    return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
+  } else if (v->IsCompareJSObjectEq()) {
+    HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+    return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+                                       UseRegisterAtStart(compare->right()));
+  } else if (v->IsCompareSymbolEq()) {
+    HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
+    return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
+                                     UseRegisterAtStart(compare->right()));
+  } else if (v->IsInstanceOf()) {
+    HInstanceOf* instance_of = HInstanceOf::cast(v);
+    LInstanceOfAndBranch* result =
+        new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
+                                 UseFixed(instance_of->right(), rdx));
+    return MarkAsCall(result, instr);
+  } else if (v->IsTypeofIs()) {
+    HTypeofIs* typeof_is = HTypeofIs::cast(v);
+    return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+  } else if (v->IsIsConstructCall()) {
+    return new LIsConstructCallAndBranch(TempRegister());
+  } else if (v->IsConstant()) {
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+        ? instr->FirstSuccessor()
+        : instr->SecondSuccessor();
+    return new LGoto(successor->block_id());
+  } else {
+    Abort("Undefined compare before branch");
+    return NULL;
   }
-  return new LBranch(UseRegisterAtStart(v));
 }
 
 
@@ -1183,7 +1192,7 @@
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return DefineAsRegister(new LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
 }
 
 
@@ -1211,6 +1220,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), rdi);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(function);
+  return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
   if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1500,6 +1517,15 @@
 }
 
 
+LInstruction* LChunkBuilder::DoCompareSymbolEq(
+    HCompareSymbolEq* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
+  return DefineAsRegister(result);
+}
+
+
 LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1524,6 +1550,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LIsUndetectable(value));
+}
+
+
 LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1600,6 +1634,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
@@ -1694,6 +1736,27 @@
 }
 
 
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new LClampDToUint8(reg,
+                                               TempRegister()));
+  } else if (input_rep.IsInteger32()) {
+    return DefineSameAsFirst(new LClampIToUint8(reg));
+  } else {
+    ASSERT(input_rep.IsTagged());
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve xmm1 explicitly.
+    LClampTToUint8* result = new LClampTToUint8(reg,
+                                                TempRegister(),
+                                                FixedTemp(xmm1));
+    return AssignEnvironment(DefineSameAsFirst(result));
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
   return new LReturn(UseFixed(instr->value(), rax));
 }
@@ -1832,11 +1895,14 @@
     HLoadKeyedSpecializedArrayElement* instr) {
   ExternalArrayType array_type = instr->array_type();
   Representation representation(instr->representation());
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ASSERT(
+      (representation.IsInteger32() && (array_type != kExternalFloatArray &&
+                                        array_type != kExternalDoubleArray)) ||
+      (representation.IsDouble() && (array_type == kExternalFloatArray ||
+                                     array_type == kExternalDoubleArray)));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* key = UseRegister(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
       new LLoadKeyedSpecializedArrayElement(external_pointer, key);
   LInstruction* load_instr = DefineAsRegister(result);
@@ -1879,8 +1945,11 @@
     HStoreKeyedSpecializedArrayElement* instr) {
   Representation representation(instr->value()->representation());
   ExternalArrayType array_type = instr->array_type();
-  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
-         (representation.IsDouble() && array_type == kExternalFloatArray));
+  ASSERT(
+      (representation.IsInteger32() && (array_type != kExternalFloatArray &&
+                                        array_type != kExternalDoubleArray)) ||
+      (representation.IsDouble() && (array_type == kExternalFloatArray ||
+                                     array_type == kExternalDoubleArray)));
   ASSERT(instr->external_pointer()->representation().IsExternal());
   ASSERT(instr->key()->representation().IsInteger32());
 
@@ -1890,7 +1959,7 @@
   LOperand* val = val_is_temp_register
       ? UseTempRegister(instr->value())
       : UseRegister(instr->value());
-  LOperand* key = UseRegister(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());
 
   return new LStoreKeyedSpecializedArrayElement(external_pointer,
                                                 key,
@@ -1941,6 +2010,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseOrConstantAtStart(instr->left());
+  LOperand* right = UseOrConstantAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), rax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseRegister(instr->string());
   LOperand* index = UseRegisterOrConstant(instr->index());
@@ -1984,7 +2060,8 @@
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
   LDeleteProperty* result =
-      new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+      new LDeleteProperty(UseAtStart(instr->object()),
+                          UseOrConstantAtStart(instr->key()));
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -2100,8 +2177,9 @@
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
                                                instr->function(),
-                                               false,
-                                               undefined);
+                                               HEnvironment::LITHIUM,
+                                               undefined,
+                                               instr->call_kind());
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2114,6 +2192,15 @@
   return NULL;
 }
 
+
+LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+  LOperand* key = UseOrConstantAtStart(instr->key());
+  LOperand* object = UseOrConstantAtStart(instr->object());
+  LIn* result = new LIn(key, object);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
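
The DoClampToUint8 lowering above picks one of three Lithium instructions: LClampDToUint8 for double inputs, LClampIToUint8 for integer inputs, and LClampTToUint8 for tagged inputs. As a rough scalar model of the semantics all three implement for external byte array stores (the helper names below are illustrative only, not V8 API; the rounding mirrors the generated code, which adds 0.5 and truncates):

#include <cstdint>

// Integer inputs simply saturate to the [0, 255] range.
static inline uint8_t ClampInt32ToUint8(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return static_cast<uint8_t>(value);
}

// Double inputs: NaN and non-positive values clamp to 0, large values to 255,
// everything else is rounded by adding 0.5 and truncating.
static inline uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(value + 0.5);
}

The tagged variant additionally needs a deoptimization environment (hence the AssignEnvironment in the builder above), since not every tagged input can be clamped without a bailout.
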
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 8e12282..1493836 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -73,6 +73,9 @@
   V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
+  V(ClampDToUint8)                              \
+  V(ClampIToUint8)                              \
+  V(ClampTToUint8)                              \
   V(ClassOfTest)                                \
   V(ClassOfTestAndBranch)                       \
   V(CmpID)                                      \
@@ -80,6 +83,8 @@
   V(CmpJSObjectEq)                              \
   V(CmpJSObjectEqAndBranch)                     \
   V(CmpMapAndBranch)                            \
+  V(CmpSymbolEq)                                \
+  V(CmpSymbolEqAndBranch)                       \
   V(CmpT)                                       \
   V(CmpTAndBranch)                              \
   V(ConstantD)                                  \
@@ -93,31 +98,38 @@
   V(ExternalArrayLength)                        \
   V(FixedArrayLength)                           \
   V(FunctionLiteral)                            \
-  V(Gap)                                        \
   V(GetCachedArrayIndex)                        \
   V(GlobalObject)                               \
   V(GlobalReceiver)                             \
   V(Goto)                                       \
-  V(HasInstanceType)                            \
-  V(HasInstanceTypeAndBranch)                   \
   V(HasCachedArrayIndex)                        \
   V(HasCachedArrayIndexAndBranch)               \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
+  V(In)                                         \
   V(InstanceOf)                                 \
   V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
+  V(InstructionGap)                             \
   V(Integer32ToDouble)                          \
+  V(InvokeFunction)                             \
+  V(IsConstructCall)                            \
+  V(IsConstructCallAndBranch)                   \
   V(IsNull)                                     \
   V(IsNullAndBranch)                            \
   V(IsObject)                                   \
   V(IsObjectAndBranch)                          \
   V(IsSmi)                                      \
   V(IsSmiAndBranch)                             \
+  V(IsUndetectable)                             \
+  V(IsUndetectableAndBranch)                    \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
   V(LoadContextSlot)                            \
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
+  V(LoadFunctionPrototype)                      \
   V(LoadGlobalCell)                             \
   V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
@@ -126,7 +138,6 @@
   V(LoadNamedField)                             \
   V(LoadNamedFieldPolymorphic)                  \
   V(LoadNamedGeneric)                           \
-  V(LoadFunctionPrototype)                      \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -152,37 +163,32 @@
   V(StoreKeyedSpecializedArrayElement)          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
   V(StringCharCodeAt)                           \
   V(StringCharFromCode)                         \
   V(StringLength)                               \
   V(SubI)                                       \
   V(TaggedToI)                                  \
-  V(ToFastProperties)                           \
   V(Throw)                                      \
+  V(ToFastProperties)                           \
   V(Typeof)                                     \
   V(TypeofIs)                                   \
   V(TypeofIsAndBranch)                          \
-  V(IsConstructCall)                            \
-  V(IsConstructCallAndBranch)                   \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
   V(ValueOf)
 
 
-#define DECLARE_INSTRUCTION(type)                \
-  virtual bool Is##type() const { return true; } \
-  static L##type* cast(LInstruction* instr) {    \
-    ASSERT(instr->Is##type());                   \
-    return reinterpret_cast<L##type*>(instr);    \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual Opcode opcode() const { return LInstruction::k##type; } \
+  virtual void CompileToNative(LCodeGen* generator);              \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                     \
+    ASSERT(instr->Is##type());                                    \
+    return reinterpret_cast<L##type*>(instr);                     \
   }
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
-  virtual void CompileToNative(LCodeGen* generator);        \
-  virtual const char* Mnemonic() const { return mnemonic; } \
-  DECLARE_INSTRUCTION(type)
-
-
 #define DECLARE_HYDROGEN_ACCESSOR(type)     \
   H##type* hydrogen() const {               \
     return H##type::cast(hydrogen_value()); \
@@ -205,10 +211,25 @@
   virtual void PrintDataTo(StringStream* stream) = 0;
   virtual void PrintOutputOperandTo(StringStream* stream) = 0;
 
-  // Declare virtual type testers.
-#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
-  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
 
   virtual bool IsControl() const { return false; }
   virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
@@ -335,8 +356,13 @@
     parallel_moves_[AFTER] = NULL;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  // Can't use the DECLARE-macro here because of sub-classes.
+  virtual bool IsGap() const { return true; }
   virtual void PrintDataTo(StringStream* stream);
+  static LGap* cast(LInstruction* instr) {
+    ASSERT(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
 
   bool IsRedundant() const;
 
@@ -366,6 +392,14 @@
 };
 
 
+class LInstructionGap: public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
 class LGoto: public LTemplateInstruction<0, 0, 0> {
  public:
   LGoto(int block_id, bool include_stack_check = false)
@@ -454,7 +488,6 @@
 template<int I, int T>
 class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
-  DECLARE_INSTRUCTION(ControlInstruction)
   virtual bool IsControl() const { return true; }
 
   int true_block_id() const { return true_block_id_; }
@@ -638,6 +671,28 @@
 };
 
 
+class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCmpSymbolEq(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
+};
+
+
+class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
+};
+
+
 class LIsNull: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LIsNull(LOperand* value) {
@@ -712,6 +767,31 @@
 };
 
 
+class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LIsUndetectable(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
+};
+
+
+class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
+ public:
+  explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LHasInstanceType(LOperand* value) {
@@ -828,6 +908,20 @@
 };
 
 
+class LIn: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LIn(LOperand* key, LOperand* object) {
+    inputs_[0] = key;
+    inputs_[1] = object;
+  }
+
+  LOperand* key() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(In, "in")
+};
+
+
 class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
  public:
   LInstanceOf(LOperand* left, LOperand* right) {
@@ -1090,6 +1184,7 @@
 
   Token::Value op() const { return op_; }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1106,6 +1201,7 @@
     inputs_[1] = right;
   }
 
+  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
   virtual void CompileToNative(LCodeGen* generator);
   virtual const char* Mnemonic() const;
 
@@ -1393,6 +1489,23 @@
 };
 
 
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInvokeFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* function() { return inputs_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallKeyed(LOperand* key) {
@@ -1685,6 +1798,21 @@
 };
 
 
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+};
+
+
 class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringCharCodeAt(LOperand* string, LOperand* index) {
@@ -1783,6 +1911,47 @@
 };
 
 
+class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampDToUint8(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8: public LTemplateInstruction<1, 1, 2> {
+ public:
+  LClampTToUint8(LOperand* value,
+                 LOperand* temp,
+                 LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
 class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckNonSmi(LOperand* value) {
@@ -2158,7 +2327,6 @@
 };
 
 #undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
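
With DECLARE_INSTRUCTION gone, the header now derives every Is##type() predicate from a single virtual opcode() instead of one virtual tester per instruction class. Stripped to its essentials (the Instruction/AddInstruction names below are hypothetical stand-ins, not the real Lithium classes), the pattern is:

#define CONCRETE_INSTRUCTION_LIST(V) \
  V(Add)                             \
  V(Return)

class Instruction {
 public:
  enum Opcode {
#define DECLARE_OPCODE(type) k##type,
    CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
    kNumberOfInstructions
  };
  virtual ~Instruction() {}
  virtual Opcode opcode() const = 0;  // one virtual call per instruction

  // Non-virtual testers: a single opcode comparison replaces the old
  // per-instruction virtual Is##type() functions.
#define DECLARE_PREDICATE(type) \
  bool Is##type() const { return opcode() == k##type; }
  CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE
};

class AddInstruction : public Instruction {
 public:
  virtual Opcode opcode() const { return kAdd; }
};

LGap keeps a separate virtual IsGap() because its subclasses (such as LInstructionGap above) must all answer true, which a per-opcode predicate cannot express.
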
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 7f027f7..2d28579 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -201,8 +201,8 @@
                                        Register scratch) {
   if (emit_debug_code()) {
     // Check that the object is not in new space.
-    NearLabel not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space);
+    Label not_in_new_space;
+    InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
     Abort("new-space object passed to RecordWriteHelper");
     bind(&not_in_new_space);
   }
@@ -221,6 +221,42 @@
 }
 
 
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch,
+                                Label::Distance near_jump) {
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    if (scratch.is(object)) {
+      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
+      and_(scratch, kScratchRegister);
+    } else {
+      movq(scratch, ExternalReference::new_space_mask(isolate()));
+      and_(scratch, object);
+    }
+    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
+    cmpq(scratch, kScratchRegister);
+    j(cc, branch, near_jump);
+  } else {
+    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+    intptr_t new_space_start =
+        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
+    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+    if (scratch.is(object)) {
+      addq(scratch, kScratchRegister);
+    } else {
+      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+    }
+    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
+    j(cc, branch, near_jump);
+  }
+}
+
+
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
@@ -287,8 +323,8 @@
   Label done;
 
   if (emit_debug_code()) {
-    NearLabel okay;
-    JumpIfNotSmi(object, &okay);
+    Label okay;
+    JumpIfNotSmi(object, &okay, Label::kNear);
     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
     bind(&okay);
 
@@ -344,13 +380,13 @@
 
 void MacroAssembler::AssertFastElements(Register elements) {
   if (emit_debug_code()) {
-    NearLabel ok;
+    Label ok;
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedArrayMapRootIndex);
-    j(equal, &ok);
+    j(equal, &ok, Label::kNear);
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedCOWArrayMapRootIndex);
-    j(equal, &ok);
+    j(equal, &ok, Label::kNear);
     Abort("JSObject with fast elements map has slow elements");
     bind(&ok);
   }
@@ -358,8 +394,8 @@
 
 
 void MacroAssembler::Check(Condition cc, const char* msg) {
-  NearLabel L;
-  j(cc, &L);
+  Label L;
+  j(cc, &L, Label::kNear);
   Abort(msg);
   // will not return here
   bind(&L);
@@ -371,9 +407,9 @@
   int frame_alignment_mask = frame_alignment - 1;
   if (frame_alignment > kPointerSize) {
     ASSERT(IsPowerOf2(frame_alignment));
-    NearLabel alignment_as_expected;
+    Label alignment_as_expected;
     testq(rsp, Immediate(frame_alignment_mask));
-    j(zero, &alignment_as_expected);
+    j(zero, &alignment_as_expected, Label::kNear);
     // Abort if stack is not aligned.
     int3();
     bind(&alignment_as_expected);
@@ -384,9 +420,9 @@
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
-  NearLabel ok;
+  Label ok;
   testl(result, result);
-  j(not_zero, &ok);
+  j(not_zero, &ok, Label::kNear);
   testl(op, op);
   j(sign, then_label);
   bind(&ok);
@@ -425,9 +461,9 @@
 }
 
 
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
   ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
-  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
@@ -650,6 +686,7 @@
   Label leave_exit_frame;
   Label write_back;
 
+  Factory* factory = isolate()->factory();
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -697,7 +734,7 @@
 
   // Check if the function scheduled an exception.
   movq(rsi, scheduled_exception_address);
-  Cmp(Operand(rsi, 0), FACTORY->the_hole_value());
+  Cmp(Operand(rsi, 0), factory->the_hole_value());
   j(not_equal, &promote_scheduled_exception);
 
   LeaveApiExitFrame();
@@ -712,7 +749,7 @@
 
   bind(&empty_result);
   // It was zero; the result is undefined.
-  Move(rax, FACTORY->undefined_value());
+  Move(rax, factory->undefined_value());
   jmp(&prologue);
 
   // HandleScope limit has changed. Delete allocated extensions.
@@ -754,7 +791,7 @@
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
-                                   CallWrapper* call_wrapper) {
+                                   const CallWrapper& call_wrapper) {
   // Calls are not allowed in some stubs.
   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
@@ -763,7 +800,7 @@
   // parameter count to avoid emitting code to do the check.
   ParameterCount expected(0);
   GetBuiltinEntry(rdx, id);
-  InvokeCode(rdx, expected, expected, flag, call_wrapper);
+  InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
 }
 
 
@@ -831,8 +868,8 @@
     if (allow_stub_calls()) {
       Assert(equal, "Uninitialized kSmiConstantRegister");
     } else {
-      NearLabel ok;
-      j(equal, &ok);
+      Label ok;
+      j(equal, &ok, Label::kNear);
       int3();
       bind(&ok);
     }
@@ -894,8 +931,8 @@
 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
   if (emit_debug_code()) {
     testb(dst, Immediate(0x01));
-    NearLabel ok;
-    j(zero, &ok);
+    Label ok;
+    j(zero, &ok, Label::kNear);
     if (allow_stub_calls()) {
       Abort("Integer32ToSmiField writing to non-smi location");
     } else {
@@ -1052,6 +1089,24 @@
 }
 
 
+void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
+                                 Label* on_not_smis,
+                                 Label::Distance near_jump) {
+  if (dst.is(src1) || dst.is(src2)) {
+    ASSERT(!src1.is(kScratchRegister));
+    ASSERT(!src2.is(kScratchRegister));
+    movq(kScratchRegister, src1);
+    or_(kScratchRegister, src2);
+    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    or_(dst, src2);
+    JumpIfNotSmi(dst, on_not_smis, near_jump);
+  }
+}
+
+
 Condition MacroAssembler::CheckSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
   testb(src, Immediate(kSmiTagMask));
@@ -1162,6 +1217,95 @@
 }
 
 
+void MacroAssembler::JumpIfNotValidSmiValue(Register src,
+                                            Label* on_invalid,
+                                            Label::Distance near_jump) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid, near_jump);
+}
+
+
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+                                                Label* on_invalid,
+                                                Label::Distance near_jump) {
+  Condition is_valid = CheckUInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid, near_jump);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register src,
+                               Label* on_smi,
+                               Label::Distance near_jump) {
+  Condition smi = CheckSmi(src);
+  j(smi, on_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register src,
+                                  Label* on_not_smi,
+                                  Label::Distance near_jump) {
+  Condition smi = CheckSmi(src);
+  j(NegateCondition(smi), on_not_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpUnlessNonNegativeSmi(
+    Register src, Label* on_not_smi_or_negative,
+    Label::Distance near_jump) {
+  Condition non_negative_smi = CheckNonNegativeSmi(src);
+  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+                                             Smi* constant,
+                                             Label* on_equals,
+                                             Label::Distance near_jump) {
+  SmiCompare(src, constant);
+  j(equal, on_equals, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+                                      Register src2,
+                                      Label* on_not_both_smi,
+                                      Label::Distance near_jump) {
+  Condition both_smi = CheckBothSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
+                                                  Register src2,
+                                                  Label* on_not_both_smi,
+                                                  Label::Distance near_jump) {
+  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
+}
+
+
+void MacroAssembler::SmiTryAddConstant(Register dst,
+                                       Register src,
+                                       Smi* constant,
+                                       Label* on_not_smi_result,
+                                       Label::Distance near_jump) {
+  // Does not assume that src is a smi.
+  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src.is(kScratchRegister));
+
+  JumpIfNotSmi(src, on_not_smi_result, near_jump);
+  Register tmp = (dst.is(src) ? kScratchRegister : dst);
+  LoadSmiConstant(tmp, constant);
+  addq(tmp, src);
+  j(overflow, on_not_smi_result, near_jump);
+  if (dst.is(src)) {
+    movq(dst, tmp);
+  }
+}
+
+
 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1218,6 +1362,30 @@
 }
 
 
+void MacroAssembler::SmiAddConstant(Register dst,
+                                    Register src,
+                                    Smi* constant,
+                                    Label* on_not_smi_result,
+                                    Label::Distance near_jump) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    LoadSmiConstant(kScratchRegister, constant);
+    addq(kScratchRegister, src);
+    j(overflow, on_not_smi_result, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    LoadSmiConstant(dst, constant);
+    addq(dst, src);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1242,17 +1410,148 @@
 }
 
 
+void MacroAssembler::SmiSubConstant(Register dst,
+                                    Register src,
+                                    Smi* constant,
+                                    Label* on_not_smi_result,
+                                    Label::Distance near_jump) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test the non-negativeness before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result, near_jump);
+      LoadSmiConstant(kScratchRegister, constant);
+      subq(dst, kScratchRegister);
+    } else {
+      // Subtract by adding the negation.
+      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+      addq(kScratchRegister, dst);
+      j(overflow, on_not_smi_result, near_jump);
+      movq(dst, kScratchRegister);
+    }
+  } else {
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test the non-negativeness before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result, near_jump);
+      LoadSmiConstant(dst, constant);
+      // Adding and subtracting the min-value gives the same result, it only
+      // differs on the overflow bit, which we don't check here.
+      addq(dst, src);
+    } else {
+      // Subtract by adding the negation.
+      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+      addq(dst, src);
+      j(overflow, on_not_smi_result, near_jump);
+    }
+  }
+}
+
+
+void MacroAssembler::SmiNeg(Register dst,
+                            Register src,
+                            Label* on_smi_result,
+                            Label::Distance near_jump) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    movq(kScratchRegister, src);
+    neg(dst);  // Low 32 bits are retained as zero by negation.
+    // Test if result is zero or Smi::kMinValue.
+    cmpq(dst, kScratchRegister);
+    j(not_equal, on_smi_result, near_jump);
+    movq(src, kScratchRegister);
+  } else {
+    movq(dst, src);
+    neg(dst);
+    cmpq(dst, src);
+    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+    j(not_equal, on_smi_result, near_jump);
+  }
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+    addq(kScratchRegister, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    addq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            const Operand& src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+    addq(kScratchRegister, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    ASSERT(!src2.AddressUsesRegister(dst));
+    movq(dst, src1);
+    addq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
 void MacroAssembler::SmiAdd(Register dst,
                             Register src1,
                             Register src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible.
-  ASSERT(!dst.is(src2));
   if (!dst.is(src1)) {
-    movq(dst, src1);
+    if (emit_debug_code()) {
+      movq(kScratchRegister, src1);
+      addq(kScratchRegister, src2);
+      Check(no_overflow, "Smi addition overflow");
+    }
+    lea(dst, Operand(src1, src2, times_1, 0));
+  } else {
+    addq(dst, src2);
+    Assert(no_overflow, "Smi addition overflow");
   }
-  addq(dst, src2);
-  Assert(no_overflow, "Smi addition overflow");
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    cmpq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    subq(dst, src2);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
 }
 
 
@@ -1270,6 +1569,25 @@
 
 void MacroAssembler::SmiSub(Register dst,
                             Register src1,
+                            const Operand& src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src2);
+    cmpq(src1, kScratchRegister);
+    j(overflow, on_not_smi_result, near_jump);
+    subq(src1, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
                             const Operand& src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible (e.g., subtracting two positive smis).
@@ -1281,6 +1599,180 @@
 }
 
 
+void MacroAssembler::SmiMul(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT(!dst.is(src2));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+
+  if (dst.is(src1)) {
+    Label failure, zero_correct_result;
+    movq(kScratchRegister, src1);  // Create backup for later testing.
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, &failure, Label::kNear);
+
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    Label correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result, Label::kNear);
+
+    movq(dst, kScratchRegister);
+    xor_(dst, src2);
+    // Result was positive zero.
+    j(positive, &zero_correct_result, Label::kNear);
+
+    bind(&failure);  // Reused failure exit, restores src1.
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result, near_jump);
+
+    bind(&zero_correct_result);
+    Set(dst, 0);
+
+    bind(&correct_result);
+  } else {
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    Label correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result, Label::kNear);
+    // One of src1 and src2 is zero, so check whether the other is
+    // negative.
+    movq(kScratchRegister, src1);
+    xor_(kScratchRegister, src2);
+    j(negative, on_not_smi_result, near_jump);
+    bind(&correct_result);
+  }
+}
+
+
+void MacroAssembler::SmiDiv(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  // Check for 0 divisor (result is +/-Infinity).
+  testq(src2, src2);
+  j(zero, on_not_smi_result, near_jump);
+
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(rax, src1);
+  // We need to rule out dividing Smi::kMinValue by -1, since that would
+  // overflow in idiv and raise an exception.
+  // We combine this with negative zero test (negative zero only happens
+  // when dividing zero by a negative number).
+
+  // We overshoot a little and go to slow case if we divide min-value
+  // by any negative value, not just -1.
+  Label safe_div;
+  testl(rax, Immediate(0x7fffffff));
+  j(not_zero, &safe_div, Label::kNear);
+  testq(src2, src2);
+  if (src1.is(rax)) {
+    j(positive, &safe_div, Label::kNear);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result, near_jump);
+  } else {
+    j(negative, on_not_smi_result, near_jump);
+  }
+  bind(&safe_div);
+
+  SmiToInteger32(src2, src2);
+  // Sign extend src1 into edx:eax.
+  cdq();
+  idivl(src2);
+  Integer32ToSmi(src2, src2);
+  // Check that the remainder is zero.
+  testl(rdx, rdx);
+  if (src1.is(rax)) {
+    Label smi_result;
+    j(zero, &smi_result, Label::kNear);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result, near_jump);
+    bind(&smi_result);
+  } else {
+    j(not_zero, on_not_smi_result, near_jump);
+  }
+  if (!dst.is(src1) && src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  Integer32ToSmi(dst, rax);
+}
+
+
+void MacroAssembler::SmiMod(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+  ASSERT(!src1.is(src2));
+
+  testq(src2, src2);
+  j(zero, on_not_smi_result, near_jump);
+
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(rax, src1);
+  SmiToInteger32(src2, src2);
+
+  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+  Label safe_div;
+  cmpl(rax, Immediate(Smi::kMinValue));
+  j(not_equal, &safe_div, Label::kNear);
+  cmpl(src2, Immediate(-1));
+  j(not_equal, &safe_div, Label::kNear);
+  // Retag inputs and go slow case.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  jmp(on_not_smi_result, near_jump);
+  bind(&safe_div);
+
+  // Sign extend eax into edx:eax.
+  cdq();
+  idivl(src2);
+  // Restore smi tags on inputs.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  // Check for a negative zero result.  If the result is zero, and the
+  // dividend is negative, go slow to return a floating point negative zero.
+  Label smi_result;
+  testl(rdx, rdx);
+  j(not_zero, &smi_result, Label::kNear);
+  testq(src1, src1);
+  j(negative, on_not_smi_result, near_jump);
+  bind(&smi_result);
+  Integer32ToSmi(dst, rdx);
+}
+
+
 void MacroAssembler::SmiNot(Register dst, Register src) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));
@@ -1387,11 +1879,28 @@
 }
 
 
+void MacroAssembler::SmiShiftLogicalRightConstant(
+    Register dst, Register src, int shift_value,
+    Label* on_not_smi_result, Label::Distance near_jump) {
+  // Logical right shift interprets its result as an *unsigned* number.
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movq(dst, src);
+    if (shift_value == 0) {
+      testq(dst, dst);
+      j(negative, on_not_smi_result, near_jump);
+    }
+    shr(dst, Immediate(shift_value + kSmiShift));
+    shl(dst, Immediate(kSmiShift));
+  }
+}
+
+
 void MacroAssembler::SmiShiftLeft(Register dst,
                                   Register src1,
                                   Register src2) {
   ASSERT(!dst.is(rcx));
-  NearLabel result_ok;
   // Untag shift amount.
   if (!dst.is(src1)) {
     movq(dst, src1);
@@ -1403,6 +1912,45 @@
 }
 
 
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+                                          Register src1,
+                                          Register src2,
+                                          Label* on_not_smi_result,
+                                          Label::Distance near_jump) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(rcx));
+  // dst and src1 can be the same, because the one case that bails out
+  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movq(kScratchRegister, rcx);
+  }
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
+  SmiToInteger32(rcx, src2);
+  orl(rcx, Immediate(kSmiShift));
+  shr_cl(dst);  // Shift is rcx modulo 0x1f + 32.
+  shl(dst, Immediate(kSmiShift));
+  testq(dst, dst);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    Label positive_result;
+    j(positive, &positive_result, Label::kNear);
+    if (src1.is(rcx)) {
+      movq(src1, kScratchRegister);
+    } else {
+      movq(src2, kScratchRegister);
+    }
+    jmp(on_not_smi_result, near_jump);
+    bind(&positive_result);
+  } else {
+    // src2 was zero and src1 negative.
+    j(negative, on_not_smi_result, near_jump);
+  }
+}
+
+
 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                              Register src1,
                                              Register src2) {
@@ -1430,6 +1978,45 @@
 }
 
 
+void MacroAssembler::SelectNonSmi(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smis,
+                                  Label::Distance near_jump) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(src1));
+  ASSERT(!dst.is(src2));
+  // The operands must not both be smis.
+#ifdef DEBUG
+  if (allow_stub_calls()) {  // Check contains a stub call.
+    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+  }
+#endif
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  movl(kScratchRegister, Immediate(kSmiTagMask));
+  and_(kScratchRegister, src1);
+  testl(kScratchRegister, src2);
+  // If non-zero then both are smis.
+  j(not_zero, on_not_smis, near_jump);
+
+  // Exactly one operand is a smi.
+  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+  // kScratchRegister still holds src1 & kSmiTagMask (either zero or one).
+  subq(kScratchRegister, Immediate(1));
+  // If src1 is a smi, the scratch register is now all 1s, else all 0s.
+  movq(dst, src1);
+  xor_(dst, src2);
+  and_(dst, kScratchRegister);
+  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+  xor_(dst, src1);
+  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
+
+
 SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                     Register src,
                                     int shift) {
@@ -1471,6 +2058,97 @@
 }
 
 
+void MacroAssembler::JumpIfNotString(Register object,
+                                     Register object_map,
+                                     Label* not_string,
+                                     Label::Distance near_jump) {
+  Condition is_smi = CheckSmi(object);
+  j(is_smi, not_string, near_jump);
+  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
+  j(above_equal, not_string, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
+    Register first_object,
+    Register second_object,
+    Register scratch1,
+    Register scratch2,
+    Label* on_fail,
+    Label::Distance near_jump) {
+  // Check that both objects are not smis.
+  Condition either_smi = CheckEitherSmi(first_object, second_object);
+  j(either_smi, on_fail, near_jump);
+
+  // Load instance type for both strings.
+  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail, near_jump);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+    Register instance_type,
+    Register scratch,
+    Label* failure,
+    Label::Distance near_jump) {
+  if (!scratch.is(instance_type)) {
+    movl(scratch, instance_type);
+  }
+
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+  andl(scratch, Immediate(kFlatAsciiStringMask));
+  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+  j(not_equal, failure, near_jump);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+    Register first_object_instance_type,
+    Register second_object_instance_type,
+    Register scratch1,
+    Register scratch2,
+    Label* on_fail,
+    Label::Distance near_jump) {
+  // Load instance type for both strings.
+  movq(scratch1, first_object_instance_type);
+  movq(scratch2, second_object_instance_type);
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail, near_jump);
+}
+
+
 
 void MacroAssembler::Move(Register dst, Register src) {
   if (!dst.is(src)) {
@@ -1604,12 +2282,14 @@
 }
 
 
-void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Handle<Code> code_object,
+                          RelocInfo::Mode rmode,
+                          unsigned ast_id) {
 #ifdef DEBUG
   int end_position = pc_offset() + CallSize(code_object);
 #endif
   ASSERT(RelocInfo::IsCodeTarget(rmode));
-  call(code_object, rmode);
+  call(code_object, rmode, ast_id);
 #ifdef DEBUG
   CHECK_EQ(end_position, pc_offset());
 #endif
@@ -1774,9 +2454,9 @@
   // Before returning we restore the context from the frame pointer if not NULL.
   // The frame pointer is NULL in the exception handler of a JS entry frame.
   Set(rsi, 0);  // Tentatively set context pointer to NULL
-  NearLabel skip;
+  Label skip;
   cmpq(rbp, Immediate(0));
-  j(equal, &skip);
+  j(equal, &skip, Label::kNear);
   movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   bind(&skip);
   ret(0);
@@ -1794,12 +2474,12 @@
   Load(rsp, handler_address);
 
   // Unwind the handlers until the ENTRY handler is found.
-  NearLabel loop, done;
+  Label loop, done;
   bind(&loop);
   // Load the type of the current stack handler.
   const int kStateOffset = StackHandlerConstants::kStateOffset;
   cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
-  j(equal, &done);
+  j(equal, &done, Label::kNear);
   // Fetch the next handler in the list.
   const int kNextOffset = StackHandlerConstants::kNextOffset;
   movq(rsp, Operand(rsp, kNextOffset));
@@ -1881,8 +2561,8 @@
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
@@ -1890,19 +2570,75 @@
 }
 
 
+void MacroAssembler::ClampUint8(Register reg) {
+  Label done;
+  testl(reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  setcc(negative, reg);  // 1 if negative, 0 if positive.
+  decb(reg);  // 0 if negative, 255 if positive.
+  bind(&done);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
+                                        XMMRegister temp_xmm_reg,
+                                        Register result_reg,
+                                        Register temp_reg) {
+  Label done;
+  Set(result_reg, 0);
+  xorps(temp_xmm_reg, temp_xmm_reg);
+  ucomisd(input_reg, temp_xmm_reg);
+  j(below, &done, Label::kNear);
+  uint64_t one_half = BitCast<uint64_t, double>(0.5);
+  Set(temp_reg, one_half);
+  movq(temp_xmm_reg, temp_reg);
+  addsd(temp_xmm_reg, input_reg);
+  cvttsd2si(result_reg, temp_xmm_reg);
+  testl(result_reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  Set(result_reg, 255);
+  bind(&done);
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  movq(descriptors, FieldOperand(map,
+                                 Map::kInstanceDescriptorsOrBitField3Offset));
+  Label not_smi;
+  JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
+  Move(descriptors, isolate()->factory()->empty_descriptor_array());
+  bind(&not_smi);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+  j(equal, success, RelocInfo::CODE_TARGET);
+
+  bind(&fail);
+}
+
+
 void MacroAssembler::AbortIfNotNumber(Register object) {
-  NearLabel ok;
+  Label ok;
   Condition is_smi = CheckSmi(object);
-  j(is_smi, &ok);
+  j(is_smi, &ok, Label::kNear);
   Cmp(FieldOperand(object, HeapObject::kMapOffset),
-      FACTORY->heap_number_map());
+      isolate()->factory()->heap_number_map());
   Assert(equal, "Operand not a number");
   bind(&ok);
 }
 
 
 void MacroAssembler::AbortIfSmi(Register object) {
-  NearLabel ok;
   Condition is_smi = CheckSmi(object);
   Assert(NegateCondition(is_smi), "Operand is a smi");
 }
@@ -1965,10 +2701,10 @@
   j(not_equal, miss);
 
   // Make sure that the function has an instance prototype.
-  NearLabel non_instance;
+  Label non_instance;
   testb(FieldOperand(result, Map::kBitFieldOffset),
         Immediate(1 << Map::kHasNonInstancePrototype));
-  j(not_zero, &non_instance);
+  j(not_zero, &non_instance, Label::kNear);
 
   // Get the prototype or initial map from the function.
   movq(result,
@@ -1981,13 +2717,13 @@
   j(equal, miss);
 
   // If the function does not have an initial map, we're done.
-  NearLabel done;
+  Label done;
   CmpObjectType(result, MAP_TYPE, kScratchRegister);
-  j(not_equal, &done);
+  j(not_equal, &done, Label::kNear);
 
   // Get the prototype from the initial map.
   movq(result, FieldOperand(result, Map::kPrototypeOffset));
-  jmp(&done);
+  jmp(&done, Label::kNear);
 
   // Non-instance prototype: Fetch prototype from constructor field
   // in initial map.
@@ -2044,25 +2780,44 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+  // This macro takes the dst register to make the code more readable
+  // at the call sites. However, the dst register has to be rcx to
+  // follow the calling convention, which requires the call kind to be
+  // in rcx.
+  ASSERT(dst.is(rcx));
+  if (call_kind == CALL_AS_FUNCTION) {
+    LoadSmiConstant(dst, Smi::FromInt(1));
+  } else {
+    LoadSmiConstant(dst, Smi::FromInt(0));
+  }
+}
+
+
 void MacroAssembler::InvokeCode(Register code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
-                                CallWrapper* call_wrapper) {
-  NearLabel done;
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
+  Label done;
   InvokePrologue(expected,
                  actual,
                  Handle<Code>::null(),
                  code,
                  &done,
                  flag,
-                 call_wrapper);
+                 Label::kNear,
+                 call_wrapper,
+                 call_kind);
   if (flag == CALL_FUNCTION) {
-    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(rcx, call_kind);
     call(code);
-    if (call_wrapper != NULL) call_wrapper->AfterCall();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(rcx, call_kind);
     jmp(code);
   }
   bind(&done);
@@ -2074,8 +2829,9 @@
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
-                                CallWrapper* call_wrapper) {
-  NearLabel done;
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
+  Label done;
   Register dummy = rax;
   InvokePrologue(expected,
                  actual,
@@ -2083,13 +2839,17 @@
                  dummy,
                  &done,
                  flag,
-                 call_wrapper);
+                 Label::kNear,
+                 call_wrapper,
+                 call_kind);
   if (flag == CALL_FUNCTION) {
-    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(rcx, call_kind);
     Call(code, rmode);
-    if (call_wrapper != NULL) call_wrapper->AfterCall();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(rcx, call_kind);
     Jump(code, rmode);
   }
   bind(&done);
@@ -2099,7 +2859,8 @@
 void MacroAssembler::InvokeFunction(Register function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   ASSERT(function.is(rdi));
   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -2110,14 +2871,15 @@
   movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(rbx);
-  InvokeCode(rdx, expected, actual, flag, call_wrapper);
+  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   Move(rdi, Handle<JSFunction>(function));
@@ -2128,7 +2890,7 @@
     // the Code object every time we call the function.
     movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
     ParameterCount expected(function->shared()->formal_parameter_count());
-    InvokeCode(rdx, expected, actual, flag, call_wrapper);
+    InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
   } else {
     // Invoke the cached code.
     Handle<Code> code(function->code());
@@ -2138,7 +2900,79 @@
                actual,
                RelocInfo::CODE_TARGET,
                flag,
-               call_wrapper);
+               call_wrapper,
+               call_kind);
+  }
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_register,
+                                    Label* done,
+                                    InvokeFlag flag,
+                                    Label::Distance near_jump,
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
+  bool definitely_matches = false;
+  Label invoke;
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      Set(rax, actual.immediate());
+      if (expected.immediate() ==
+              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+        // Don't worry about adapting arguments for built-ins that
+        // don't want that done. Skip adaptation code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        Set(rbx, expected.immediate());
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      // Expected is in register, actual is immediate. This is the
+      // case when we invoke function values without going through the
+      // IC mechanism.
+      cmpq(expected.reg(), Immediate(actual.immediate()));
+      j(equal, &invoke, Label::kNear);
+      ASSERT(expected.reg().is(rbx));
+      Set(rax, actual.immediate());
+    } else if (!expected.reg().is(actual.reg())) {
+      // Both expected and actual are in (different) registers. This
+      // is the case when we invoke functions using call and apply.
+      cmpq(expected.reg(), actual.reg());
+      j(equal, &invoke, Label::kNear);
+      ASSERT(actual.reg().is(rax));
+      ASSERT(expected.reg().is(rbx));
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    if (!code_constant.is_null()) {
+      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    } else if (!code_register.is(rdx)) {
+      movq(rdx, code_register);
+    }
+
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor));
+      SetCallKind(rcx, call_kind);
+      Call(adaptor, RelocInfo::CODE_TARGET);
+      call_wrapper.AfterCall();
+      jmp(done, near_jump);
+    } else {
+      SetCallKind(rcx, call_kind);
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&invoke);
   }
 }
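
The non-template InvokePrologue keeps the same argument-matching policy as before: the ArgumentsAdaptorTrampoline is only reached when the expected and actual counts are known to differ, and built-ins marked with SharedFunctionInfo::kDontAdaptArgumentsSentinel opt out of adaptation entirely. A minimal plain-C++ mirror of that decision for the immediate/immediate case (a sketch only; the helper name and the sentinel value below are assumptions, not part of this patch):

  // Returns true when control must go through the ArgumentsAdaptorTrampoline.
  static const int kDontAdaptArgumentsSentinelSketch = -1;  // assumed sentinel value

  static bool NeedsArgumentsAdaptor(int expected, int actual) {
    if (expected == actual) return false;                             // exact match, call directly
    if (expected == kDontAdaptArgumentsSentinelSketch) return false;  // built-in opts out
    return true;                                                      // counts differ, adapt frames
  }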
 
@@ -2152,7 +2986,7 @@
   push(kScratchRegister);
   if (emit_debug_code()) {
     movq(kScratchRegister,
-         FACTORY->undefined_value(),
+         isolate()->factory()->undefined_value(),
          RelocInfo::EMBEDDED_OBJECT);
     cmpq(Operand(rsp, 0), kScratchRegister);
     Check(not_equal, "code object not properly patched");
@@ -2320,7 +3154,7 @@
   // Check the context is a global context.
   if (emit_debug_code()) {
     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
-        FACTORY->global_context_map());
+        isolate()->factory()->global_context_map());
     Check(equal, "JSGlobalObject::global_context should be a global context.");
   }
 
@@ -2822,7 +3656,7 @@
   movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, FACTORY->meta_map(), &fail, false);
+    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
     jmp(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 4c17720..16f6d8d 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -29,6 +29,7 @@
 #define V8_X64_MACRO_ASSEMBLER_X64_H_
 
 #include "assembler.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
@@ -44,6 +45,7 @@
   RESULT_CONTAINS_TOP = 1 << 1
 };
 
+
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.
@@ -61,7 +63,6 @@
 
 // Forward declaration.
 class JumpTarget;
-class CallWrapper;
 
 struct SmiIndex {
   SmiIndex(Register index_register, ScaleFactor scale)
@@ -146,11 +147,11 @@
   // Check if object is in new space. The condition cc can be equal or
   // not_equal. If it is equal, a jump will be done if the object is in new
   // space. The register scratch can be object itself, but it will be clobbered.
-  template <typename LabelType>
   void InNewSpace(Register object,
                   Register scratch,
                   Condition cc,
-                  LabelType* branch);
+                  Label* branch,
+                  Label::Distance near_jump = Label::kFar);
 
   // For page containing |object| mark region covering [object+offset]
   // dirty. |object| is the object being stored into, |value| is the
@@ -240,37 +241,46 @@
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
+  // Set up call kind marking in rcx. The method takes rcx as an
+  // explicit first parameter to make the code more readable at the
+  // call sites.
+  void SetCallKind(Register dst, CallKind kind);
+
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag,
-                  CallWrapper* call_wrapper = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
                   InvokeFlag flag,
-                  CallWrapper* call_wrapper = NULL);
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      CallWrapper* call_wrapper = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      CallWrapper* call_wrapper = NULL);
+                      const CallWrapper& call_wrapper,
+                      CallKind call_kind);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
                      InvokeFlag flag,
-                     CallWrapper* call_wrapper = NULL);
+                     const CallWrapper& call_wrapper = NullCallWrapper());
 
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
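
Every invoke helper now takes the call wrapper by const reference together with an explicit CallKind, instead of an optional CallWrapper pointer that defaulted to NULL. A hypothetical call site (not taken from this patch) written against the new signatures:

  // rdi holds the JSFunction to invoke; the argument count is in rax.
  ParameterCount actual(rax);
  __ InvokeFunction(rdi, actual, CALL_FUNCTION,
                    NullCallWrapper(), CALL_AS_METHOD);
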
@@ -327,11 +337,11 @@
   // If either argument is not a smi, jump to on_not_smis and retain
   // the original values of source registers. The destination register
   // may be changed if it's not one of the source registers.
-  template <typename LabelType>
   void SmiOrIfSmis(Register dst,
                    Register src1,
                    Register src2,
-                   LabelType* on_not_smis);
+                   Label* on_not_smis,
+                   Label::Distance near_jump = Label::kFar);
 
 
   // Simple comparison of smis.  Both sides must be known smis to use these,
@@ -389,42 +399,45 @@
   // above with a conditional jump.
 
   // Jump if the value cannot be represented by a smi.
-  template <typename LabelType>
-  void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
+  void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
+                              Label::Distance near_jump = Label::kFar);
 
   // Jump if the unsigned integer value cannot be represented by a smi.
-  template <typename LabelType>
-  void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
+  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
+                                  Label::Distance near_jump = Label::kFar);
 
   // Jump to label if the value is a tagged smi.
-  template <typename LabelType>
-  void JumpIfSmi(Register src, LabelType* on_smi);
+  void JumpIfSmi(Register src,
+                 Label* on_smi,
+                 Label::Distance near_jump = Label::kFar);
 
   // Jump to label if the value is not a tagged smi.
-  template <typename LabelType>
-  void JumpIfNotSmi(Register src, LabelType* on_not_smi);
+  void JumpIfNotSmi(Register src,
+                    Label* on_not_smi,
+                    Label::Distance near_jump = Label::kFar);
 
   // Jump to label if the value is not a non-negative tagged smi.
-  template <typename LabelType>
-  void JumpUnlessNonNegativeSmi(Register src, LabelType* on_not_smi);
+  void JumpUnlessNonNegativeSmi(Register src,
+                                Label* on_not_smi,
+                                Label::Distance near_jump = Label::kFar);
 
   // Jump to label if the value, which must be a tagged smi, has value equal
   // to the constant.
-  template <typename LabelType>
   void JumpIfSmiEqualsConstant(Register src,
                                Smi* constant,
-                               LabelType* on_equals);
+                               Label* on_equals,
+                               Label::Distance near_jump = Label::kFar);
 
   // Jump if either or both register are not smi values.
-  template <typename LabelType>
   void JumpIfNotBothSmi(Register src1,
                         Register src2,
-                        LabelType* on_not_both_smi);
+                        Label* on_not_both_smi,
+                        Label::Distance near_jump = Label::kFar);
 
   // Jump if either or both register are not non-negative smi values.
-  template <typename LabelType>
   void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
-                                    LabelType* on_not_both_smi);
+                                    Label* on_not_both_smi,
+                                    Label::Distance near_jump = Label::kFar);
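
With the LabelType template parameter gone, callers choose between near and far jumps through the Label::Distance argument, which defaults to Label::kFar. A hypothetical caller that knows the target is close can ask for the short encoding (sketch, not from this patch):

  Label not_smi;
  __ JumpIfNotSmi(rax, &not_smi, Label::kNear);  // short jump; the target is nearby
  // ... fast path for smis ...
  __ bind(&not_smi);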
 
   // Operations on tagged smi values.
 
@@ -434,11 +447,11 @@
   // Optimistically adds an integer constant to a supposed smi.
   // If the src is not a smi, or the result is not a smi, jump to
   // the label.
-  template <typename LabelType>
   void SmiTryAddConstant(Register dst,
                          Register src,
                          Smi* constant,
-                         LabelType* on_not_smi_result);
+                         Label* on_not_smi_result,
+                         Label::Distance near_jump = Label::kFar);
 
   // Add an integer constant to a tagged smi, giving a tagged smi as result.
   // No overflow testing on the result is done.
@@ -450,11 +463,11 @@
 
   // Add an integer constant to a tagged smi, giving a tagged smi as result,
   // or jumping to a label if the result cannot be represented by a smi.
-  template <typename LabelType>
   void SmiAddConstant(Register dst,
                       Register src,
                       Smi* constant,
-                      LabelType* on_not_smi_result);
+                      Label* on_not_smi_result,
+                      Label::Distance near_jump = Label::kFar);
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
   // result. No testing on the result is done. Sets the N and Z flags
@@ -463,32 +476,32 @@
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
   // result, or jumping to a label if the result cannot be represented by a smi.
-  template <typename LabelType>
   void SmiSubConstant(Register dst,
                       Register src,
                       Smi* constant,
-                      LabelType* on_not_smi_result);
+                      Label* on_not_smi_result,
+                      Label::Distance near_jump = Label::kFar);
 
   // Negating a smi can give a negative zero or too large positive value.
   // NOTICE: This operation jumps on success, not failure!
-  template <typename LabelType>
   void SmiNeg(Register dst,
               Register src,
-              LabelType* on_smi_result);
+              Label* on_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   // Adds smi values and return the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
-  template <typename LabelType>
   void SmiAdd(Register dst,
               Register src1,
               Register src2,
-              LabelType* on_not_smi_result);
-  template <typename LabelType>
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
   void SmiAdd(Register dst,
               Register src1,
               const Operand& src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   void SmiAdd(Register dst,
               Register src1,
@@ -497,21 +510,21 @@
   // Subtracts smi values and return the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
-  template <typename LabelType>
   void SmiSub(Register dst,
               Register src1,
               Register src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   void SmiSub(Register dst,
               Register src1,
               Register src2);
 
-  template <typename LabelType>
   void SmiSub(Register dst,
               Register src1,
               const Operand& src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   void SmiSub(Register dst,
               Register src1,
@@ -521,27 +534,27 @@
   // if possible.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
-  template <typename LabelType>
   void SmiMul(Register dst,
               Register src1,
               Register src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   // Divides one smi by another and returns the quotient.
   // Clobbers rax and rdx registers.
-  template <typename LabelType>
   void SmiDiv(Register dst,
               Register src1,
               Register src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   // Divides one smi by another and returns the remainder.
   // Clobbers rax and rdx registers.
-  template <typename LabelType>
   void SmiMod(Register dst,
               Register src1,
               Register src2,
-              LabelType* on_not_smi_result);
+              Label* on_not_smi_result,
+              Label::Distance near_jump = Label::kFar);
 
   // Bitwise operations.
   void SmiNot(Register dst, Register src);
@@ -555,11 +568,11 @@
   void SmiShiftLeftConstant(Register dst,
                             Register src,
                             int shift_value);
-  template <typename LabelType>
   void SmiShiftLogicalRightConstant(Register dst,
                                   Register src,
                                   int shift_value,
-                                  LabelType* on_not_smi_result);
+                                  Label* on_not_smi_result,
+                                  Label::Distance near_jump = Label::kFar);
   void SmiShiftArithmeticRightConstant(Register dst,
                                        Register src,
                                        int shift_value);
@@ -572,11 +585,11 @@
   // Shifts a smi value to the right, shifting in zero bits at the top, and
   // returns the unsigned interpretation of the result if that is a smi.
   // Uses and clobbers rcx, so dst may not be rcx.
-  template <typename LabelType>
   void SmiShiftLogicalRight(Register dst,
                             Register src1,
                             Register src2,
-                            LabelType* on_not_smi_result);
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump = Label::kFar);
   // Shifts a smi value to the right, sign extending the top, and
   // returns the signed interpretation of the result. That will always
   // be a valid smi value, since it's numerically smaller than the
@@ -590,11 +603,11 @@
 
   // Select the non-smi register of two registers where exactly one is a
   // smi. If neither are smis, jump to the failure label.
-  template <typename LabelType>
   void SelectNonSmi(Register dst,
                     Register src1,
                     Register src2,
-                    LabelType* on_not_smis);
+                    Label* on_not_smis,
+                    Label::Distance near_jump = Label::kFar);
 
   // Converts, if necessary, a smi to a combination of number and
   // multiplier to be used as a scaled index.
@@ -630,35 +643,36 @@
   // String macros.
 
   // If object is a string, its map is loaded into object_map.
-  template <typename LabelType>
   void JumpIfNotString(Register object,
                        Register object_map,
-                       LabelType* not_string);
+                       Label* not_string,
+                       Label::Distance near_jump = Label::kFar);
 
 
-  template <typename LabelType>
-  void JumpIfNotBothSequentialAsciiStrings(Register first_object,
-                                           Register second_object,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           LabelType* on_not_both_flat_ascii);
+  void JumpIfNotBothSequentialAsciiStrings(
+      Register first_object,
+      Register second_object,
+      Register scratch1,
+      Register scratch2,
+      Label* on_not_both_flat_ascii,
+      Label::Distance near_jump = Label::kFar);
 
   // Check whether the instance type represents a flat ascii string. Jump to the
   // label if not. If the instance type can be scratched, specify the same
   // register for both instance type and scratch.
-  template <typename LabelType>
   void JumpIfInstanceTypeIsNotSequentialAscii(
       Register instance_type,
       Register scratch,
-      LabelType *on_not_flat_ascii_string);
+      Label* on_not_flat_ascii_string,
+      Label::Distance near_jump = Label::kFar);
 
-  template <typename LabelType>
   void JumpIfBothInstanceTypesAreNotSequentialAscii(
       Register first_object_instance_type,
       Register second_object_instance_type,
       Register scratch1,
       Register scratch2,
-      LabelType* on_fail);
+      Label* on_fail,
+      Label::Distance near_jump = Label::kFar);
 
   // ---------------------------------------------------------------------------
   // Macro instructions.
@@ -692,7 +706,9 @@
 
   void Call(Address destination, RelocInfo::Mode rmode);
   void Call(ExternalReference ext);
-  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+  void Call(Handle<Code> code_object,
+            RelocInfo::Mode rmode,
+            unsigned ast_id = kNoASTId);
 
   // The size of the code generated for different call instructions.
   int CallSize(Address destination, RelocInfo::Mode rmode) {
@@ -744,7 +760,15 @@
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
-                bool is_heap_object);
+                SmiCheckType smi_check_type);
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object).
+  void DispatchMap(Register obj,
+                   Handle<Map> map,
+                   Handle<Code> success,
+                   SmiCheckType smi_check_type);
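
DispatchMap complements CheckMap: rather than jumping to a failure label on a mismatch, it branches to a code object when the map matches. A hypothetical use, where receiver_map and stub_code stand in for handles a stub compiler would supply (sketch, not from this patch):

  // Tail-call the specialized stub when rdx already holds an object with the
  // expected map; the smi check is skipped since rdx is known to be a heap object.
  __ DispatchMap(rdx, receiver_map, stub_code, DONT_DO_SMI_CHECK);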
 
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
@@ -760,6 +784,15 @@
   // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
   void FCmp();
 
+  void ClampUint8(Register reg);
+
+  void ClampDoubleToUint8(XMMRegister input_reg,
+                          XMMRegister temp_xmm_reg,
+                          Register result_reg,
+                          Register temp_reg);
+
+  void LoadInstanceDescriptors(Register map, Register descriptors);
+
   // Abort execution if argument is not a number. Used in debug code.
   void AbortIfNotNumber(Register object);
 
@@ -932,7 +965,7 @@
   // Runtime calls
 
   // Call a code stub.
-  void CallStub(CodeStub* stub);
+  void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
   // Call a code stub and return the code object called.  Try to generate
   // the code if necessary.  Do not perform a GC but instead return a retry
@@ -1102,6 +1135,7 @@
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
   static int kSafepointPushRegisterIndices[Register::kNumRegisters];
   static const int kNumSafepointSavedRegisters = 11;
+  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
   bool generating_stub_;
   bool allow_stub_calls_;
@@ -1118,14 +1152,15 @@
   Handle<Object> code_object_;
 
   // Helper functions for generating invokes.
-  template <typename LabelType>
   void InvokePrologue(const ParameterCount& expected,
                       const ParameterCount& actual,
                       Handle<Code> code_constant,
                       Register code_register,
-                      LabelType* done,
+                      Label* done,
                       InvokeFlag flag,
-                      CallWrapper* call_wrapper);
+                      Label::Distance near_jump = Label::kFar,
+                      const CallWrapper& call_wrapper = NullCallWrapper(),
+                      CallKind call_kind = CALL_AS_METHOD);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1190,21 +1225,6 @@
 };
 
 
-// Helper class for generating code or data associated with the code
-// right before or after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class CallWrapper {
- public:
-  CallWrapper() { }
-  virtual ~CallWrapper() { }
-  // Called just before emitting a call. Argument is the size of the generated
-  // call code.
-  virtual void BeforeCall(int call_size) = 0;
-  // Called just after emitting a call, i.e., at the return site for the call.
-  virtual void AfterCall() = 0;
-};
-
-
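
The CallWrapper interface is no longer declared in this header; callers now receive wrappers by const reference, with NullCallWrapper() as the do-nothing default, so the shared definition presumably lives in assembler.h, which this file already includes. A sketch of a no-op wrapper written against the removed interface (illustrative only, not the real NullCallWrapper):

  class NoOpCallWrapper : public CallWrapper {
   public:
    virtual void BeforeCall(int call_size) { }  // nothing to emit before the call
    virtual void AfterCall() { }                // nothing to emit at the return site
  };
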
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
@@ -1266,751 +1286,6 @@
 #define ACCESS_MASM(masm) masm->
 #endif
 
-// -----------------------------------------------------------------------------
-// Template implementations.
-
-static int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
-
-template <typename LabelType>
-void MacroAssembler::SmiNeg(Register dst,
-                            Register src,
-                            LabelType* on_smi_result) {
-  if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    movq(kScratchRegister, src);
-    neg(dst);  // Low 32 bits are retained as zero by negation.
-    // Test if result is zero or Smi::kMinValue.
-    cmpq(dst, kScratchRegister);
-    j(not_equal, on_smi_result);
-    movq(src, kScratchRegister);
-  } else {
-    movq(dst, src);
-    neg(dst);
-    cmpq(dst, src);
-    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
-    j(not_equal, on_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAdd(Register dst,
-                            Register src1,
-                            Register src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  ASSERT(!dst.is(src2));
-  if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    addq(kScratchRegister, src2);
-    j(overflow, on_not_smi_result);
-    movq(dst, kScratchRegister);
-  } else {
-    movq(dst, src1);
-    addq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAdd(Register dst,
-                            Register src1,
-                            const Operand& src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    addq(kScratchRegister, src2);
-    j(overflow, on_not_smi_result);
-    movq(dst, kScratchRegister);
-  } else {
-    ASSERT(!src2.AddressUsesRegister(dst));
-    movq(dst, src1);
-    addq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSub(Register dst,
-                            Register src1,
-                            Register src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  ASSERT(!dst.is(src2));
-  if (dst.is(src1)) {
-    cmpq(dst, src2);
-    j(overflow, on_not_smi_result);
-    subq(dst, src2);
-  } else {
-    movq(dst, src1);
-    subq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSub(Register dst,
-                            Register src1,
-                            const Operand& src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  if (dst.is(src1)) {
-    movq(kScratchRegister, src2);
-    cmpq(src1, kScratchRegister);
-    j(overflow, on_not_smi_result);
-    subq(src1, kScratchRegister);
-  } else {
-    movq(dst, src1);
-    subq(dst, src2);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiMul(Register dst,
-                            Register src1,
-                            Register src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-
-  if (dst.is(src1)) {
-    NearLabel failure, zero_correct_result;
-    movq(kScratchRegister, src1);  // Create backup for later testing.
-    SmiToInteger64(dst, src1);
-    imul(dst, src2);
-    j(overflow, &failure);
-
-    // Check for negative zero result.  If product is zero, and one
-    // argument is negative, go to slow case.
-    NearLabel correct_result;
-    testq(dst, dst);
-    j(not_zero, &correct_result);
-
-    movq(dst, kScratchRegister);
-    xor_(dst, src2);
-    j(positive, &zero_correct_result);  // Result was positive zero.
-
-    bind(&failure);  // Reused failure exit, restores src1.
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
-
-    bind(&zero_correct_result);
-    Set(dst, 0);
-
-    bind(&correct_result);
-  } else {
-    SmiToInteger64(dst, src1);
-    imul(dst, src2);
-    j(overflow, on_not_smi_result);
-    // Check for negative zero result.  If product is zero, and one
-    // argument is negative, go to slow case.
-    NearLabel correct_result;
-    testq(dst, dst);
-    j(not_zero, &correct_result);
-    // One of src1 and src2 is zero, the check whether the other is
-    // negative.
-    movq(kScratchRegister, src1);
-    xor_(kScratchRegister, src2);
-    j(negative, on_not_smi_result);
-    bind(&correct_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiTryAddConstant(Register dst,
-                                       Register src,
-                                       Smi* constant,
-                                       LabelType* on_not_smi_result) {
-  // Does not assume that src is a smi.
-  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src.is(kScratchRegister));
-
-  JumpIfNotSmi(src, on_not_smi_result);
-  Register tmp = (dst.is(src) ? kScratchRegister : dst);
-  LoadSmiConstant(tmp, constant);
-  addq(tmp, src);
-  j(overflow, on_not_smi_result);
-  if (dst.is(src)) {
-    movq(dst, tmp);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiAddConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    LabelType* on_not_smi_result) {
-  if (constant->value() == 0) {
-    if (!dst.is(src)) {
-      movq(dst, src);
-    }
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-
-    LoadSmiConstant(kScratchRegister, constant);
-    addq(kScratchRegister, src);
-    j(overflow, on_not_smi_result);
-    movq(dst, kScratchRegister);
-  } else {
-    LoadSmiConstant(dst, constant);
-    addq(dst, src);
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiSubConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    LabelType* on_not_smi_result) {
-  if (constant->value() == 0) {
-    if (!dst.is(src)) {
-      movq(dst, src);
-    }
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result);
-      LoadSmiConstant(kScratchRegister, constant);
-      subq(dst, kScratchRegister);
-    } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
-      addq(kScratchRegister, dst);
-      j(overflow, on_not_smi_result);
-      movq(dst, kScratchRegister);
-    }
-  } else {
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result);
-      LoadSmiConstant(dst, constant);
-      // Adding and subtracting the min-value gives the same result, it only
-      // differs on the overflow bit, which we don't check here.
-      addq(dst, src);
-    } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
-      addq(dst, src);
-      j(overflow, on_not_smi_result);
-    }
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiDiv(Register dst,
-                            Register src1,
-                            Register src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-
-  // Check for 0 divisor (result is +/-Infinity).
-  NearLabel positive_divisor;
-  testq(src2, src2);
-  j(zero, on_not_smi_result);
-
-  if (src1.is(rax)) {
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(rax, src1);
-  // We need to rule out dividing Smi::kMinValue by -1, since that would
-  // overflow in idiv and raise an exception.
-  // We combine this with negative zero test (negative zero only happens
-  // when dividing zero by a negative number).
-
-  // We overshoot a little and go to slow case if we divide min-value
-  // by any negative value, not just -1.
-  NearLabel safe_div;
-  testl(rax, Immediate(0x7fffffff));
-  j(not_zero, &safe_div);
-  testq(src2, src2);
-  if (src1.is(rax)) {
-    j(positive, &safe_div);
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
-  } else {
-    j(negative, on_not_smi_result);
-  }
-  bind(&safe_div);
-
-  SmiToInteger32(src2, src2);
-  // Sign extend src1 into edx:eax.
-  cdq();
-  idivl(src2);
-  Integer32ToSmi(src2, src2);
-  // Check that the remainder is zero.
-  testl(rdx, rdx);
-  if (src1.is(rax)) {
-    NearLabel smi_result;
-    j(zero, &smi_result);
-    movq(src1, kScratchRegister);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
-  } else {
-    j(not_zero, on_not_smi_result);
-  }
-  if (!dst.is(src1) && src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  Integer32ToSmi(dst, rax);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiMod(Register dst,
-                            Register src1,
-                            Register src2,
-                            LabelType* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-  ASSERT(!src1.is(src2));
-
-  testq(src2, src2);
-  j(zero, on_not_smi_result);
-
-  if (src1.is(rax)) {
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(rax, src1);
-  SmiToInteger32(src2, src2);
-
-  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
-  NearLabel safe_div;
-  cmpl(rax, Immediate(Smi::kMinValue));
-  j(not_equal, &safe_div);
-  cmpl(src2, Immediate(-1));
-  j(not_equal, &safe_div);
-  // Retag inputs and go slow case.
-  Integer32ToSmi(src2, src2);
-  if (src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  jmp(on_not_smi_result);
-  bind(&safe_div);
-
-  // Sign extend eax into edx:eax.
-  cdq();
-  idivl(src2);
-  // Restore smi tags on inputs.
-  Integer32ToSmi(src2, src2);
-  if (src1.is(rax)) {
-    movq(src1, kScratchRegister);
-  }
-  // Check for a negative zero result.  If the result is zero, and the
-  // dividend is negative, go slow to return a floating point negative zero.
-  NearLabel smi_result;
-  testl(rdx, rdx);
-  j(not_zero, &smi_result);
-  testq(src1, src1);
-  j(negative, on_not_smi_result);
-  bind(&smi_result);
-  Integer32ToSmi(dst, rdx);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiShiftLogicalRightConstant(
-    Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
-  // Logic right shift interprets its result as an *unsigned* number.
-  if (dst.is(src)) {
-    UNIMPLEMENTED();  // Not used.
-  } else {
-    movq(dst, src);
-    if (shift_value == 0) {
-      testq(dst, dst);
-      j(negative, on_not_smi_result);
-    }
-    shr(dst, Immediate(shift_value + kSmiShift));
-    shl(dst, Immediate(kSmiShift));
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
-                                          Register src1,
-                                          Register src2,
-                                          LabelType* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(rcx));
-  // dst and src1 can be the same, because the one case that bails out
-  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
-  NearLabel result_ok;
-  if (src1.is(rcx) || src2.is(rcx)) {
-    movq(kScratchRegister, rcx);
-  }
-  if (!dst.is(src1)) {
-    movq(dst, src1);
-  }
-  SmiToInteger32(rcx, src2);
-  orl(rcx, Immediate(kSmiShift));
-  shr_cl(dst);  // Shift is rcx modulo 0x1f + 32.
-  shl(dst, Immediate(kSmiShift));
-  testq(dst, dst);
-  if (src1.is(rcx) || src2.is(rcx)) {
-    NearLabel positive_result;
-    j(positive, &positive_result);
-    if (src1.is(rcx)) {
-      movq(src1, kScratchRegister);
-    } else {
-      movq(src2, kScratchRegister);
-    }
-    jmp(on_not_smi_result);
-    bind(&positive_result);
-  } else {
-    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SelectNonSmi(Register dst,
-                                  Register src1,
-                                  Register src2,
-                                  LabelType* on_not_smis) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(src1));
-  ASSERT(!dst.is(src2));
-  // Both operands must not be smis.
-#ifdef DEBUG
-  if (allow_stub_calls()) {  // Check contains a stub call.
-    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
-    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
-  }
-#endif
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(0, Smi::FromInt(0));
-  movl(kScratchRegister, Immediate(kSmiTagMask));
-  and_(kScratchRegister, src1);
-  testl(kScratchRegister, src2);
-  // If non-zero then both are smis.
-  j(not_zero, on_not_smis);
-
-  // Exactly one operand is a smi.
-  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
-  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
-  subq(kScratchRegister, Immediate(1));
-  // If src1 is a smi, then scratch register all 1s, else it is all 0s.
-  movq(dst, src1);
-  xor_(dst, src2);
-  and_(dst, kScratchRegister);
-  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
-  xor_(dst, src1);
-  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
-  ASSERT_EQ(0, kSmiTag);
-  Condition smi = CheckSmi(src);
-  j(smi, on_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
-  Condition smi = CheckSmi(src);
-  j(NegateCondition(smi), on_not_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpUnlessNonNegativeSmi(
-    Register src, LabelType* on_not_smi_or_negative) {
-  Condition non_negative_smi = CheckNonNegativeSmi(src);
-  j(NegateCondition(non_negative_smi), on_not_smi_or_negative);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
-                                             Smi* constant,
-                                             LabelType* on_equals) {
-  SmiCompare(src, constant);
-  j(equal, on_equals);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotValidSmiValue(Register src,
-                                            LabelType* on_invalid) {
-  Condition is_valid = CheckInteger32ValidSmiValue(src);
-  j(NegateCondition(is_valid), on_invalid);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
-                                                LabelType* on_invalid) {
-  Condition is_valid = CheckUInteger32ValidSmiValue(src);
-  j(NegateCondition(is_valid), on_invalid);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
-                                      Register src2,
-                                      LabelType* on_not_both_smi) {
-  Condition both_smi = CheckBothSmi(src1, src2);
-  j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
-                                                  Register src2,
-                                                  LabelType* on_not_both_smi) {
-  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
-  j(NegateCondition(both_smi), on_not_both_smi);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
-                                 LabelType* on_not_smis) {
-  if (dst.is(src1) || dst.is(src2)) {
-    ASSERT(!src1.is(kScratchRegister));
-    ASSERT(!src2.is(kScratchRegister));
-    movq(kScratchRegister, src1);
-    or_(kScratchRegister, src2);
-    JumpIfNotSmi(kScratchRegister, on_not_smis);
-    movq(dst, kScratchRegister);
-  } else {
-    movq(dst, src1);
-    or_(dst, src2);
-    JumpIfNotSmi(dst, on_not_smis);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotString(Register object,
-                                     Register object_map,
-                                     LabelType* not_string) {
-  Condition is_smi = CheckSmi(object);
-  j(is_smi, not_string);
-  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
-  j(above_equal, not_string);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
-                                                         Register second_object,
-                                                         Register scratch1,
-                                                         Register scratch2,
-                                                         LabelType* on_fail) {
-  // Check that both objects are not smis.
-  Condition either_smi = CheckEitherSmi(first_object, second_object);
-  j(either_smi, on_fail);
-
-  // Load instance type for both strings.
-  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
-  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
-  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
-  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
-  // Check that both are flat ascii strings.
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
-  andl(scratch1, Immediate(kFlatAsciiStringMask));
-  andl(scratch2, Immediate(kFlatAsciiStringMask));
-  // Interleave the bits to check both scratch1 and scratch2 in one test.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmpl(scratch1,
-       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
-  j(not_equal, on_fail);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
-    Register instance_type,
-    Register scratch,
-    LabelType *failure) {
-  if (!scratch.is(instance_type)) {
-    movl(scratch, instance_type);
-  }
-
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
-  andl(scratch, Immediate(kFlatAsciiStringMask));
-  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
-  j(not_equal, failure);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
-    Register first_object_instance_type,
-    Register second_object_instance_type,
-    Register scratch1,
-    Register scratch2,
-    LabelType* on_fail) {
-  // Load instance type for both strings.
-  movq(scratch1, first_object_instance_type);
-  movq(scratch2, second_object_instance_type);
-
-  // Check that both are flat ascii strings.
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
-  andl(scratch1, Immediate(kFlatAsciiStringMask));
-  andl(scratch2, Immediate(kFlatAsciiStringMask));
-  // Interleave the bits to check both scratch1 and scratch2 in one test.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmpl(scratch1,
-       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
-  j(not_equal, on_fail);
-}
-
-
-template <typename LabelType>
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                LabelType* branch) {
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    if (scratch.is(object)) {
-      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
-      and_(scratch, kScratchRegister);
-    } else {
-      movq(scratch, ExternalReference::new_space_mask(isolate()));
-      and_(scratch, object);
-    }
-    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
-    cmpq(scratch, kScratchRegister);
-    j(cc, branch);
-  } else {
-    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
-    intptr_t new_space_start =
-        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
-    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
-    if (scratch.is(object)) {
-      addq(scratch, kScratchRegister);
-    } else {
-      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
-    }
-    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
-    j(cc, branch);
-  }
-}
-
-
-template <typename LabelType>
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
-                                    const ParameterCount& actual,
-                                    Handle<Code> code_constant,
-                                    Register code_register,
-                                    LabelType* done,
-                                    InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
-  bool definitely_matches = false;
-  NearLabel invoke;
-  if (expected.is_immediate()) {
-    ASSERT(actual.is_immediate());
-    if (expected.immediate() == actual.immediate()) {
-      definitely_matches = true;
-    } else {
-      Set(rax, actual.immediate());
-      if (expected.immediate() ==
-              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
-        // Don't worry about adapting arguments for built-ins that
-        // don't want that done. Skip adaption code by making it look
-        // like we have a match between expected and actual number of
-        // arguments.
-        definitely_matches = true;
-      } else {
-        Set(rbx, expected.immediate());
-      }
-    }
-  } else {
-    if (actual.is_immediate()) {
-      // Expected is in register, actual is immediate. This is the
-      // case when we invoke function values without going through the
-      // IC mechanism.
-      cmpq(expected.reg(), Immediate(actual.immediate()));
-      j(equal, &invoke);
-      ASSERT(expected.reg().is(rbx));
-      Set(rax, actual.immediate());
-    } else if (!expected.reg().is(actual.reg())) {
-      // Both expected and actual are in (different) registers. This
-      // is the case when we invoke functions using call and apply.
-      cmpq(expected.reg(), actual.reg());
-      j(equal, &invoke);
-      ASSERT(actual.reg().is(rax));
-      ASSERT(expected.reg().is(rbx));
-    }
-  }
-
-  if (!definitely_matches) {
-    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
-    if (!code_constant.is_null()) {
-      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
-      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    } else if (!code_register.is(rdx)) {
-      movq(rdx, code_register);
-    }
-
-    if (flag == CALL_FUNCTION) {
-      if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(adaptor));
-      Call(adaptor, RelocInfo::CODE_TARGET);
-      if (call_wrapper != NULL) call_wrapper->AfterCall();
-      jmp(done);
-    } else {
-      Jump(adaptor, RelocInfo::CODE_TARGET);
-    }
-    bind(&invoke);
-  }
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_X64_MACRO_ASSEMBLER_X64_H_
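
Roughly 750 lines of template implementations disappear from the header along with the LabelType parameter; the bodies can now live in macro-assembler-x64.cc as ordinary member functions that forward the distance hint to j(). Presumably the simplest of them now reads along these lines (a sketch, not copied from the patch):

  void MacroAssembler::JumpIfSmi(Register src,
                                 Label* on_smi,
                                 Label::Distance near_jump) {
    ASSERT_EQ(0, kSmiTag);
    Condition smi = CheckSmi(src);
    j(smi, on_smi, near_jump);
  }
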
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index c16da94..2ea17f0 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -1065,9 +1065,9 @@
 
 
 void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
-  NearLabel after_position;
+  Label after_position;
   __ cmpq(rdi, Immediate(-by * char_size()));
-  __ j(greater_equal, &after_position);
+  __ j(greater_equal, &after_position, Label::kNear);
   __ movq(rdi, Immediate(-by * char_size()));
   // On RegExp code entry (where this operation is used), the character before
   // the current position is expected to be already loaded.
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index cfaa5b8..df8423a 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -55,7 +55,8 @@
 // just use the C stack limit.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
     return c_limit;
   }
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index c19d29d..dae4a55 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -82,18 +82,18 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                             Label* miss_label,
-                                             Register receiver,
-                                             String* name,
-                                             Register r0,
-                                             Register r1) {
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss_label,
+    Register receiver,
+    String* name,
+    Register r0,
+    Register r1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1);
   __ IncrementCounter(counters->negative_lookups_miss(), 1);
 
-  Label done;
   __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
 
   const int kInterceptorOrAccessCheckNeededMask =
@@ -117,64 +117,20 @@
                  Heap::kHashTableMapRootIndex);
   __ j(not_equal, miss_label);
 
-  // Compute the capacity mask.
-  const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up.
-  static const int kProbes = 4;
-  const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
-  // not equal to the name and kProbes-th slot is not used (its name is the
-  // undefined value), it guarantees the hash table doesn't contain the
-  // property. It's true even if some slots represent deleted properties
-  // (their names are the null value).
-  for (int i = 0; i < kProbes; i++) {
-    // r0 points to properties hash.
-    // Compute the masked index: (hash + i + i * i) & mask.
-    Register index = r1;
-    // Capacity is smi 2^n.
-    __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
-    __ decl(index);
-    __ and_(index,
-            Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
-
-    // Scale the index by multiplying by the entry size.
-    ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
-
-    Register entity_name = r1;
-    // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    __ movq(entity_name, Operand(properties, index, times_pointer_size,
-                                 kElementsStartOffset - kHeapObjectTag));
-    __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
-    // __ jmp(miss_label);
-    if (i != kProbes - 1) {
-      __ j(equal, &done);
-
-      // Stop if found the property.
-      __ Cmp(entity_name, Handle<String>(name));
-      __ j(equal, miss_label);
-
-      // Check if the entry name is not a symbol.
-      __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-      __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-               Immediate(kIsSymbolMask));
-      __ j(zero, miss_label);
-    } else {
-      // Give up probing if still not found the undefined value.
-      __ j(not_equal, miss_label);
-    }
-  }
+  Label done;
+  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+      masm,
+      miss_label,
+      &done,
+      properties,
+      name,
+      r1);
+  if (result->IsFailure()) return result;
 
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
+
+  return result;
 }
 
 
@@ -522,10 +478,12 @@
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name)
+                          Register name,
+                          Code::ExtraICState extra_ic_state)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
-        name_(name) {}
+        name_(name),
+        extra_ic_state_(extra_ic_state) {}
 
   MaybeObject* Compile(MacroAssembler* masm,
                        JSObject* object,
@@ -650,8 +608,11 @@
                                                 arguments_.immediate());
       if (result->IsFailure()) return result;
     } else {
+      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+          ? CALL_AS_FUNCTION
+          : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION);
+                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -730,6 +691,7 @@
   StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
+  Code::ExtraICState extra_ic_state_;
 };
 
 
@@ -747,6 +709,14 @@
 }
 
 
+void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
+  Code* code = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
 // Both name_reg and receiver_reg are preserved on jumps to miss_label,
 // but may be destroyed if store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -907,12 +877,17 @@
       ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(),
-                                       miss,
-                                       reg,
-                                       name,
-                                       scratch1,
-                                       scratch2);
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
+
       __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
       reg = holder_reg;  // from now the object is in holder_reg
       __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
@@ -1325,8 +1300,10 @@
 
 
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
-  MaybeObject* maybe_obj = isolate()->stub_cache()->ComputeCallMiss(
-      arguments().immediate(), kind_);
+  MaybeObject* maybe_obj =
+      isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
+                                               kind_,
+                                               extra_ic_state_);
   Object* obj;
   if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@@ -1377,7 +1354,11 @@
   }
 
   // Invoke the function.
-  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -1657,7 +1638,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1739,7 +1722,9 @@
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
-  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
 
@@ -1856,7 +1841,11 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   __ bind(&miss);
   // rcx: function name.
@@ -1944,7 +1933,7 @@
 
   // Check if the argument is a heap number and load its value.
   __ bind(&not_smi);
-  __ CheckMap(rax, factory()->heap_number_map(), &slow, true);
+  __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
   __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
 
   // Check the sign of the argument. If the argument is positive,
@@ -1969,7 +1958,11 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   __ bind(&miss);
   // rcx: function name.
@@ -1993,6 +1986,7 @@
   // repatch it to global receiver.
   if (object->IsGlobalObject()) return heap()->undefined_value();
   if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
             JSObject::cast(object), holder);
   if (depth == kInvalidProtoDepth) return heap()->undefined_value();
@@ -2162,7 +2156,11 @@
       UNREACHABLE();
   }
 
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2199,7 +2197,7 @@
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), rcx);
+  CallInterceptorCompiler compiler(this, arguments(), rcx, extra_ic_state_);
   MaybeObject* result = compiler.Compile(masm(),
                                          object,
                                          holder,
@@ -2229,7 +2227,11 @@
 
   // Invoke the function.
   __ movq(rdi, rax);
-  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
+                    NullCallWrapper(), call_kind);
 
   // Handle load cache miss.
   __ bind(&miss);
@@ -2290,16 +2292,21 @@
   __ IncrementCounter(counters->call_global_inline(), 1);
   ASSERT(function->is_compiled());
   ParameterCount expected(function->shared()->formal_parameter_count());
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
   if (V8::UseCrankshaft()) {
     // TODO(kasperl): For now, we always call indirectly through the
     // code field in the function to allow recompilation to take effect
     // without changing any of the call sites.
     __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-    __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION);
+    __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   } else {
     Handle<Code> code(function->code());
     __ InvokeCode(code, expected, arguments(),
-                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   }
   // Handle call cache miss.
   __ bind(&miss);
@@ -2523,8 +2530,35 @@
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
-    JSObject* receiver) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
+    Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  MaybeObject* maybe_stub =
+      KeyedStoreFastElementStub(is_js_array).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(rdx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -2532,51 +2566,26 @@
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
-  // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  // Check that the map matches.
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-         Handle<Map>(receiver->map()));
-  __ j(not_equal, &miss);
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rcx, &miss);
-
-  // Get the elements array and make sure it is a fast element array, not 'cow'.
-  __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
-         factory()->fixed_array_map());
-  __ j(not_equal, &miss);
-
-  // Check that the key is within bounds.
-  if (receiver->IsJSArray()) {
-    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
-    __ j(above_equal, &miss);
-  } else {
-    __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-    __ j(above_equal, &miss);
+  Register map_reg = rbx;
+  __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+  int receiver_count = receiver_maps->length();
+  for (int current = 0; current < receiver_count; ++current) {
+    // Check map and tail call if there's a match
+    Handle<Map> map(receiver_maps->at(current));
+    __ Cmp(map_reg, map);
+    __ j(equal,
+         Handle<Code>(handler_ics->at(current)),
+         RelocInfo::CODE_TARGET);
   }
 
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ movq(rdx, rax);
-  __ SmiToInteger32(rcx, rcx);
-  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
-          rax);
-  __ RecordWrite(rdi, 0, rdx, rcx);
-
-  // Done.
-  __ ret(0);
-
-  // Handle store cache miss.
   __ bind(&miss);
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -2590,7 +2599,7 @@
   // -----------------------------------
   Label miss;
 
-  // Chech that receiver is not a smi.
+  // Check that receiver is not a smi.
   __ JumpIfSmi(rax, &miss);
 
   // Check the maps of the full prototype chain. Also check that
@@ -2981,49 +2990,56 @@
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(rdx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-
-  // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  // Check that the map matches.
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-         Handle<Map>(receiver->map()));
-  __ j(not_equal, &miss);
+  Register map_reg = rbx;
+  __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+  int receiver_count = receiver_maps->length();
+  for (int current = 0; current < receiver_count; ++current) {
+    // Check map and tail call if there's a match
+    Handle<Map> map(receiver_maps->at(current));
+    __ Cmp(map_reg, map);
+    __ j(equal,
+         Handle<Code>(handler_ics->at(current)),
+         RelocInfo::CODE_TARGET);
+  }
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rax, &miss);
-
-  // Get the elements array.
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ AssertFastElements(rcx);
-
-  // Check that the key is within bounds.
-  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ j(above_equal, &miss);
-
-  // Load the result and make sure it's not the hole.
-  SmiIndex index = masm()->SmiToIndex(rbx, rax, kPointerSizeLog2);
-  __ movq(rbx, FieldOperand(rcx,
-                            index.reg,
-                            index.scale,
-                            FixedArray::kHeaderSize));
-  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
-  __ j(equal, &miss);
-  __ movq(rax, rbx);
-  __ ret(0);
-
-  __ bind(&miss);
+  __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, NULL);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
@@ -3160,30 +3176,79 @@
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
-    JSObject* receiver, ExternalArrayType array_type, Code::Flags flags) {
+MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
+    JSObject* receiver, ExternalArrayType array_type) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label slow;
+  MaybeObject* maybe_stub =
+      KeyedLoadExternalArrayStub(array_type).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(rdx,
+                 Handle<Map>(receiver->map()),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(rdx, &slow);
+  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode();
+}
+
+MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
+    JSObject* receiver, ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  MaybeObject* maybe_stub =
+      KeyedStoreExternalArrayStub(array_type).TryGetCode();
+  Code* stub;
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(rdx,
+                 Handle<Map>(receiver->map()),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
+
+  Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  return GetCode();
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadExternalArray(
+    MacroAssembler* masm,
+    ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label slow, miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
 
   // Check that the key is a smi.
-  __ JumpIfNotSmi(rax, &slow);
-
-  // Check that the map matches.
-  __ CheckMap(rdx, Handle<Map>(receiver->map()), &slow, false);
-  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ JumpIfNotSmi(rax, &miss_force_generic);
 
   // Check that the index is in range.
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
   __ SmiToInteger32(rcx, rax);
   __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
-  __ j(above_equal, &slow);
+  __ j(above_equal, &miss_force_generic);
 
   // rax: index (as a smi)
   // rdx: receiver (JSObject)
@@ -3214,6 +3279,9 @@
     case kExternalFloatArray:
       __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
       break;
+    case kExternalDoubleArray:
+      __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
+      break;
     default:
       UNREACHABLE();
       break;
@@ -3231,9 +3299,9 @@
     // For the UnsignedInt array type, we need to see whether
     // the value can be represented in a Smi. If not, we need to convert
     // it to a HeapNumber.
-    NearLabel box_int;
+    Label box_int;
 
-    __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
+    __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
 
     __ Integer32ToSmi(rax, rcx);
     __ ret(0);
@@ -3251,7 +3319,8 @@
     __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
     __ movq(rax, rcx);
     __ ret(0);
-  } else if (array_type == kExternalFloatArray) {
+  } else if (array_type == kExternalFloatArray ||
+             array_type == kExternalDoubleArray) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
     __ AllocateHeapNumber(rcx, rbx, &slow);
@@ -3266,7 +3335,7 @@
 
   // Slow case: Jump to runtime.
   __ bind(&slow);
-  Counters* counters = isolate()->counters();
+  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
 
   // ----------- S t a t e -------------
@@ -3275,44 +3344,46 @@
   //  -- rsp[0]  : return address
   // -----------------------------------
 
-  __ pop(rbx);
-  __ push(rdx);  // receiver
-  __ push(rax);  // name
-  __ push(rbx);  // return address
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
 
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+  // Miss case: Jump to runtime.
+  __ bind(&miss_force_generic);
 
-  // Return the generated code.
-  return GetCode(flags);
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0]  : return address
+  // -----------------------------------
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
 }
 
 
-MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
-    JSObject* receiver, ExternalArrayType array_type, Code::Flags flags) {
+void KeyedStoreStubCompiler::GenerateStoreExternalArray(
+    MacroAssembler* masm,
+    ExternalArrayType array_type) {
   // ----------- S t a t e -------------
   //  -- rax     : value
   //  -- rcx     : key
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
-  Label slow;
+  Label slow, miss_force_generic;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(rdx, &slow);
-
-  // Check that the map matches.
-  __ CheckMap(rdx, Handle<Map>(receiver->map()), &slow, false);
-  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
 
   // Check that the key is a smi.
-  __ JumpIfNotSmi(rcx, &slow);
+  __ JumpIfNotSmi(rcx, &miss_force_generic);
 
   // Check that the index is in range.
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
   __ SmiToInteger32(rdi, rcx);  // Untag the index.
   __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
-  __ j(above_equal, &slow);
+  __ j(above_equal, &miss_force_generic);
 
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
@@ -3321,12 +3392,12 @@
   // rdx: receiver (a JSObject)
   // rbx: elements array
   // rdi: untagged key
-  NearLabel check_heap_number;
+  Label check_heap_number;
   if (array_type == kExternalPixelArray) {
     // Float to pixel conversion is only implemented in the runtime for now.
     __ JumpIfNotSmi(rax, &slow);
   } else {
-    __ JumpIfNotSmi(rax, &check_heap_number);
+    __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
   }
   // No more branches to slow case on this path.  Key and receiver not needed.
   __ SmiToInteger32(rdx, rax);
@@ -3335,9 +3406,9 @@
   switch (array_type) {
     case kExternalPixelArray:
       {  // Clamp the value to [0..255].
-        NearLabel done;
+        Label done;
         __ testl(rdx, Immediate(0xFFFFFF00));
-        __ j(zero, &done);
+        __ j(zero, &done, Label::kNear);
         __ setcc(negative, rdx);  // 1 if negative, 0 if positive.
         __ decb(rdx);  // 0 if negative, 255 if positive.
         __ bind(&done);
@@ -3361,6 +3432,11 @@
       __ cvtlsi2ss(xmm0, rdx);
       __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
       break;
+    case kExternalDoubleArray:
+      // Need to perform int-to-double conversion.
+      __ cvtlsi2sd(xmm0, rdx);
+      __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
+      break;
     default:
       UNREACHABLE();
       break;
@@ -3391,6 +3467,9 @@
       __ cvtsd2ss(xmm0, xmm0);
       __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
       __ ret(0);
+    } else if (array_type == kExternalDoubleArray) {
+      __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
+      __ ret(0);
     } else {
       // Perform float-to-int conversion with truncation (round-to-zero)
       // behavior.
@@ -3438,21 +3517,116 @@
   //  -- rsp[0]  : return address
   // -----------------------------------
 
-  __ pop(rbx);
-  __ push(rdx);  // receiver
-  __ push(rcx);  // key
-  __ push(rax);  // value
-  __ Push(Smi::FromInt(NONE));   // PropertyAttributes
-  __ Push(Smi::FromInt(
-      Code::ExtractExtraICStateFromFlags(flags) & kStrictMode));
-  __ push(rbx);  // return address
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+  __ jmp(ic, RelocInfo::CODE_TARGET);
 
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
+  // Miss case: call runtime.
+  __ bind(&miss_force_generic);
 
-  return GetCode(flags);
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ jmp(miss_ic, RelocInfo::CODE_TARGET);
 }
 
+
+void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rax, &miss_force_generic);
+
+  // Get the elements array.
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ AssertFastElements(rcx);
+
+  // Check that the key is within bounds.
+  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss_force_generic);
+
+  // Load the result and make sure it's not the hole.
+  SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
+  __ movq(rbx, FieldOperand(rcx,
+                            index.reg,
+                            index.scale,
+                            FixedArray::kHeaderSize));
+  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+  __ j(equal, &miss_force_generic);
+  __ movq(rax, rbx);
+  __ ret(0);
+
+  __ bind(&miss_force_generic);
+  Code* code = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  Handle<Code> ic(code);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss_force_generic;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rcx, &miss_force_generic);
+
+  // Get the elements array and make sure it is a fast element array, not 'cow'.
+  __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &miss_force_generic);
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+    __ j(above_equal, &miss_force_generic);
+  } else {
+    __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+    __ j(above_equal, &miss_force_generic);
+  }
+
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register rax.
+  __ movq(rdx, rax);
+  __ SmiToInteger32(rcx, rcx);
+  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+          rax);
+  __ RecordWrite(rdi, 0, rdx, rcx);
+
+  // Done.
+  __ ret(0);
+
+  // Handle store cache miss.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic_force_generic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 17e83dc..6e2d558 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -107,9 +107,20 @@
 }
 
 
-ZoneScope::ZoneScope(ZoneScopeMode mode)
-    : isolate_(Isolate::Current()),
-      mode_(mode) {
+template <typename T>
+void* ZoneList<T>::operator new(size_t size) {
+  return ZONE->New(static_cast<int>(size));
+}
+
+
+template <typename T>
+void* ZoneList<T>::operator new(size_t size, Zone* zone) {
+  return zone->New(static_cast<int>(size));
+}
+
+
+ZoneScope::ZoneScope(Isolate* isolate, ZoneScopeMode mode)
+    : isolate_(isolate), mode_(mode) {
   isolate_->zone()->scope_nesting_++;
 }
 
diff --git a/src/zone.h b/src/zone.h
index 9efe4f5..a5e437f 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -28,6 +28,8 @@
 #ifndef V8_ZONE_H_
 #define V8_ZONE_H_
 
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
@@ -132,8 +134,8 @@
 class ZoneObject {
  public:
   // Allocate a new ZoneObject of 'size' bytes in the Zone.
-  inline void* operator new(size_t size);
-  inline void* operator new(size_t size, Zone* zone);
+  INLINE(void* operator new(size_t size));
+  INLINE(void* operator new(size_t size, Zone* zone));
 
   // Ideally, the delete operator should be private instead of
   // public, but unfortunately the compiler sometimes synthesizes
@@ -162,7 +164,7 @@
 class ZoneListAllocationPolicy {
  public:
   // Allocate 'size' bytes of memory in the zone.
-  static inline void* New(int size);
+  INLINE(static void* New(int size));
 
   // De-allocation attempts are silently ignored.
   static void Delete(void* p) { }
@@ -176,6 +178,9 @@
 template<typename T>
 class ZoneList: public List<T, ZoneListAllocationPolicy> {
  public:
+  INLINE(void* operator new(size_t size));
+  INLINE(void* operator new(size_t size, Zone* zone));
+
   // Construct a new ZoneList with the given capacity; the length is
   // always zero. The capacity must be non-negative.
   explicit ZoneList(int capacity)
@@ -198,8 +203,7 @@
 // outer-most scope.
 class ZoneScope BASE_EMBEDDED {
  public:
-  // TODO(isolates): pass isolate pointer here.
-  inline explicit ZoneScope(ZoneScopeMode mode);
+  INLINE(ZoneScope(Isolate* isolate, ZoneScopeMode mode));
 
   virtual ~ZoneScope();