Update V8 to r4588

We're using WebKit r58033, as used by
http://src.chromium.org/svn/releases/5.0.387.0/DEPS. This requires
http://v8.googlecode.com/svn/trunk@4465, but that revision has a crashing bug
on ARM. Instead we use http://v8.googlecode.com/svn/trunk@4588, which is used
by http://src.chromium.org/svn/releases/6.0.399.0/DEPS.

Note that a trivial bug fix was required in arm/codegen-arm.cc. The fix is
guarded with ANDROID. See http://code.google.com/p/v8/issues/detail?id=703
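
For reference, the cross-compilation snapshot flow documented in the
SConscript change below can be sketched as follows (hypothetical commands;
exact scons options and paths depend on the build setup):

  # Host build with the ARM simulator, producing a snapshot.
  scons simulator=arm snapshot=on
  # Copy the generated snapshot into the source tree.
  cp obj/release/snapshot.cc src/
  # Cross-compile for ARM, reusing the snapshot just built.
  scons arch=arm snapshot=nobuild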

Change-Id: I459647a8286c4f8c7405f0c5581ecbf051a6f1e8
diff --git a/src/SConscript b/src/SConscript
index 3b227c8..5add999 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -43,6 +43,7 @@
     bootstrapper.cc
     builtins.cc
     checks.cc
+    circular-queue.cc
     code-stubs.cc
     codegen.cc
     compilation-cache.cc
@@ -50,19 +51,23 @@
     contexts.cc
     conversions.cc
     counters.cc
+    cpu-profiler.cc
     data-flow.cc
     dateparser.cc
     debug-agent.cc
     debug.cc
     disassembler.cc
+    diy-fp.cc
     execution.cc
     factory.cc
     flags.cc
+    flow-graph.cc
     frame-element.cc
     frames.cc
     full-codegen.cc
     func-name-inferrer.cc
     global-handles.cc
+    fast-dtoa.cc
     handles.cc
     hashmap.cc
     heap-profiler.cc
@@ -79,6 +84,7 @@
     objects.cc
     oprofile-agent.cc
     parser.cc
+    profile-generator.cc
     property.cc
     regexp-macro-assembler-irregexp.cc
     regexp-macro-assembler.cc
@@ -96,8 +102,8 @@
     stub-cache.cc
     token.cc
     top.cc
+    type-info.cc
     unicode.cc
-    usage-analyzer.cc
     utils.cc
     v8-counters.cc
     v8.cc
@@ -105,10 +111,13 @@
     variables.cc
     version.cc
     virtual-frame.cc
+    vm-state.cc
     zone.cc
     """),
   'arch:arm': Split("""
     fast-codegen.cc
+    jump-target-light.cc
+    virtual-frame-light.cc
     arm/builtins-arm.cc
     arm/codegen-arm.cc
     arm/constants-arm.cc
@@ -152,6 +161,8 @@
     mips/virtual-frame-mips.cc
     """),
   'arch:ia32': Split("""
+    jump-target-heavy.cc
+    virtual-frame-heavy.cc
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc
     ia32/codegen-ia32.cc
@@ -171,6 +182,8 @@
     """),
   'arch:x64': Split("""
     fast-codegen.cc
+    jump-target-heavy.cc
+    virtual-frame-heavy.cc
     x64/assembler-x64.cc
     x64/builtins-x64.cc
     x64/codegen-x64.cc
@@ -251,6 +264,7 @@
 date.js
 regexp.js
 json.js
+liveedit-debugger.js
 mirror-debugger.js
 debug-debugger.js
 '''.split()
@@ -292,7 +306,12 @@
   source_objs = context.ConfigureObject(env, source_files)
   non_snapshot_files = [dtoa_obj, source_objs]
 
-  # Create snapshot if necessary.
+  # Create snapshot if necessary.  For cross compilation you should either
+  # do without snapshots and take the performance hit, or build a host VM
+  # with the simulator=arm and snapshot=on options and then take the
+  # resulting snapshot.cc file from obj/release and put it in the src
+  # directory.  Then rebuild the VM with the cross compiler and specify
+  # snapshot=nobuild on the scons command line.
   empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc')
   mksnapshot_env = env.Copy()
   mksnapshot_env.Replace(**context.flags['mksnapshot'])
@@ -302,7 +321,7 @@
     if context.build_snapshot:
       snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
     else:
-      snapshot_cc = Command('snapshot.cc', [], [])
+      snapshot_cc = 'snapshot.cc'
     snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
   else:
     snapshot_obj = empty_snapshot_obj
diff --git a/src/accessors.cc b/src/accessors.cc
index b05719e..e41db94 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -32,7 +32,6 @@
 #include "factory.h"
 #include "scopeinfo.h"
 #include "top.h"
-#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/api.cc b/src/api.cc
index 22c5b77..4709a15 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -34,14 +34,17 @@
 #include "debug.h"
 #include "execution.h"
 #include "global-handles.h"
-#include "globals.h"
+#include "messages.h"
 #include "platform.h"
+#include "profile-generator-inl.h"
 #include "serialize.h"
 #include "snapshot.h"
+#include "top.h"
 #include "utils.h"
 #include "v8threads.h"
 #include "version.h"
 
+#include "../include/v8-profiler.h"
 
 #define LOG_API(expr) LOG(ApiEntryCall(expr))
 
@@ -439,7 +442,6 @@
 void V8::DisposeGlobal(i::Object** obj) {
   LOG_API("DisposeGlobal");
   if (!i::V8::IsRunning()) return;
-  if ((*obj)->IsGlobalContext()) i::Heap::NotifyContextDisposed();
   i::GlobalHandles::Destroy(obj);
 }
 
@@ -537,10 +539,17 @@
   LOG_API("CloseHandleScope");
 
   // Read the result before popping the handle block.
-  i::Object* result = *value;
+  i::Object* result = NULL;
+  if (value != NULL) {
+    result = *value;
+  }
   is_closed_ = true;
   i::HandleScope::Leave(&previous_);
 
+  if (value == NULL) {
+    return NULL;
+  }
+
   // Allocate a new handle on the previous handle block.
   i::Handle<i::Object> handle(result);
   return handle.location();
@@ -1136,7 +1145,7 @@
   if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
     pre_data_impl = NULL;
   }
-  i::Handle<i::JSFunction> boilerplate =
+  i::Handle<i::SharedFunctionInfo> result =
       i::Compiler::Compile(str,
                            name_obj,
                            line_offset,
@@ -1145,9 +1154,9 @@
                            pre_data_impl,
                            Utils::OpenHandle(*script_data),
                            i::NOT_NATIVES_CODE);
-  has_pending_exception = boilerplate.is_null();
+  has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(Local<Script>());
-  return Local<Script>(ToApi<Script>(boilerplate));
+  return Local<Script>(ToApi<Script>(result));
 }
 
 
@@ -1168,10 +1177,12 @@
   Local<Script> generic = New(source, origin, pre_data, script_data);
   if (generic.IsEmpty())
     return generic;
-  i::Handle<i::JSFunction> boilerplate = Utils::OpenHandle(*generic);
+  i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
+  i::Handle<i::SharedFunctionInfo> function =
+      i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
   i::Handle<i::JSFunction> result =
-      i::Factory::NewFunctionFromBoilerplate(boilerplate,
-                                             i::Top::global_context());
+      i::Factory::NewFunctionFromSharedFunctionInfo(function,
+                                                    i::Top::global_context());
   return Local<Script>(ToApi<Script>(result));
 }
 
@@ -1191,10 +1202,15 @@
   i::Object* raw_result = NULL;
   {
     HandleScope scope;
-    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
-    if (fun->IsBoilerplate()) {
-      fun = i::Factory::NewFunctionFromBoilerplate(fun,
-                                                   i::Top::global_context());
+    i::Handle<i::Object> obj = Utils::OpenHandle(this);
+    i::Handle<i::JSFunction> fun;
+    if (obj->IsSharedFunctionInfo()) {
+      i::Handle<i::SharedFunctionInfo>
+          function_info(i::SharedFunctionInfo::cast(*obj));
+      fun = i::Factory::NewFunctionFromSharedFunctionInfo(
+          function_info, i::Top::global_context());
+    } else {
+      fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj));
     }
     EXCEPTION_PREAMBLE();
     i::Handle<i::Object> receiver(i::Top::context()->global_proxy());
@@ -1208,14 +1224,28 @@
 }
 
 
+static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(script);
+  i::Handle<i::SharedFunctionInfo> result;
+  if (obj->IsSharedFunctionInfo()) {
+    result =
+        i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
+  } else {
+    result =
+        i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared());
+  }
+  return result;
+}
+
+
 Local<Value> Script::Id() {
   ON_BAILOUT("v8::Script::Id()", return Local<Value>());
   LOG_API("Script::Id");
   i::Object* raw_id = NULL;
   {
     HandleScope scope;
-    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
-    i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
+    i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
+    i::Handle<i::Script> script(i::Script::cast(function_info->script()));
     i::Handle<i::Object> id(script->id());
     raw_id = *id;
   }
@@ -1229,9 +1259,9 @@
   LOG_API("Script::SetData");
   {
     HandleScope scope;
-    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+    i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
     i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
-    i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
+    i::Handle<i::Script> script(i::Script::cast(function_info->script()));
     script->set_data(*raw_data);
   }
 }
@@ -1577,6 +1607,18 @@
 }
 
 
+bool Value::IsUint32() const {
+  if (IsDeadCheck("v8::Value::IsUint32()")) return false;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
+  if (obj->IsNumber()) {
+    double value = obj->Number();
+    return i::FastUI2D(i::FastD2UI(value)) == value;
+  }
+  return false;
+}
+
+
 bool Value::IsDate() const {
   if (IsDeadCheck("v8::Value::IsDate()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
@@ -1982,6 +2024,23 @@
 }
 
 
+bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
+  ON_BAILOUT("v8::Object::Set()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> obj = i::SetElement(
+      self,
+      index,
+      value_obj);
+  has_pending_exception = obj.is_null();
+  EXCEPTION_BAILOUT_CHECK(false);
+  return true;
+}
+
+
 bool v8::Object::ForceSet(v8::Handle<Value> key,
                           v8::Handle<Value> value,
                           v8::PropertyAttribute attribs) {
@@ -2030,6 +2089,18 @@
 }
 
 
+Local<Value> v8::Object::Get(uint32_t index) {
+  ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
+  ENTER_V8;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> result = i::GetElement(self, index);
+  has_pending_exception = result.is_null();
+  EXCEPTION_BAILOUT_CHECK(Local<Value>());
+  return Utils::ToLocal(result);
+}
+
+
 Local<Value> v8::Object::GetPrototype() {
   ON_BAILOUT("v8::Object::GetPrototype()", return Local<v8::Value>());
   ENTER_V8;
@@ -2570,12 +2641,20 @@
 }
 
 
-int String::WriteUtf8(char* buffer, int capacity) const {
+int String::WriteUtf8(char* buffer,
+                      int capacity,
+                      int* nchars_ref,
+                      WriteHints hints) const {
   if (IsDeadCheck("v8::String::WriteUtf8()")) return 0;
   LOG_API("String::WriteUtf8");
   ENTER_V8;
   i::Handle<i::String> str = Utils::OpenHandle(this);
   StringTracker::RecordWrite(str);
+  if (hints & HINT_MANY_WRITES_EXPECTED) {
+    // Flatten the string for efficiency.  This applies whether we are
+    // using StringInputBuffer or Get(i) to access the characters.
+    str->TryFlatten();
+  }
   write_input_buffer.Reset(0, *str);
   int len = str->length();
   // Encode the first K - 3 bytes directly into the buffer since we
@@ -2584,10 +2663,12 @@
   int fast_end = capacity - (unibrow::Utf8::kMaxEncodedSize - 1);
   int i;
   int pos = 0;
+  int nchars = 0;
   for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) {
     i::uc32 c = write_input_buffer.GetNext();
     int written = unibrow::Utf8::Encode(buffer + pos, c);
     pos += written;
+    nchars++;
   }
   if (i < len) {
     // For the last characters we need to check the length for each one
@@ -2601,28 +2682,35 @@
         for (int j = 0; j < written; j++)
           buffer[pos + j] = intermediate[j];
         pos += written;
+        nchars++;
       } else {
         // We've reached the end of the buffer
         break;
       }
     }
   }
+  if (nchars_ref != NULL) *nchars_ref = nchars;
   if (i == len && (capacity == -1 || pos < capacity))
     buffer[pos++] = '\0';
   return pos;
 }
 
 
-int String::WriteAscii(char* buffer, int start, int length) const {
+int String::WriteAscii(char* buffer,
+                       int start,
+                       int length,
+                       WriteHints hints) const {
   if (IsDeadCheck("v8::String::WriteAscii()")) return 0;
   LOG_API("String::WriteAscii");
   ENTER_V8;
   ASSERT(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(this);
   StringTracker::RecordWrite(str);
-  // Flatten the string for efficiency.  This applies whether we are
-  // using StringInputBuffer or Get(i) to access the characters.
-  str->TryFlattenIfNotFlat();
+  if (hints & HINT_MANY_WRITES_EXPECTED) {
+    // Flatten the string for efficiency.  This applies whether we are
+    // using StringInputBuffer or Get(i) to access the characters.
+    str->TryFlatten();
+  }
   int end = length;
   if ( (length == -1) || (length > str->length() - start) )
     end = str->length() - start;
@@ -2640,13 +2728,21 @@
 }
 
 
-int String::Write(uint16_t* buffer, int start, int length) const {
+int String::Write(uint16_t* buffer,
+                  int start,
+                  int length,
+                  WriteHints hints) const {
   if (IsDeadCheck("v8::String::Write()")) return 0;
   LOG_API("String::Write");
   ENTER_V8;
   ASSERT(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(this);
   StringTracker::RecordWrite(str);
+  if (hints & HINT_MANY_WRITES_EXPECTED) {
+    // Flatten the string for efficiency.  This applies whether we are
+    // using StringInputBuffer or Get(i) to access the characters.
+    str->TryFlatten();
+  }
   int end = length;
   if ( (length == -1) || (length > str->length() - start) )
     end = str->length() - start;
@@ -2735,6 +2831,17 @@
 }
 
 
+uint32_t Uint32::Value() const {
+  if (IsDeadCheck("v8::Uint32::Value()")) return 0;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  if (obj->IsSmi()) {
+    return i::Smi::cast(*obj)->value();
+  } else {
+    return static_cast<uint32_t>(obj->Number());
+  }
+}
+
+
 int v8::Object::InternalFieldCount() {
   if (IsDeadCheck("v8::Object::InternalFieldCount()")) return 0;
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
@@ -2775,6 +2882,7 @@
 
 
 void v8::Object::SetPointerInInternalField(int index, void* value) {
+  ENTER_V8;
   i::Object* as_object = reinterpret_cast<i::Object*>(value);
   if (as_object->IsSmi()) {
     Utils::OpenHandle(this)->SetInternalField(index, as_object);
@@ -2828,6 +2936,12 @@
 }
 
 
+int v8::V8::ContextDisposedNotification() {
+  if (!i::V8::IsRunning()) return 0;
+  return i::Heap::NotifyContextDisposed();
+}
+
+
 const char* v8::V8::GetVersion() {
   static v8::internal::EmbeddedVector<char, 128> buffer;
   v8::internal::Version::GetString(buffer);
@@ -2855,14 +2969,6 @@
   LOG_API("Context::New");
   ON_BAILOUT("v8::Context::New()", return Persistent<Context>());
 
-#if defined(ANDROID)
-  // On mobile device, full GC is expensive, leave it to the system to
-  // decide when should make a full GC.
-#else
-  // Give the heap a chance to cleanup if we've disposed contexts.
-  i::Heap::CollectAllGarbageIfContextDisposed();
-#endif
-
   // Enter V8 via an ENTER_V8 scope.
   i::Handle<i::Context> env;
   {
@@ -3341,6 +3447,7 @@
   }
   i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
   EXCEPTION_PREAMBLE();
+  ENTER_V8;
   i::Handle<i::JSObject> result = i::Copy(paragon_handle);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(Local<Object>());
@@ -3485,6 +3592,30 @@
 }
 
 
+void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+  if (IsDeadCheck("v8::V8::AddGCPrologueCallback()")) return;
+  i::Heap::AddGCPrologueCallback(callback, gc_type);
+}
+
+
+void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+  if (IsDeadCheck("v8::V8::RemoveGCPrologueCallback()")) return;
+  i::Heap::RemoveGCPrologueCallback(callback);
+}
+
+
+void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+  if (IsDeadCheck("v8::V8::AddGCEpilogueCallback()")) return;
+  i::Heap::AddGCEpilogueCallback(callback, gc_type);
+}
+
+
+void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+  if (IsDeadCheck("v8::V8::RemoveGCEpilogueCallback()")) return;
+  i::Heap::RemoveGCEpilogueCallback(callback);
+}
+
+
 void V8::PauseProfiler() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   PauseProfilerEx(PROFILER_MODULE_CPU);
@@ -3515,6 +3646,8 @@
     // those modules which haven't been started prior to making a
     // snapshot.
 
+    // Perform a GC prior to taking a snapshot.
+    i::Heap::CollectAllGarbage(false);
     // Reset snapshot flag and CPU module flags.
     flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
     const int current_flags = i::Logger::GetActiveProfilerModules();
@@ -3546,6 +3679,7 @@
 
 int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
+  ASSERT(max_size >= kMinimumSizeForLogLinesBuffer);
   return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
 #endif
   return 0;
@@ -3579,6 +3713,15 @@
 }
 
 
+bool V8::IsExecutionTerminating() {
+  if (!i::V8::IsRunning()) return false;
+  if (i::Top::has_scheduled_exception()) {
+    return i::Top::scheduled_exception() == i::Heap::termination_exception();
+  }
+  return false;
+}
+
+
 String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
   EnsureInitialized("v8::String::Utf8Value::Utf8Value()");
   if (obj.IsEmpty()) {
@@ -3878,8 +4021,151 @@
   i::Execution::ProcessDebugMesssages(true);
 }
 
+Local<Context> Debug::GetDebugContext() {
+  EnsureInitialized("v8::Debug::GetDebugContext()");
+  ENTER_V8;
+  return Utils::ToLocal(i::Debugger::GetDebugContext());
+}
+
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+Handle<String> CpuProfileNode::GetFunctionName() const {
+  IsDeadCheck("v8::CpuProfileNode::GetFunctionName");
+  const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+  const i::CodeEntry* entry = node->entry();
+  if (!entry->has_name_prefix()) {
+    return Handle<String>(ToApi<String>(
+        i::Factory::LookupAsciiSymbol(entry->name())));
+  } else {
+    return Handle<String>(ToApi<String>(i::Factory::NewConsString(
+        i::Factory::LookupAsciiSymbol(entry->name_prefix()),
+        i::Factory::LookupAsciiSymbol(entry->name()))));
+  }
+}
+
+
+Handle<String> CpuProfileNode::GetScriptResourceName() const {
+  IsDeadCheck("v8::CpuProfileNode::GetScriptResourceName");
+  const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+  return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+      node->entry()->resource_name())));
+}
+
+
+int CpuProfileNode::GetLineNumber() const {
+  IsDeadCheck("v8::CpuProfileNode::GetLineNumber");
+  return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
+}
+
+
+double CpuProfileNode::GetTotalTime() const {
+  IsDeadCheck("v8::CpuProfileNode::GetTotalTime");
+  return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis();
+}
+
+
+double CpuProfileNode::GetSelfTime() const {
+  IsDeadCheck("v8::CpuProfileNode::GetSelfTime");
+  return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis();
+}
+
+
+double CpuProfileNode::GetTotalSamplesCount() const {
+  IsDeadCheck("v8::CpuProfileNode::GetTotalSamplesCount");
+  return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
+}
+
+
+double CpuProfileNode::GetSelfSamplesCount() const {
+  IsDeadCheck("v8::CpuProfileNode::GetSelfSamplesCount");
+  return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
+}
+
+
+unsigned CpuProfileNode::GetCallUid() const {
+  IsDeadCheck("v8::CpuProfileNode::GetCallUid");
+  return reinterpret_cast<const i::ProfileNode*>(this)->entry()->call_uid();
+}
+
+
+int CpuProfileNode::GetChildrenCount() const {
+  IsDeadCheck("v8::CpuProfileNode::GetChildrenCount");
+  return reinterpret_cast<const i::ProfileNode*>(this)->children()->length();
+}
+
+
+const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
+  IsDeadCheck("v8::CpuProfileNode::GetChild");
+  const i::ProfileNode* child =
+      reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
+  return reinterpret_cast<const CpuProfileNode*>(child);
+}
+
+
+unsigned CpuProfile::GetUid() const {
+  IsDeadCheck("v8::CpuProfile::GetUid");
+  return reinterpret_cast<const i::CpuProfile*>(this)->uid();
+}
+
+
+Handle<String> CpuProfile::GetTitle() const {
+  IsDeadCheck("v8::CpuProfile::GetTitle");
+  const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
+  return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
+      profile->title())));
+}
+
+
+const CpuProfileNode* CpuProfile::GetBottomUpRoot() const {
+  IsDeadCheck("v8::CpuProfile::GetBottomUpRoot");
+  const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
+  return reinterpret_cast<const CpuProfileNode*>(profile->bottom_up()->root());
+}
+
+
+const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
+  IsDeadCheck("v8::CpuProfile::GetTopDownRoot");
+  const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
+  return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
+}
+
+
+int CpuProfiler::GetProfilesCount() {
+  IsDeadCheck("v8::CpuProfiler::GetProfilesCount");
+  return i::CpuProfiler::GetProfilesCount();
+}
+
+
+const CpuProfile* CpuProfiler::GetProfile(int index) {
+  IsDeadCheck("v8::CpuProfiler::GetProfile");
+  return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::GetProfile(index));
+}
+
+
+const CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
+  IsDeadCheck("v8::CpuProfiler::FindProfile");
+  return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::FindProfile(uid));
+}
+
+
+void CpuProfiler::StartProfiling(Handle<String> title) {
+  IsDeadCheck("v8::CpuProfiler::StartProfiling");
+  i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title));
+}
+
+
+const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
+  IsDeadCheck("v8::CpuProfiler::StopProfiling");
+  return reinterpret_cast<const CpuProfile*>(
+      i::CpuProfiler::StopProfiling(*Utils::OpenHandle(*title)));
+}
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+
 namespace internal {
 
 
diff --git a/src/api.h b/src/api.h
index a28e1f0..7b88112 100644
--- a/src/api.h
+++ b/src/api.h
@@ -221,7 +221,7 @@
       OpenHandle(const v8::Array* data);
   static inline v8::internal::Handle<v8::internal::String>
       OpenHandle(const String* data);
-  static inline v8::internal::Handle<v8::internal::JSFunction>
+  static inline v8::internal::Handle<v8::internal::Object>
       OpenHandle(const Script* data);
   static inline v8::internal::Handle<v8::internal::JSFunction>
       OpenHandle(const Function* data);
@@ -247,7 +247,11 @@
 template <class T>
 v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
     v8::HandleScope* scope) {
-  return Utils::OpenHandle(*scope->Close(Utils::ToLocal(*this)));
+  v8::internal::Handle<T> handle;
+  if (!is_null()) {
+    handle = *this;
+  }
+  return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)));
 }
 
 
@@ -255,7 +259,7 @@
 
 #define MAKE_TO_LOCAL(Name, From, To)                                       \
   Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
-    ASSERT(!obj->IsTheHole());                                              \
+    ASSERT(obj.is_null() || !obj->IsTheHole());                             \
     return Local<To>(reinterpret_cast<To*>(obj.location()));                \
   }
 
@@ -296,7 +300,7 @@
 MAKE_OPEN_HANDLE(Object, JSObject)
 MAKE_OPEN_HANDLE(Array, JSArray)
 MAKE_OPEN_HANDLE(String, String)
-MAKE_OPEN_HANDLE(Script, JSFunction)
+MAKE_OPEN_HANDLE(Script, Object)
 MAKE_OPEN_HANDLE(Function, JSFunction)
 MAKE_OPEN_HANDLE(Message, JSObject)
 MAKE_OPEN_HANDLE(Context, Context)
diff --git a/src/arguments.h b/src/arguments.h
index 3fed223..c17f4cf 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -72,7 +72,7 @@
 };
 
 
-// Cursom arguments replicate a small segment of stack that can be
+// Custom arguments replicate a small segment of stack that can be
 // accessed through an Arguments object the same way the actual stack
 // can.
 class CustomArguments : public Relocatable {
@@ -80,15 +80,14 @@
   inline CustomArguments(Object* data,
                          JSObject* self,
                          JSObject* holder) {
-    values_[3] = self;
-    values_[2] = holder;
-    values_[1] = Smi::FromInt(0);
+    values_[2] = self;
+    values_[1] = holder;
     values_[0] = data;
   }
   void IterateInstance(ObjectVisitor* v);
-  Object** end() { return values_ + 3; }
+  Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
  private:
-  Object* values_[4];
+  Object* values_[3];
 };
 
 
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 354436c..3f0854e 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -144,12 +144,21 @@
 
 
 bool RelocInfo::IsPatchedReturnSequence() {
-  // On ARM a "call instruction" is actually two instructions.
-  //   mov lr, pc
-  //   ldr pc, [pc, #XXX]
-  return (Assembler::instr_at(pc_) == kMovLrPc)
-          && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
-              == kLdrPCPattern);
+  Instr current_instr = Assembler::instr_at(pc_);
+  Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
+#ifdef USE_BLX
+  // A patched return sequence is:
+  //  ldr ip, [pc, #0]
+  //  blx ip
+  return ((current_instr & kLdrPCMask) == kLdrPCPattern)
+          && ((next_instr & kBlxRegMask) == kBlxRegPattern);
+#else
+  // A patched return sequence is:
+  //  mov lr, pc
+  //  ldr pc, [pc, #-4]
+  return (current_instr == kMovLrPc)
+          && ((next_instr & kLdrPCMask) == kLdrPCPattern);
+#endif
 }
 
 
@@ -225,6 +234,16 @@
     target_pc -= kInstrSize;
     instr = Memory::int32_at(target_pc);
   }
+
+#ifdef USE_BLX
+  // If we have a blx instruction, the instruction before it is
+  // what needs to be patched.
+  if ((instr & kBlxRegMask) == kBlxRegPattern) {
+    target_pc -= kInstrSize;
+    instr = Memory::int32_at(target_pc);
+  }
+#endif
+
   // Verify that the instruction to patch is a
   // ldr<cond> <Rd>, [pc +/- offset_12].
   ASSERT((instr & 0x0f7f0000) == 0x051f0000);
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 1b3bcb0..7990368 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -72,11 +72,11 @@
 #ifndef __arm__
   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
   if (FLAG_enable_vfp3) {
-      supported_ |= 1u << VFP3;
+    supported_ |= 1u << VFP3;
   }
   // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
   if (FLAG_enable_armv7) {
-      supported_ |= 1u << ARMv7;
+    supported_ |= 1u << ARMv7;
   }
 #else  // def __arm__
   if (Serializer::enabled()) {
@@ -96,105 +96,11 @@
     supported_ |= 1u << ARMv7;
     found_by_runtime_probing_ |= 1u << ARMv7;
   }
-#endif  // def __arm__
+#endif
 }
 
 
 // -----------------------------------------------------------------------------
-// Implementation of Register and CRegister
-
-Register no_reg = { -1 };
-
-Register r0  = {  0 };
-Register r1  = {  1 };
-Register r2  = {  2 };
-Register r3  = {  3 };
-Register r4  = {  4 };
-Register r5  = {  5 };
-Register r6  = {  6 };
-Register r7  = {  7 };
-Register r8  = {  8 };  // Used as context register.
-Register r9  = {  9 };
-Register r10 = { 10 };  // Used as roots register.
-Register fp  = { 11 };
-Register ip  = { 12 };
-Register sp  = { 13 };
-Register lr  = { 14 };
-Register pc  = { 15 };
-
-
-CRegister no_creg = { -1 };
-
-CRegister cr0  = {  0 };
-CRegister cr1  = {  1 };
-CRegister cr2  = {  2 };
-CRegister cr3  = {  3 };
-CRegister cr4  = {  4 };
-CRegister cr5  = {  5 };
-CRegister cr6  = {  6 };
-CRegister cr7  = {  7 };
-CRegister cr8  = {  8 };
-CRegister cr9  = {  9 };
-CRegister cr10 = { 10 };
-CRegister cr11 = { 11 };
-CRegister cr12 = { 12 };
-CRegister cr13 = { 13 };
-CRegister cr14 = { 14 };
-CRegister cr15 = { 15 };
-
-// Support for the VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2".
-SwVfpRegister s0  = {  0 };
-SwVfpRegister s1  = {  1 };
-SwVfpRegister s2  = {  2 };
-SwVfpRegister s3  = {  3 };
-SwVfpRegister s4  = {  4 };
-SwVfpRegister s5  = {  5 };
-SwVfpRegister s6  = {  6 };
-SwVfpRegister s7  = {  7 };
-SwVfpRegister s8  = {  8 };
-SwVfpRegister s9  = {  9 };
-SwVfpRegister s10 = { 10 };
-SwVfpRegister s11 = { 11 };
-SwVfpRegister s12 = { 12 };
-SwVfpRegister s13 = { 13 };
-SwVfpRegister s14 = { 14 };
-SwVfpRegister s15 = { 15 };
-SwVfpRegister s16 = { 16 };
-SwVfpRegister s17 = { 17 };
-SwVfpRegister s18 = { 18 };
-SwVfpRegister s19 = { 19 };
-SwVfpRegister s20 = { 20 };
-SwVfpRegister s21 = { 21 };
-SwVfpRegister s22 = { 22 };
-SwVfpRegister s23 = { 23 };
-SwVfpRegister s24 = { 24 };
-SwVfpRegister s25 = { 25 };
-SwVfpRegister s26 = { 26 };
-SwVfpRegister s27 = { 27 };
-SwVfpRegister s28 = { 28 };
-SwVfpRegister s29 = { 29 };
-SwVfpRegister s30 = { 30 };
-SwVfpRegister s31 = { 31 };
-
-DwVfpRegister d0  = {  0 };
-DwVfpRegister d1  = {  1 };
-DwVfpRegister d2  = {  2 };
-DwVfpRegister d3  = {  3 };
-DwVfpRegister d4  = {  4 };
-DwVfpRegister d5  = {  5 };
-DwVfpRegister d6  = {  6 };
-DwVfpRegister d7  = {  7 };
-DwVfpRegister d8  = {  8 };
-DwVfpRegister d9  = {  9 };
-DwVfpRegister d10 = { 10 };
-DwVfpRegister d11 = { 11 };
-DwVfpRegister d12 = { 12 };
-DwVfpRegister d13 = { 13 };
-DwVfpRegister d14 = { 14 };
-DwVfpRegister d15 = { 15 };
-
-// -----------------------------------------------------------------------------
 // Implementation of RelocInfo
 
 const int RelocInfo::kApplyMask = 0;
@@ -354,8 +260,14 @@
     al | B26 | L | 4 | PostIndex | sp.code() * B16;
 // mov lr, pc
 const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
-// ldr pc, [pc, #XXX]
-const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
+// ldr rd, [pc, #offset]
+const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
+// blxcc rm
+const Instr kBlxRegMask =
+    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
+const Instr kBlxRegPattern =
+    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
 
 // Spare buffer.
 static const int kMinimalBufferSize = 4*KB;
@@ -394,6 +306,7 @@
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
   num_prinfo_ = 0;
   next_buffer_check_ = 0;
+  const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
   last_const_pool_end_ = 0;
   last_bound_pos_ = 0;
@@ -405,6 +318,7 @@
 
 
 Assembler::~Assembler() {
+  ASSERT(const_pool_blocked_nesting_ == 0);
   if (own_buffer_) {
     if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
       spare_buffer_ = buffer_;
@@ -436,6 +350,51 @@
 }
 
 
+bool Assembler::IsNop(Instr instr, int type) {
+  // Check for mov rx, rx.
+  ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
+  return instr == (al | 13*B21 | type*B12 | type);
+}
+
+
+bool Assembler::IsBranch(Instr instr) {
+  return (instr & (B27 | B25)) == (B27 | B25);
+}
+
+
+int Assembler::GetBranchOffset(Instr instr) {
+  ASSERT(IsBranch(instr));
+  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
+  // with 4 to get the offset in bytes.
+  return ((instr & Imm24Mask) << 8) >> 6;
+}
+
+
+bool Assembler::IsLdrRegisterImmediate(Instr instr) {
+  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
+}
+
+
+int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
+  ASSERT(IsLdrRegisterImmediate(instr));
+  bool positive = (instr & B23) == B23;
+  int offset = instr & Off12Mask;  // Zero extended offset.
+  return positive ? offset : -offset;
+}
+
+
+Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
+  ASSERT(IsLdrRegisterImmediate(instr));
+  bool positive = offset >= 0;
+  if (!positive) offset = -offset;
+  ASSERT(is_uint12(offset));
+  // Set bit indicating whether the offset should be added.
+  instr = (instr & ~B23) | (positive ? B23 : 0);
+  // Set the actual offset.
+  return (instr & ~Off12Mask) | offset;
+}
+
+
 // Labels refer to positions in the (to be) generated code.
 // There are bound, linked, and unused labels.
 //
@@ -459,10 +418,10 @@
   }
   ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
   int imm26 = ((instr & Imm24Mask) << 8) >> 6;
-  if ((instr & CondMask) == nv && (instr & B24) != 0)
+  if ((instr & CondMask) == nv && (instr & B24) != 0) {
     // blx uses bit 24 to encode bit 2 of imm26
     imm26 += 2;
-
+  }
   return pos + kPcLoadDelta + imm26;
 }
 
@@ -841,9 +800,10 @@
   ASSERT(is_int24(imm24));
   emit(cond | B27 | B25 | (imm24 & Imm24Mask));
 
-  if (cond == al)
+  if (cond == al) {
     // Dead code is a good location to emit the constant pool.
     CheckConstPool(false, false);
+  }
 }
 
 
@@ -990,6 +950,10 @@
   if (dst.is(pc)) {
     WriteRecordedPositions();
   }
+  // Don't allow nop instructions in the form mov rn, rn to be generated using
+  // the mov instruction. They must be generated using nop(int)
+  // pseudo instructions.
+  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
   addrmod1(cond | 13*B21 | s, r0, dst, src);
 }
 
@@ -1429,11 +1393,28 @@
   // Vdst(15-12) | 1011(11-8) | offset
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(offset % 4 == 0);
+  ASSERT((offset / 4) < 256);
   emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
        0xB*B8 | ((offset / 4) & 255));
 }
 
 
+void Assembler::vldr(const SwVfpRegister dst,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // Sdst = MEM(Rbase + offset).
+  // Instruction details available in ARM DDI 0406A, A8-628.
+  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+  // Vdst(15-12) | 1010(11-8) | offset
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
+  ASSERT((offset / 4) < 256);
+  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+       0xA*B8 | ((offset / 4) & 255));
+}
+
+
 void Assembler::vstr(const DwVfpRegister src,
                      const Register base,
                      int offset,
@@ -1444,6 +1425,7 @@
   // Vsrc(15-12) | 1011(11-8) | (offset/4)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(offset % 4 == 0);
+  ASSERT((offset / 4) < 256);
   emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
        0xB*B8 | ((offset / 4) & 255));
 }
@@ -1507,31 +1489,172 @@
 }
 
 
-void Assembler::vcvt(const DwVfpRegister dst,
-                     const SwVfpRegister src,
-                     const Condition cond) {
-  // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
-  // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
-       dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
-       (0x1 & src.code())*B5 | (src.code() >> 1));
+// Type of data to read from or write to VFP register.
+// Used as specifier in generic vcvt instruction.
+enum VFPType { S32, U32, F32, F64 };
+
+
+static bool IsSignedVFPType(VFPType type) {
+  switch (type) {
+    case S32:
+      return true;
+    case U32:
+      return false;
+    default:
+      UNREACHABLE();
+      return false;
+  }
 }
 
 
-void Assembler::vcvt(const SwVfpRegister dst,
-                     const DwVfpRegister src,
-                     const Condition cond) {
-  // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
-  // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+static bool IsIntegerVFPType(VFPType type) {
+  switch (type) {
+    case S32:
+    case U32:
+      return true;
+    case F32:
+    case F64:
+      return false;
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
+static bool IsDoubleVFPType(VFPType type) {
+  switch (type) {
+    case F32:
+      return false;
+    case F64:
+      return true;
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
+// Depending on split_last_bit, split the binary representation of reg_code
+// into Vm:M or M:Vm form (where M is a single bit).
+static void SplitRegCode(bool split_last_bit,
+                         int reg_code,
+                         int* vm,
+                         int* m) {
+  if (split_last_bit) {
+    *m  = reg_code & 0x1;
+    *vm = reg_code >> 1;
+  } else {
+    *m  = (reg_code & 0x10) >> 4;
+    *vm = reg_code & 0x0F;
+  }
+}
+
+
+// Encode vcvt.src_type.dst_type instruction.
+static Instr EncodeVCVT(const VFPType dst_type,
+                        const int dst_code,
+                        const VFPType src_type,
+                        const int src_code,
+                        const Condition cond) {
+  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
+    // Conversion between IEEE floating point and 32-bit integer.
+    // Instruction details available in ARM DDI 0406B, A8.6.295.
+    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
+    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
+
+    int sz, opc2, D, Vd, M, Vm, op;
+
+    if (IsIntegerVFPType(dst_type)) {
+      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
+      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
+      op = 1;  // round towards zero
+      SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
+      SplitRegCode(true, dst_code, &Vd, &D);
+    } else {
+      ASSERT(IsIntegerVFPType(src_type));
+
+      opc2 = 0x0;
+      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
+      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
+      SplitRegCode(true, src_code, &Vm, &M);
+      SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D);
+    }
+
+    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
+            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
+  } else {
+    // Conversion between IEEE double and single precision.
+    // Instruction details available in ARM DDI 0406B, A8.6.298.
+    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
+    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+    int sz, D, Vd, M, Vm;
+
+    ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type));
+    sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
+    SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D);
+    SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
+
+    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
+            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
+  }
+}
+
+
+void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
-       0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
-       0x5*B9 | B8 | B7 | B6 | src.code());
+  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
+                             const DwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), cond));
+}
+
+
+void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
+                             const DwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
+                             const DwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), cond));
 }
 
 
@@ -1620,6 +1743,13 @@
 
 
 // Pseudo instructions.
+void Assembler::nop(int type) {
+  // This is mov rx, rx.
+  ASSERT(0 <= type && type <= 14);  // mov pc, pc is not a nop.
+  emit(al | 13*B21 | type*B12 | type);
+}
+
+
 void Assembler::lea(Register dst,
                     const MemOperand& x,
                     SBit s,
@@ -1823,12 +1953,17 @@
 
   // However, some small sequences of instructions must not be broken up by the
   // insertion of a constant pool; such sequences are protected by setting
-  // no_const_pool_before_, which is checked here. Also, recursive calls to
-  // CheckConstPool are blocked by no_const_pool_before_.
-  if (pc_offset() < no_const_pool_before_) {
+  // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
+  // both checked here. Also, recursive calls to CheckConstPool are blocked by
+  // no_const_pool_before_.
+  if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
     // Emission is currently blocked; make sure we try again as soon as
     // possible.
-    next_buffer_check_ = no_const_pool_before_;
+    if (const_pool_blocked_nesting_ > 0) {
+      next_buffer_check_ = pc_offset() + kInstrSize;
+    } else {
+      next_buffer_check_ = no_const_pool_before_;
+    }
 
     // Something is wrong if emission is forced and blocked at the same time.
     ASSERT(!force_emit);
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index f6b7a06..839ed67 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -84,25 +84,24 @@
   int code_;
 };
 
+const Register no_reg = { -1 };
 
-extern Register no_reg;
-extern Register r0;
-extern Register r1;
-extern Register r2;
-extern Register r3;
-extern Register r4;
-extern Register r5;
-extern Register r6;
-extern Register r7;
-extern Register r8;
-extern Register r9;
-extern Register r10;
-extern Register fp;
-extern Register ip;
-extern Register sp;
-extern Register lr;
-extern Register pc;
-
+const Register r0  = {  0 };
+const Register r1  = {  1 };
+const Register r2  = {  2 };
+const Register r3  = {  3 };
+const Register r4  = {  4 };
+const Register r5  = {  5 };
+const Register r6  = {  6 };
+const Register r7  = {  7 };
+const Register r8  = {  8 };  // Used as context register.
+const Register r9  = {  9 };
+const Register r10 = { 10 };  // Used as roots register.
+const Register fp  = { 11 };
+const Register ip  = { 12 };
+const Register sp  = { 13 };
+const Register lr  = { 14 };
+const Register pc  = { 15 };
 
 // Single word VFP register.
 struct SwVfpRegister {
@@ -139,57 +138,57 @@
 };
 
 
-// Support for VFP registers s0 to s31 (d0 to d15).
+// Support for the VFP registers s0 to s31 (d0 to d15).
 // Note that "s(N):s(N+1)" is the same as "d(N/2)".
-extern SwVfpRegister s0;
-extern SwVfpRegister s1;
-extern SwVfpRegister s2;
-extern SwVfpRegister s3;
-extern SwVfpRegister s4;
-extern SwVfpRegister s5;
-extern SwVfpRegister s6;
-extern SwVfpRegister s7;
-extern SwVfpRegister s8;
-extern SwVfpRegister s9;
-extern SwVfpRegister s10;
-extern SwVfpRegister s11;
-extern SwVfpRegister s12;
-extern SwVfpRegister s13;
-extern SwVfpRegister s14;
-extern SwVfpRegister s15;
-extern SwVfpRegister s16;
-extern SwVfpRegister s17;
-extern SwVfpRegister s18;
-extern SwVfpRegister s19;
-extern SwVfpRegister s20;
-extern SwVfpRegister s21;
-extern SwVfpRegister s22;
-extern SwVfpRegister s23;
-extern SwVfpRegister s24;
-extern SwVfpRegister s25;
-extern SwVfpRegister s26;
-extern SwVfpRegister s27;
-extern SwVfpRegister s28;
-extern SwVfpRegister s29;
-extern SwVfpRegister s30;
-extern SwVfpRegister s31;
+const SwVfpRegister s0  = {  0 };
+const SwVfpRegister s1  = {  1 };
+const SwVfpRegister s2  = {  2 };
+const SwVfpRegister s3  = {  3 };
+const SwVfpRegister s4  = {  4 };
+const SwVfpRegister s5  = {  5 };
+const SwVfpRegister s6  = {  6 };
+const SwVfpRegister s7  = {  7 };
+const SwVfpRegister s8  = {  8 };
+const SwVfpRegister s9  = {  9 };
+const SwVfpRegister s10 = { 10 };
+const SwVfpRegister s11 = { 11 };
+const SwVfpRegister s12 = { 12 };
+const SwVfpRegister s13 = { 13 };
+const SwVfpRegister s14 = { 14 };
+const SwVfpRegister s15 = { 15 };
+const SwVfpRegister s16 = { 16 };
+const SwVfpRegister s17 = { 17 };
+const SwVfpRegister s18 = { 18 };
+const SwVfpRegister s19 = { 19 };
+const SwVfpRegister s20 = { 20 };
+const SwVfpRegister s21 = { 21 };
+const SwVfpRegister s22 = { 22 };
+const SwVfpRegister s23 = { 23 };
+const SwVfpRegister s24 = { 24 };
+const SwVfpRegister s25 = { 25 };
+const SwVfpRegister s26 = { 26 };
+const SwVfpRegister s27 = { 27 };
+const SwVfpRegister s28 = { 28 };
+const SwVfpRegister s29 = { 29 };
+const SwVfpRegister s30 = { 30 };
+const SwVfpRegister s31 = { 31 };
 
-extern DwVfpRegister d0;
-extern DwVfpRegister d1;
-extern DwVfpRegister d2;
-extern DwVfpRegister d3;
-extern DwVfpRegister d4;
-extern DwVfpRegister d5;
-extern DwVfpRegister d6;
-extern DwVfpRegister d7;
-extern DwVfpRegister d8;
-extern DwVfpRegister d9;
-extern DwVfpRegister d10;
-extern DwVfpRegister d11;
-extern DwVfpRegister d12;
-extern DwVfpRegister d13;
-extern DwVfpRegister d14;
-extern DwVfpRegister d15;
+const DwVfpRegister d0  = {  0 };
+const DwVfpRegister d1  = {  1 };
+const DwVfpRegister d2  = {  2 };
+const DwVfpRegister d3  = {  3 };
+const DwVfpRegister d4  = {  4 };
+const DwVfpRegister d5  = {  5 };
+const DwVfpRegister d6  = {  6 };
+const DwVfpRegister d7  = {  7 };
+const DwVfpRegister d8  = {  8 };
+const DwVfpRegister d9  = {  9 };
+const DwVfpRegister d10 = { 10 };
+const DwVfpRegister d11 = { 11 };
+const DwVfpRegister d12 = { 12 };
+const DwVfpRegister d13 = { 13 };
+const DwVfpRegister d14 = { 14 };
+const DwVfpRegister d15 = { 15 };
 
 
 // Coprocessor register
@@ -210,23 +209,24 @@
 };
 
 
-extern CRegister no_creg;
-extern CRegister cr0;
-extern CRegister cr1;
-extern CRegister cr2;
-extern CRegister cr3;
-extern CRegister cr4;
-extern CRegister cr5;
-extern CRegister cr6;
-extern CRegister cr7;
-extern CRegister cr8;
-extern CRegister cr9;
-extern CRegister cr10;
-extern CRegister cr11;
-extern CRegister cr12;
-extern CRegister cr13;
-extern CRegister cr14;
-extern CRegister cr15;
+const CRegister no_creg = { -1 };
+
+const CRegister cr0  = {  0 };
+const CRegister cr1  = {  1 };
+const CRegister cr2  = {  2 };
+const CRegister cr3  = {  3 };
+const CRegister cr4  = {  4 };
+const CRegister cr5  = {  5 };
+const CRegister cr6  = {  6 };
+const CRegister cr7  = {  7 };
+const CRegister cr8  = {  8 };
+const CRegister cr9  = {  9 };
+const CRegister cr10 = { 10 };
+const CRegister cr11 = { 11 };
+const CRegister cr12 = { 12 };
+const CRegister cr13 = { 13 };
+const CRegister cr14 = { 14 };
+const CRegister cr15 = { 15 };
 
 
 // Coprocessor number
@@ -509,7 +509,10 @@
 
 
 extern const Instr kMovLrPc;
+extern const Instr kLdrPCMask;
 extern const Instr kLdrPCPattern;
+extern const Instr kBlxRegMask;
+extern const Instr kBlxRegPattern;
 
 
 class Assembler : public Malloced {
@@ -590,12 +593,34 @@
   static const int kInstrSize = sizeof(Instr);
 
   // Distance between the instruction referring to the address of the call
-  // target (ldr pc, [target addr in const pool]) and the return address
+  // target and the return address.
+#ifdef USE_BLX
+  // Call sequence is:
+  //  ldr  ip, [pc, #...] @ call address
+  //  blx  ip
+  //                      @ return address
+  static const int kCallTargetAddressOffset = 2 * kInstrSize;
+#else
+  // Call sequence is:
+  //  mov  lr, pc
+  //  ldr  pc, [pc, #...] @ call address
+  //                      @ return address
   static const int kCallTargetAddressOffset = kInstrSize;
+#endif
 
   // Distance between start of patched return sequence and the emitted address
   // to jump to.
-  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+#ifdef USE_BLX
+  // Return sequence is:
+  //  ldr  ip, [pc, #0]   @ emitted address and start
+  //  blx  ip
+  static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
+#else
+  // Return sequence is:
+  //  mov  lr, pc         @ start of sequence
+  //  ldr  pc, [pc, #-4]  @ emitted address
+  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+#endif
 
   // Difference between address of current opcode and value read from pc
   // register.
@@ -801,6 +826,12 @@
             const Register base,
             int offset,  // Offset must be a multiple of 4.
             const Condition cond = al);
+
+  void vldr(const SwVfpRegister dst,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
+
   void vstr(const DwVfpRegister src,
             const Register base,
             int offset,  // Offset must be a multiple of 4.
@@ -819,12 +850,27 @@
   void vmov(const Register dst,
             const SwVfpRegister src,
             const Condition cond = al);
-  void vcvt(const DwVfpRegister dst,
-            const SwVfpRegister src,
-            const Condition cond = al);
-  void vcvt(const SwVfpRegister dst,
-            const DwVfpRegister src,
-            const Condition cond = al);
+  void vcvt_f64_s32(const DwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f32_s32(const SwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f64_u32(const DwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_s32_f64(const SwVfpRegister dst,
+                    const DwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_u32_f64(const SwVfpRegister dst,
+                    const DwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f64_f32(const DwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f32_f64(const SwVfpRegister dst,
+                    const DwVfpRegister src,
+                    const Condition cond = al);
 
   void vadd(const DwVfpRegister dst,
             const DwVfpRegister src1,
@@ -850,7 +896,7 @@
             const Condition cond = al);
 
   // Pseudo instructions
-  void nop()  { mov(r0, Operand(r0)); }
+  void nop(int type = 0);
 
   void push(Register src, Condition cond = al) {
     str(src, MemOperand(sp, 4, NegPreIndex), cond);
@@ -879,6 +925,22 @@
   // Check whether an immediate fits an addressing mode 1 instruction.
   bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
 
+  // Class for scoping postponing the constant pool generation.
+  class BlockConstPoolScope {
+   public:
+    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
+      assem_->StartBlockConstPool();
+    }
+    ~BlockConstPoolScope() {
+      assem_->EndBlockConstPool();
+    }
+
+   private:
+    Assembler* assem_;
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
+  };
+
   // Postpone the generation of the constant pool for the specified number of
   // instructions.
   void BlockConstPoolFor(int instructions);
@@ -898,16 +960,25 @@
 
   int pc_offset() const { return pc_ - buffer_; }
   int current_position() const { return current_position_; }
-  int current_statement_position() const { return current_position_; }
+  int current_statement_position() const { return current_statement_position_; }
+
+  // Read/patch instructions
+  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+  static void instr_at_put(byte* pc, Instr instr) {
+    *reinterpret_cast<Instr*>(pc) = instr;
+  }
+  static bool IsNop(Instr instr, int type = 0);
+  static bool IsBranch(Instr instr);
+  static int GetBranchOffset(Instr instr);
+  static bool IsLdrRegisterImmediate(Instr instr);
+  static int GetLdrRegisterImmediateOffset(Instr instr);
+  static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
+
 
  protected:
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
 
   // Read/patch instructions
-  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
-  void instr_at_put(byte* pc, Instr instr) {
-    *reinterpret_cast<Instr*>(pc) = instr;
-  }
   Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
   void instr_at_put(int pos, Instr instr) {
     *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
@@ -927,6 +998,13 @@
     if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
   }
 
+  void StartBlockConstPool() {
+    const_pool_blocked_nesting_++;
+  }
+  void EndBlockConstPool() {
+    const_pool_blocked_nesting_--;
+  }
+
  private:
   // Code buffer:
   // The buffer into which code and relocation info are generated.
@@ -976,8 +1054,9 @@
   // distance between pools.
   static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
 
-  // Emission of the constant pool may be blocked in some code sequences
-  int no_const_pool_before_;  // block emission before this pc offset
+  // Emission of the constant pool may be blocked in some code sequences.
+  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
+  int no_const_pool_before_;  // Block emission before this pc offset.
 
   // Keep track of the last emitted pool to guarantee a maximal distance
   int last_const_pool_end_;  // pc offset following the last constant pool
@@ -1029,6 +1108,7 @@
   friend class RegExpMacroAssemblerARM;
   friend class RelocInfo;
   friend class CodePatcher;
+  friend class BlockConstPoolScope;
 };
 
 } }  // namespace v8::internal
diff --git a/src/arm/assembler-thumb2.h b/src/arm/assembler-thumb2.h
index 869ac46..2da1138 100644
--- a/src/arm/assembler-thumb2.h
+++ b/src/arm/assembler-thumb2.h
@@ -898,7 +898,7 @@
 
   int pc_offset() const { return pc_ - buffer_; }
   int current_position() const { return current_position_; }
-  int current_statement_position() const { return current_position_; }
+  int current_statement_position() const { return current_statement_position_; }
 
  protected:
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index edb1b0a..7bb8c46 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -61,10 +61,10 @@
     ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
-  // JumpToRuntime expects r0 to contain the number of arguments
+  // JumpToExternalReference expects r0 to contain the number of arguments
   // including the receiver and the extra arguments.
   __ add(r0, r0, Operand(num_extra_args + 1));
-  __ JumpToRuntime(ExternalReference(id));
+  __ JumpToExternalReference(ExternalReference(id));
 }
 
 
@@ -593,7 +593,7 @@
       __ bind(&loop);
       __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
       __ bind(&entry);
-      __ cmp(r5, Operand(r6));
+      __ cmp(r5, r6);
       __ b(lt, &loop);
     }
 
@@ -666,7 +666,7 @@
       __ bind(&loop);
       __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
       __ bind(&entry);
-      __ cmp(r2, Operand(r6));
+      __ cmp(r2, r6);
       __ b(lt, &loop);
     }
 
@@ -863,7 +863,7 @@
   __ ldr(r0, MemOperand(r0));  // dereference handle
   __ push(r0);  // push parameter
   __ bind(&entry);
-  __ cmp(r4, Operand(r2));
+  __ cmp(r4, r2);
   __ b(ne, &loop);
 
   // Initialize all JavaScript callee-saved registers, since they will be seen
@@ -1213,7 +1213,7 @@
   Label invoke, dont_adapt_arguments;
 
   Label enough, too_few;
-  __ cmp(r0, Operand(r2));
+  __ cmp(r0, r2);
   __ b(lt, &too_few);
   __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
   __ b(eq, &dont_adapt_arguments);
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
index 17e18d9..6edec4d 100644
--- a/src/arm/codegen-arm-inl.h
+++ b/src/arm/codegen-arm-inl.h
@@ -29,6 +29,8 @@
 #ifndef V8_ARM_CODEGEN_ARM_INL_H_
 #define V8_ARM_CODEGEN_ARM_INL_H_
 
+#include "virtual-frame-arm.h"
+
 namespace v8 {
 namespace internal {
 
@@ -43,6 +45,7 @@
 
 
 void CodeGenerator::LoadAndSpill(Expression* expression) {
+  ASSERT(VirtualFrame::SpilledScope::is_spilled());
   Load(expression);
 }
 
@@ -57,11 +60,6 @@
 }
 
 
-void Reference::GetValueAndSpill() {
-  GetValue();
-}
-
-
 // Platform-specific inline functions.
 
 void DeferredCode::Jump() { __ jmp(&entry_label_); }
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index e47d392..dea0b63 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -31,11 +31,15 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
+#include "ic-inl.h"
+#include "jsregexp.h"
 #include "parser.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-stack.h"
 #include "register-allocator-inl.h"
 #include "runtime.h"
 #include "scopes.h"
-
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -129,13 +133,11 @@
       allocator_(NULL),
       cc_reg_(al),
       state_(NULL),
+      loop_nesting_(0),
       function_return_is_shadowed_(false) {
 }
 
 
-Scope* CodeGenerator::scope() { return info_->function()->scope(); }
-
-
 // Calling conventions:
 // fp: caller's frame pointer
 // sp: stack pointer
@@ -145,6 +147,7 @@
 void CodeGenerator::Generate(CompilationInfo* info) {
   // Record the position for debugging purposes.
   CodeForFunctionPosition(info->function());
+  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
 
   // Initialize state.
   info_ = info;
@@ -154,6 +157,11 @@
   ASSERT(frame_ == NULL);
   frame_ = new VirtualFrame();
   cc_reg_ = al;
+
+  // Adjust for function-level loop nesting.
+  ASSERT_EQ(0, loop_nesting_);
+  loop_nesting_ = info->loop_nesting();
+
   {
     CodeGenState state(this);
 
@@ -182,7 +190,7 @@
       // for stack overflow.
       frame_->AllocateStackSlots();
 
-      VirtualFrame::SpilledScope spilled_scope;
+      VirtualFrame::SpilledScope spilled_scope(frame_);
       int heap_slots = scope()->num_heap_slots();
       if (heap_slots > 0) {
         // Allocate local context.
@@ -198,7 +206,7 @@
 
 #ifdef DEBUG
         JumpTarget verified_true;
-        __ cmp(r0, Operand(cp));
+        __ cmp(r0, cp);
         verified_true.Branch(eq);
         __ stop("NewContext: r0 is expected to be the same as cp");
         verified_true.Bind();
@@ -239,29 +247,10 @@
       }
 
       // Store the arguments object.  This must happen after context
-      // initialization because the arguments object may be stored in the
-      // context.
-      if (scope()->arguments() != NULL) {
-        Comment cmnt(masm_, "[ allocate arguments object");
-        ASSERT(scope()->arguments_shadow() != NULL);
-        Variable* arguments = scope()->arguments()->var();
-        Variable* shadow = scope()->arguments_shadow()->var();
-        ASSERT(arguments != NULL && arguments->slot() != NULL);
-        ASSERT(shadow != NULL && shadow->slot() != NULL);
-        ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
-        __ ldr(r2, frame_->Function());
-        // The receiver is below the arguments, the return address, and the
-        // frame pointer on the stack.
-        const int kReceiverDisplacement = 2 + scope()->num_parameters();
-        __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
-        __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
-        frame_->Adjust(3);
-        __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
-        frame_->CallStub(&stub, 3);
-        frame_->EmitPush(r0);
-        StoreToSlot(arguments->slot(), NOT_CONST_INIT);
-        StoreToSlot(shadow->slot(), NOT_CONST_INIT);
-        frame_->Drop();  // Value is no longer needed.
+      // initialization because the arguments object may be stored in
+      // the context.
+      if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
+        StoreArgumentsObject(true);
       }
 
       // Initialize ThisFunction reference if present.
@@ -275,8 +264,6 @@
       // fp, and lr have been pushed on the stack.  Adjust the virtual
       // frame to match this state.
       frame_->Adjust(4);
-      allocator_->Unuse(r1);
-      allocator_->Unuse(lr);
 
       // Bind all the bailout labels to the beginning of the function.
       List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
@@ -347,42 +334,44 @@
       frame_->CallRuntime(Runtime::kTraceExit, 1);
     }
 
+#ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
     Label check_exit_codesize;
     masm_->bind(&check_exit_codesize);
+#endif
+    // Make sure that the constant pool is not emitted inside the return
+    // sequence.
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      // Tear down the frame which will restore the caller's frame pointer and
+      // the link register.
+      frame_->Exit();
 
-    // Calculate the exact length of the return sequence and make sure that
-    // the constant pool is not emitted inside of the return sequence.
-    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
-    int return_sequence_length = Assembler::kJSReturnSequenceLength;
-    if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
-      // Additional mov instruction generated.
-      return_sequence_length++;
+      // Here we use masm_-> instead of the __ macro to prevent the code
+      // coverage tool from instrumenting this sequence, as we rely on the
+      // code size here.
+      int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+      masm_->add(sp, sp, Operand(sp_delta));
+      masm_->Jump(lr);
+
+#ifdef DEBUG
+      // Check that the size of the code used for returning matches what is
+      // expected by the debugger. If the sp_delta above cannot be encoded in
+      // the add instruction, the add will generate two instructions.
+      int return_sequence_length =
+          masm_->InstructionsGeneratedSince(&check_exit_codesize);
+      CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
+            return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
+#endif
     }
-    masm_->BlockConstPoolFor(return_sequence_length);
-
-    // Tear down the frame which will restore the caller's frame pointer and
-    // the link register.
-    frame_->Exit();
-
-    // Here we use masm_-> instead of the __ macro to avoid the code coverage
-    // tool from instrumenting as we rely on the code size here.
-    masm_->add(sp, sp, Operand(sp_delta));
-    masm_->Jump(lr);
-
-    // Check that the size of the code used for returning matches what is
-    // expected by the debugger. The add instruction above is an addressing
-    // mode 1 instruction where there are restrictions on which immediate values
-    // can be encoded in the instruction and which immediate values requires
-    // use of an additional instruction for moving the immediate to a temporary
-    // register.
-    ASSERT_EQ(return_sequence_length,
-              masm_->InstructionsGeneratedSince(&check_exit_codesize));
   }
 
+  // Adjust for function-level loop nesting.
+  ASSERT(loop_nesting_ == info->loop_nesting());
+  loop_nesting_ = 0;
+
   // Code generation state must be reset.
   ASSERT(!has_cc());
   ASSERT(state_ == NULL);
+  ASSERT(loop_nesting() == 0);
   ASSERT(!function_return_is_shadowed_);
   function_return_.Unuse();
   DeleteFrame();
@@ -506,6 +495,7 @@
         has_valid_frame() &&
         !has_cc() &&
         frame_->height() == original_height) {
+      frame_->SpillAll();
       true_target->Jump();
     }
   }
@@ -530,6 +520,7 @@
 
   if (has_cc()) {
     // Convert cc_reg_ into a boolean value.
+    VirtualFrame::SpilledScope scope(frame_);
     JumpTarget loaded;
     JumpTarget materialize_true;
     materialize_true.Branch(cc_reg_);
@@ -544,6 +535,7 @@
   }
 
   if (true_target.is_linked() || false_target.is_linked()) {
+    VirtualFrame::SpilledScope scope(frame_);
     // We have at least one condition value that has been "translated"
     // into a branch, thus it needs to be loaded explicitly.
     JumpTarget loaded;
@@ -573,19 +565,19 @@
   }
   ASSERT(has_valid_frame());
   ASSERT(!has_cc());
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
 void CodeGenerator::LoadGlobal() {
-  VirtualFrame::SpilledScope spilled_scope;
-  __ ldr(r0, GlobalObject());
-  frame_->EmitPush(r0);
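+  // Ask the virtual frame which register it would use for the next value
+  // pushed, so the push needs no extra register-to-register move.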
+  Register reg = frame_->GetTOSRegister();
+  __ ldr(reg, GlobalObject());
+  frame_->EmitPush(reg);
 }
 
 
 void CodeGenerator::LoadGlobalReceiver(Register scratch) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ ldr(scratch,
          FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
@@ -593,9 +585,69 @@
 }
 
 
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
+  ASSERT(scope()->arguments_shadow() != NULL);
+  // We don't want to do lazy arguments allocation for functions that
+  // have heap-allocated contexts, because it interferes with the
+  // uninitialized const tracking in the context objects.
+  return (scope()->num_heap_slots() > 0)
+      ? EAGER_ARGUMENTS_ALLOCATION
+      : LAZY_ARGUMENTS_ALLOCATION;
+}
+
+
+void CodeGenerator::StoreArgumentsObject(bool initial) {
+  VirtualFrame::SpilledScope spilled_scope(frame_);
+
+  ArgumentsAllocationMode mode = ArgumentsMode();
+  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
+
+  Comment cmnt(masm_, "[ store arguments object");
+  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
+    // When using lazy arguments allocation, we store the hole value
+    // as a sentinel indicating that the arguments object hasn't been
+    // allocated yet.
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    frame_->EmitPush(ip);
+  } else {
+    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+    __ ldr(r2, frame_->Function());
+    // The receiver is below the arguments, the return address, and the
+    // frame pointer on the stack.
+    const int kReceiverDisplacement = 2 + scope()->num_parameters();
+    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
+    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+    frame_->Adjust(3);
+    __ Push(r2, r1, r0);
+    frame_->CallStub(&stub, 3);
+    frame_->EmitPush(r0);
+  }
+
+  Variable* arguments = scope()->arguments()->var();
+  Variable* shadow = scope()->arguments_shadow()->var();
+  ASSERT(arguments != NULL && arguments->slot() != NULL);
+  ASSERT(shadow != NULL && shadow->slot() != NULL);
+  JumpTarget done;
+  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+    // We have to skip storing into the arguments slot if it has
+    // already been written to. This can happen if a function
+    // has a local variable named 'arguments'.
+    LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+    frame_->EmitPop(r0);
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(r0, ip);
+    done.Branch(ne);
+  }
+  StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+  StoreToSlot(shadow->slot(), NOT_CONST_INIT);
+}
+
+
 void CodeGenerator::LoadTypeofExpression(Expression* expr) {
   // Special handling of identifiers as subexpressions of typeof.
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Variable* variable = expr->AsVariableProxy()->AsVariable();
   if (variable != NULL && !variable->is_this() && variable->is_global()) {
     // For a global variable we build the property reference
@@ -605,11 +657,11 @@
     Literal key(variable->name());
     Property property(&global, &key, RelocInfo::kNoPosition);
     Reference ref(this, &property);
-    ref.GetValueAndSpill();
+    ref.GetValue();
   } else if (variable != NULL && variable->slot() != NULL) {
     // For a variable that rewrites to a slot, we signal it is the immediate
     // subexpression of a typeof.
-    LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
+    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
     frame_->SpillAll();
   } else {
     // Anything else can be handled normally.
@@ -635,7 +687,6 @@
 
 
 void CodeGenerator::LoadReference(Reference* ref) {
-  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ LoadReference");
   Expression* e = ref->expression();
   Property* property = e->AsProperty();
@@ -644,11 +695,11 @@
   if (property != NULL) {
     // The expression is either a property or a variable proxy that rewrites
     // to a property.
-    LoadAndSpill(property->obj());
+    Load(property->obj());
     if (property->key()->IsPropertyName()) {
       ref->set_type(Reference::NAMED);
     } else {
-      LoadAndSpill(property->key());
+      Load(property->key());
       ref->set_type(Reference::KEYED);
     }
   } else if (var != NULL) {
@@ -663,6 +714,7 @@
     }
   } else {
     // Anything else is a runtime error.
+    VirtualFrame::SpilledScope spilled_scope(frame_);
     LoadAndSpill(e);
     frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
   }
@@ -670,16 +722,18 @@
 
 
 void CodeGenerator::UnloadReference(Reference* ref) {
-  VirtualFrame::SpilledScope spilled_scope;
-  // Pop a reference from the stack while preserving TOS.
-  Comment cmnt(masm_, "[ UnloadReference");
   int size = ref->size();
-  if (size > 0) {
-    frame_->EmitPop(r0);
-    frame_->Drop(size);
-    frame_->EmitPush(r0);
-  }
   ref->set_unloaded();
+  if (size == 0) return;
+
+  // Pop a reference from the stack while preserving TOS.
+  VirtualFrame::RegisterAllocationScope scope(this);
+  Comment cmnt(masm_, "[ UnloadReference");
+  if (size > 0) {
+    Register tos = frame_->PopToRegister();
+    frame_->Drop(size);
+    frame_->EmitPush(tos);
+  }
 }
 
 
@@ -688,7 +742,7 @@
 // may jump to 'false_target' in case the register converts to 'false'.
 void CodeGenerator::ToBoolean(JumpTarget* true_target,
                               JumpTarget* false_target) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   // Note: The generated code snippet does not change stack variables.
   //       Only the condition code should be set.
   frame_->EmitPop(r0);
@@ -730,13 +784,54 @@
 void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                            OverwriteMode overwrite_mode,
                                            int constant_rhs) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   // sp[0] : y
   // sp[1] : x
   // result : r0
 
   // Stub is entered with a call: 'return address' is in lr.
   switch (op) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR: {
+      frame_->EmitPop(r0);  // r0 : y
+      frame_->EmitPop(r1);  // r1 : x
+      GenericBinaryOpStub stub(op, overwrite_mode, r1, r0, constant_rhs);
+      frame_->CallStub(&stub, 0);
+      break;
+    }
+
+    case Token::COMMA:
+      frame_->EmitPop(r0);
+      // Simply discard left value.
+      frame_->Drop();
+      break;
+
+    default:
+      // Other cases should have been handled before this point.
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op,
+                                                OverwriteMode overwrite_mode,
+                                                int constant_rhs) {
+  // top of virtual frame : y
+  // 2nd element on virtual frame : x
+  // result : top of virtual frame
+
+  // Stub is entered with a call: 'return address' is in lr.
+  switch (op) {
     case Token::ADD:  // fall through.
     case Token::SUB:  // fall through.
     case Token::MUL:
@@ -748,18 +843,24 @@
     case Token::SHL:
     case Token::SHR:
     case Token::SAR: {
-      frame_->EmitPop(r0);  // r0 : y
-      frame_->EmitPop(r1);  // r1 : x
-      GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
-      frame_->CallStub(&stub, 0);
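+      // Let the virtual frame choose the registers to pop into instead of
+      // forcing r0/r1; the stub is parameterized with the registers that
+      // actually hold the operands.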
+      Register rhs = frame_->PopToRegister();
+      Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
+      {
+        VirtualFrame::SpilledScope spilled_scope(frame_);
+        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
+        frame_->CallStub(&stub, 0);
+      }
+      frame_->EmitPush(r0);
       break;
     }
 
-    case Token::COMMA:
-      frame_->EmitPop(r0);
-      // simply discard left value
+    case Token::COMMA: {
+      Register scratch = frame_->PopToRegister();
+      // Simply discard left value.
       frame_->Drop();
+      frame_->EmitPush(scratch);
       break;
+    }
 
     default:
       // Other cases should have been handled before this point.
@@ -774,11 +875,13 @@
   DeferredInlineSmiOperation(Token::Value op,
                              int value,
                              bool reversed,
-                             OverwriteMode overwrite_mode)
+                             OverwriteMode overwrite_mode,
+                             Register tos)
       : op_(op),
         value_(value),
         reversed_(reversed),
-        overwrite_mode_(overwrite_mode) {
+        overwrite_mode_(overwrite_mode),
+        tos_register_(tos) {
     set_comment("[ DeferredInlinedSmiOperation");
   }
 
@@ -789,18 +892,21 @@
   int value_;
   bool reversed_;
   OverwriteMode overwrite_mode_;
+  Register tos_register_;
 };
 
 
 void DeferredInlineSmiOperation::Generate() {
+  Register lhs = r1;
+  Register rhs = r0;
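+  // The stub expects its operands in lhs and rhs.  The cases below move
+  // the value in tos_register_ and the immediate into r0 and r1, swapping
+  // the lhs/rhs roles instead of moving registers where that is cheaper.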
   switch (op_) {
     case Token::ADD: {
       // Revert optimistic add.
       if (reversed_) {
-        __ sub(r0, r0, Operand(Smi::FromInt(value_)));
+        __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
         __ mov(r1, Operand(Smi::FromInt(value_)));
       } else {
-        __ sub(r1, r0, Operand(Smi::FromInt(value_)));
+        __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
         __ mov(r0, Operand(Smi::FromInt(value_)));
       }
       break;
@@ -809,10 +915,10 @@
     case Token::SUB: {
       // Revert optimistic sub.
       if (reversed_) {
-        __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
+        __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
         __ mov(r1, Operand(Smi::FromInt(value_)));
       } else {
-        __ add(r1, r0, Operand(Smi::FromInt(value_)));
+        __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
         __ mov(r0, Operand(Smi::FromInt(value_)));
       }
       break;
@@ -826,10 +932,23 @@
     case Token::BIT_XOR:
     case Token::BIT_AND: {
       if (reversed_) {
-        __ mov(r1, Operand(Smi::FromInt(value_)));
+        if (tos_register_.is(r0)) {
+          __ mov(r1, Operand(Smi::FromInt(value_)));
+        } else {
+          ASSERT(tos_register_.is(r1));
+          __ mov(r0, Operand(Smi::FromInt(value_)));
+          lhs = r0;
+          rhs = r1;
+        }
       } else {
-        __ mov(r1, Operand(r0));
-        __ mov(r0, Operand(Smi::FromInt(value_)));
+        if (tos_register_.is(r1)) {
+          __ mov(r0, Operand(Smi::FromInt(value_)));
+        } else {
+          ASSERT(tos_register_.is(r0));
+          __ mov(r1, Operand(Smi::FromInt(value_)));
+          lhs = r0;
+          rhs = r1;
+        }
       }
       break;
     }
@@ -838,8 +957,14 @@
     case Token::SHR:
     case Token::SAR: {
       if (!reversed_) {
-        __ mov(r1, Operand(r0));
-        __ mov(r0, Operand(Smi::FromInt(value_)));
+        if (tos_register_.is(r1)) {
+          __ mov(r0, Operand(Smi::FromInt(value_)));
+        } else {
+          ASSERT(tos_register_.is(r0));
+          __ mov(r1, Operand(Smi::FromInt(value_)));
+          lhs = r0;
+          rhs = r1;
+        }
       } else {
         UNREACHABLE();  // Should have been handled in SmiOperation.
       }
@@ -852,8 +977,13 @@
       break;
   }
 
-  GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
+  GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
   __ CallStub(&stub);
+  // The generic stub returns its value in r0, but that's not
+  // necessarily what we want.  We want whatever the inlined code
+  // expected, which is that the answer is in the same register the
+  // operand was in.
+  __ Move(tos_register_, r0);
 }
 
 
@@ -882,48 +1012,106 @@
                                  Handle<Object> value,
                                  bool reversed,
                                  OverwriteMode mode) {
-  VirtualFrame::SpilledScope spilled_scope;
-  // NOTE: This is an attempt to inline (a bit) more of the code for
-  // some possible smi operations (like + and -) when (at least) one
-  // of the operands is a literal smi. With this optimization, the
-  // performance of the system is increased by ~15%, and the generated
-  // code size is increased by ~1% (measured on a combination of
-  // different benchmarks).
-
-  // sp[0] : operand
-
   int int_value = Smi::cast(*value)->value();
 
-  JumpTarget exit;
-  frame_->EmitPop(r0);
+  bool something_to_inline;
+  switch (op) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::BIT_AND:
+    case Token::BIT_OR:
+    case Token::BIT_XOR: {
+      something_to_inline = true;
+      break;
+    }
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR: {
+      if (reversed) {
+        something_to_inline = false;
+      } else {
+        something_to_inline = true;
+      }
+      break;
+    }
+    case Token::MOD: {
+      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
+        something_to_inline = false;
+      } else {
+        something_to_inline = true;
+      }
+      break;
+    }
+    case Token::MUL: {
+      if (!IsEasyToMultiplyBy(int_value)) {
+        something_to_inline = false;
+      } else {
+        something_to_inline = true;
+      }
+      break;
+    }
+    default: {
+      something_to_inline = false;
+      break;
+    }
+  }
 
-  bool something_to_inline = true;
+  if (!something_to_inline) {
+    if (!reversed) {
+      // Push the rhs onto the virtual frame by putting it in a TOS register.
+      Register rhs = frame_->GetTOSRegister();
+      __ mov(rhs, Operand(value));
+      frame_->EmitPush(rhs);
+      VirtualFrameBinaryOperation(op, mode, int_value);
+    } else {
+      // Pop the rhs, then push lhs and rhs in the right order.  This
+      // performs at most one pop; the rest takes place in TOS registers.
+      Register lhs = frame_->GetTOSRegister();    // Get reg for pushing.
+      Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
+      __ mov(lhs, Operand(value));
+      frame_->EmitPush(lhs);
+      frame_->EmitPush(rhs);
+      VirtualFrameBinaryOperation(op, mode, kUnknownIntValue);
+    }
+    return;
+  }
+
+  // We move the top of the stack to a register (normally no move is involved).
+  Register tos = frame_->PopToRegister();
+  // All other registers are spilled.  The deferred code expects one argument
+  // in a register and all other values are flushed to the stack.  The
+  // answer is returned in the same register that the top of stack argument was
+  // in.
+  frame_->SpillAll();
+
   switch (op) {
     case Token::ADD: {
       DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
 
-      __ add(r0, r0, Operand(value), SetCC);
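+      // Optimistically add first: the deferred code reverts the add and
+      // takes the slow path on signed overflow (vs) or if the operand was
+      // not a smi (the set tag bit survives the add of a tagged constant).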
+      __ add(tos, tos, Operand(value), SetCC);
       deferred->Branch(vs);
-      __ tst(r0, Operand(kSmiTagMask));
+      __ tst(tos, Operand(kSmiTagMask));
       deferred->Branch(ne);
       deferred->BindExit();
+      frame_->EmitPush(tos);
       break;
     }
 
     case Token::SUB: {
       DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
 
       if (reversed) {
-        __ rsb(r0, r0, Operand(value), SetCC);
+        __ rsb(tos, tos, Operand(value), SetCC);
       } else {
-        __ sub(r0, r0, Operand(value), SetCC);
+        __ sub(tos, tos, Operand(value), SetCC);
       }
       deferred->Branch(vs);
-      __ tst(r0, Operand(kSmiTagMask));
+      __ tst(tos, Operand(kSmiTagMask));
       deferred->Branch(ne);
       deferred->BindExit();
+      frame_->EmitPush(tos);
       break;
     }
 
@@ -932,46 +1120,46 @@
     case Token::BIT_XOR:
     case Token::BIT_AND: {
       DeferredCode* deferred =
-        new DeferredInlineSmiOperation(op, int_value, reversed, mode);
-      __ tst(r0, Operand(kSmiTagMask));
+        new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+      __ tst(tos, Operand(kSmiTagMask));
       deferred->Branch(ne);
       switch (op) {
-        case Token::BIT_OR:  __ orr(r0, r0, Operand(value)); break;
-        case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
-        case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
+        case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
+        case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
+        case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
         default: UNREACHABLE();
       }
       deferred->BindExit();
+      frame_->EmitPush(tos);
       break;
     }
 
     case Token::SHL:
     case Token::SHR:
     case Token::SAR: {
-      if (reversed) {
-        something_to_inline = false;
-        break;
-      }
+      ASSERT(!reversed);
+      Register scratch = VirtualFrame::scratch0();
+      Register scratch2 = VirtualFrame::scratch1();
       int shift_value = int_value & 0x1f;  // least significant 5 bits
       DeferredCode* deferred =
-        new DeferredInlineSmiOperation(op, shift_value, false, mode);
-      __ tst(r0, Operand(kSmiTagMask));
+        new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
+      __ tst(tos, Operand(kSmiTagMask));
       deferred->Branch(ne);
-      __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // remove tags
+      __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // remove tags
       switch (op) {
         case Token::SHL: {
           if (shift_value != 0) {
-            __ mov(r2, Operand(r2, LSL, shift_value));
+            __ mov(scratch, Operand(scratch, LSL, shift_value));
           }
-          // check that the *unsigned* result fits in a smi
-          __ add(r3, r2, Operand(0x40000000), SetCC);
+          // check that the *signed* result fits in a smi
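+          // Adding 0x40000000 makes the result negative exactly when the
+          // shifted value is outside the range that can be smi-tagged, so
+          // branching on 'mi' catches the overflow.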
+          __ add(scratch2, scratch, Operand(0x40000000), SetCC);
           deferred->Branch(mi);
           break;
         }
         case Token::SHR: {
           // LSR by immediate 0 means shifting 32 bits.
           if (shift_value != 0) {
-            __ mov(r2, Operand(r2, LSR, shift_value));
+            __ mov(scratch, Operand(scratch, LSR, shift_value));
           }
           // check that the *unsigned* result fits in a smi
           // neither of the two high-order bits can be set:
@@ -979,47 +1167,45 @@
           // - 0x40000000: this number would convert to negative when
          // smi tagging.  These two cases can only happen with shifts
          // by 0 or 1 when handed a valid smi.
-          __ and_(r3, r2, Operand(0xc0000000), SetCC);
+          __ tst(scratch, Operand(0xc0000000));
           deferred->Branch(ne);
           break;
         }
         case Token::SAR: {
           if (shift_value != 0) {
             // ASR by immediate 0 means shifting 32 bits.
-            __ mov(r2, Operand(r2, ASR, shift_value));
+            __ mov(scratch, Operand(scratch, ASR, shift_value));
           }
           break;
         }
         default: UNREACHABLE();
       }
-      __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+      __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
       deferred->BindExit();
+      frame_->EmitPush(tos);
       break;
     }
 
     case Token::MOD: {
-      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
-        something_to_inline = false;
-        break;
-      }
+      ASSERT(!reversed);
+      ASSERT(int_value >= 2);
+      ASSERT(IsPowerOf2(int_value));
       DeferredCode* deferred =
-        new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+        new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
       unsigned mask = (0x80000000u | kSmiTagMask);
-      __ tst(r0, Operand(mask));
+      __ tst(tos, Operand(mask));
       deferred->Branch(ne);  // Go to deferred code on non-Smis and negative.
       mask = (int_value << kSmiTagSize) - 1;
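+      // For a power-of-2 modulus m, x % m equals x & (m - 1).  The mask
+      // (m << kSmiTagSize) - 1 applies this directly to the tagged value,
+      // leaving a correctly tagged smi.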
-      __ and_(r0, r0, Operand(mask));
+      __ and_(tos, tos, Operand(mask));
       deferred->BindExit();
+      frame_->EmitPush(tos);
       break;
     }
 
     case Token::MUL: {
-      if (!IsEasyToMultiplyBy(int_value)) {
-        something_to_inline = false;
-        break;
-      }
+      ASSERT(IsEasyToMultiplyBy(int_value));
       DeferredCode* deferred =
-        new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+        new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
       unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
       max_smi_that_wont_overflow <<= kSmiTagSize;
       unsigned mask = 0x80000000u;
@@ -1031,33 +1217,18 @@
       // conservative way and for a non-Smi.  It also filters out negative
       // numbers, unfortunately, but since this code is inline we prefer
       // brevity to comprehensiveness.
-      __ tst(r0, Operand(mask));
+      __ tst(tos, Operand(mask));
       deferred->Branch(ne);
-      MultiplyByKnownInt(masm_, r0, r0, int_value);
+      MultiplyByKnownInt(masm_, tos, tos, int_value);
       deferred->BindExit();
+      frame_->EmitPush(tos);
       break;
     }
 
     default:
-      something_to_inline = false;
+      UNREACHABLE();
       break;
   }
-
-  if (!something_to_inline) {
-    if (!reversed) {
-      frame_->EmitPush(r0);
-      __ mov(r0, Operand(value));
-      frame_->EmitPush(r0);
-      GenericBinaryOperation(op, mode, int_value);
-    } else {
-      __ mov(ip, Operand(value));
-      frame_->EmitPush(ip);
-      frame_->EmitPush(r0);
-      GenericBinaryOperation(op, mode, kUnknownIntValue);
-    }
-  }
-
-  exit.Bind();
 }
 
 
@@ -1065,10 +1236,11 @@
                                Expression* left,
                                Expression* right,
                                bool strict) {
-  if (left != NULL) LoadAndSpill(left);
-  if (right != NULL) LoadAndSpill(right);
+  VirtualFrame::RegisterAllocationScope scope(this);
 
-  VirtualFrame::SpilledScope spilled_scope;
+  if (left != NULL) Load(left);
+  if (right != NULL) Load(right);
+
   // sp[0] : y
   // sp[1] : x
   // result : cc register
@@ -1076,32 +1248,49 @@
   // Strict only makes sense for equality comparisons.
   ASSERT(!strict || cc == eq);
 
-  JumpTarget exit;
-  JumpTarget smi;
+  Register lhs;
+  Register rhs;
+
+  // We load the top two stack positions into registers chosen by the virtual
+  // frame.  This should keep the register shuffling to a minimum.
   // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
   if (cc == gt || cc == le) {
     cc = ReverseCondition(cc);
-    frame_->EmitPop(r1);
-    frame_->EmitPop(r0);
+    lhs = frame_->PopToRegister();
+    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
   } else {
-    frame_->EmitPop(r0);
-    frame_->EmitPop(r1);
+    rhs = frame_->PopToRegister();
+    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
   }
-  __ orr(r2, r0, Operand(r1));
-  __ tst(r2, Operand(kSmiTagMask));
+
+  ASSERT(rhs.is(r0) || rhs.is(r1));
+  ASSERT(lhs.is(r0) || lhs.is(r1));
+
+  // Now we have the two sides in r0 and r1.  We flush any other registers
+  // because the stub doesn't know about register allocation.
+  frame_->SpillAll();
+  Register scratch = VirtualFrame::scratch0();
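+  // A single test covers both operands: the OR of two smis still has the
+  // (zero) smi tag bit clear, so 'eq' below means both sides are smis.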
+  __ orr(scratch, lhs, Operand(rhs));
+  __ tst(scratch, Operand(kSmiTagMask));
+  JumpTarget smi;
   smi.Branch(eq);
 
   // Perform non-smi comparison by stub.
   // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
   // We call with 0 args because there are 0 on the stack.
+  if (!rhs.is(r0)) {
+    __ Swap(rhs, lhs, ip);
+  }
+
   CompareStub stub(cc, strict);
   frame_->CallStub(&stub, 0);
   __ cmp(r0, Operand(0));
+  JumpTarget exit;
   exit.Jump();
 
   // Do smi comparisons by pointer comparison.
   smi.Bind();
-  __ cmp(r1, Operand(r0));
+  __ cmp(lhs, Operand(rhs));
 
   exit.Bind();
   cc_reg_ = cc;
@@ -1112,7 +1301,7 @@
 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                       CallFunctionFlags flags,
                                       int position) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   // Push the arguments ("left-to-right") on the stack.
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
@@ -1133,8 +1322,189 @@
 }
 
 
+void CodeGenerator::CallApplyLazy(Expression* applicand,
+                                  Expression* receiver,
+                                  VariableProxy* arguments,
+                                  int position) {
+  // An optimized implementation of expressions of the form
+  // x.apply(y, arguments).
+  // If the arguments object of the scope has not been allocated,
+  // and x.apply is Function.prototype.apply, this optimization
+  // just copies y and the arguments of the current function on the
+  // stack, as receiver and arguments, and calls x.
+  // In the implementation comments, we call x the applicand
+  // and y the receiver.
+  VirtualFrame::SpilledScope spilled_scope(frame_);
+
+  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
+  ASSERT(arguments->IsArguments());
+
+  // Load applicand.apply onto the stack. This will usually
+  // give us a megamorphic load site. Not super, but it works.
+  LoadAndSpill(applicand);
+  Handle<String> name = Factory::LookupAsciiSymbol("apply");
+  frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
+  frame_->EmitPush(r0);
+
+  // Load the receiver and the existing arguments object onto the
+  // expression stack. Avoid allocating the arguments object here.
+  LoadAndSpill(receiver);
+  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+
+  // Emit the source position information after having loaded the
+  // receiver and the arguments.
+  CodeForSourcePosition(position);
+  // Contents of the stack at this point:
+  //   sp[0]: arguments object of the current function or the hole.
+  //   sp[1]: receiver
+  //   sp[2]: applicand.apply
+  //   sp[3]: applicand.
+
+  // Check if the arguments object has been lazily allocated
+  // already. If so, just use that instead of copying the arguments
+  // from the stack. This also deals with cases where a local variable
+  // named 'arguments' has been introduced.
+  __ ldr(r0, MemOperand(sp, 0));
+
+  Label slow, done;
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(ip, r0);
+  __ b(ne, &slow);
+
+  Label build_args;
+  // Get rid of the arguments object probe.
+  frame_->Drop();
+  // Stack now has 3 elements on it.
+  // Contents of stack at this point:
+  //   sp[0]: receiver
+  //   sp[1]: applicand.apply
+  //   sp[2]: applicand.
+
+  // Check that the receiver really is a JavaScript object.
+  __ ldr(r0, MemOperand(sp, 0));
+  __ BranchOnSmi(r0, &build_args);
+  // We allow all JSObjects including JSFunctions.  As long as
+  // JS_FUNCTION_TYPE is the last instance type and it is right
+  // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
+  // bound.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  __ CompareObjectType(r0, r1, r2, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &build_args);
+
+  // Check that applicand.apply is Function.prototype.apply.
+  __ ldr(r0, MemOperand(sp, kPointerSize));
+  __ BranchOnSmi(r0, &build_args);
+  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &build_args);
+  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+  Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+  __ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
+  __ cmp(r1, Operand(apply_code));
+  __ b(ne, &build_args);
+
+  // Check that applicand is a function.
+  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+  __ BranchOnSmi(r1, &build_args);
+  __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
+  __ b(ne, &build_args);
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  Label invoke, adapted;
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(eq, &adapted);
+
+  // No arguments adaptor frame. Copy fixed number of arguments.
+  __ mov(r0, Operand(scope()->num_parameters()));
+  for (int i = 0; i < scope()->num_parameters(); i++) {
+    __ ldr(r2, frame_->ParameterAt(i));
+    __ push(r2);
+  }
+  __ jmp(&invoke);
+
+  // Arguments adaptor frame present. Copy arguments from there, but
+  // avoid copying too many arguments to avoid stack overflows.
+  __ bind(&adapted);
+  static const uint32_t kArgumentsLimit = 1 * KB;
+  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ mov(r0, Operand(r0, LSR, kSmiTagSize));
+  __ mov(r3, r0);
+  __ cmp(r0, Operand(kArgumentsLimit));
+  __ b(gt, &build_args);
+
+  // Loop through the arguments pushing them onto the execution
+  // stack. We don't inform the virtual frame of the push, so we don't
+  // have to worry about getting rid of the elements from the virtual
+  // frame.
+  Label loop;
+  // r3 is a small non-negative integer, due to the test above.
+  __ cmp(r3, Operand(0));
+  __ b(eq, &invoke);
+  // Compute the address of the first argument.
+  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
+  __ add(r2, r2, Operand(kPointerSize));
+  __ bind(&loop);
+  // Post-decrement argument address by kPointerSize on each iteration.
+  __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
+  __ push(r4);
+  __ sub(r3, r3, Operand(1), SetCC);
+  __ b(gt, &loop);
+
+  // Invoke the function.
+  __ bind(&invoke);
+  ParameterCount actual(r0);
+  __ InvokeFunction(r1, actual, CALL_FUNCTION);
+  // Drop applicand.apply and applicand from the stack, and push
+  // the result of the function call, but leave the spilled frame
+  // unchanged, with 3 elements, so it is correct when we compile the
+  // slow-case code.
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ push(r0);
+  // Stack now has 1 element:
+  //   sp[0]: result
+  __ jmp(&done);
+
+  // Slow-case: Allocate the arguments object since we know it isn't
+  // there, and fall through to the slow case where we call
+  // applicand.apply.
+  __ bind(&build_args);
+  // Stack now has 3 elements, because we jumped here from a point where:
+  //   sp[0]: receiver
+  //   sp[1]: applicand.apply
+  //   sp[2]: applicand.
+  StoreArgumentsObject(false);
+
+  // Stack and frame now have 4 elements.
+  __ bind(&slow);
+
+  // Generic computation of x.apply(y, args) with no special optimization.
+  // Flip applicand.apply and applicand on the stack, so
+  // applicand looks like the receiver of the applicand.apply call.
+  // Then process it as a normal function call.
+  __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
+  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+  __ str(r0, MemOperand(sp, 2 * kPointerSize));
+  __ str(r1, MemOperand(sp, 3 * kPointerSize));
+
+  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+  frame_->CallStub(&call_function, 3);
+  // The function and its two arguments have been dropped.
+  frame_->Drop();  // Drop the receiver as well.
+  frame_->EmitPush(r0);
+  // Stack now has 1 element:
+  //   sp[0]: result
+  __ bind(&done);
+
+  // Restore the context register after a call.
+  __ ldr(cp, frame_->Context());
+}
+
+
 void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(has_cc());
   Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
   target->Branch(cc);
@@ -1143,7 +1513,7 @@
 
 
 void CodeGenerator::CheckStack() {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ check stack");
   __ LoadRoot(ip, Heap::kStackLimitRootIndex);
   // Put the lr setup instruction in the delay slot.  kInstrSize is added to
@@ -1165,7 +1535,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
     VisitAndSpill(statements->at(i));
   }
@@ -1177,7 +1547,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ Block");
   CodeForStatementPosition(node);
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
@@ -1191,12 +1561,11 @@
 
 
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  VirtualFrame::SpilledScope spilled_scope;
   frame_->EmitPush(cp);
-  __ mov(r0, Operand(pairs));
-  frame_->EmitPush(r0);
-  __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
-  frame_->EmitPush(r0);
+  frame_->EmitPush(Operand(pairs));
+  frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
   // The result is discarded.
 }
@@ -1206,7 +1575,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Declaration");
   Variable* var = node->proxy()->var();
   ASSERT(var != NULL);  // must have been resolved
@@ -1221,28 +1589,27 @@
     ASSERT(var->is_dynamic());
     // For now, just do a runtime call.
     frame_->EmitPush(cp);
-    __ mov(r0, Operand(var->name()));
-    frame_->EmitPush(r0);
+    frame_->EmitPush(Operand(var->name()));
     // Declaration nodes are always declared in only two modes.
     ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
     PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    __ mov(r0, Operand(Smi::FromInt(attr)));
-    frame_->EmitPush(r0);
+    frame_->EmitPush(Operand(Smi::FromInt(attr)));
     // Push initial value, if any.
     // Note: For variables we must not push an initial value (such as
     // 'undefined') because we may have a (legal) redeclaration and we
     // must not destroy the current value.
     if (node->mode() == Variable::CONST) {
-      __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
-      frame_->EmitPush(r0);
+      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
     } else if (node->fun() != NULL) {
-      LoadAndSpill(node->fun());
+      Load(node->fun());
     } else {
-      __ mov(r0, Operand(0));  // no initial value!
-      frame_->EmitPush(r0);
+      frame_->EmitPush(Operand(0));
     }
+
+    VirtualFrame::SpilledScope spilled_scope(frame_);
     frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
     // Ignore the return value (declarations are statements).
+
     ASSERT(frame_->height() == original_height);
     return;
   }
@@ -1258,12 +1625,11 @@
   }
 
   if (val != NULL) {
-    {
-      // Set initial value.
-      Reference target(this, node->proxy());
-      LoadAndSpill(val);
-      target.SetValue(NOT_CONST_INIT);
-    }
+    // Set initial value.
+    Reference target(this, node->proxy());
+    Load(val);
+    target.SetValue(NOT_CONST_INIT);
+
     // Get rid of the assigned value (declarations are statements).
     frame_->Drop();
   }
@@ -1275,7 +1641,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ExpressionStatement");
   CodeForStatementPosition(node);
   Expression* expression = node->expression();
@@ -1290,7 +1656,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "// EmptyStatement");
   CodeForStatementPosition(node);
   // nothing to do
@@ -1302,7 +1668,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ IfStatement");
   // Generate different code depending on which parts of the if statement
   // are present or not.
@@ -1388,7 +1754,7 @@
 
 
 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ContinueStatement");
   CodeForStatementPosition(node);
   node->target()->continue_target()->Jump();
@@ -1396,7 +1762,7 @@
 
 
 void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ BreakStatement");
   CodeForStatementPosition(node);
   node->target()->break_target()->Jump();
@@ -1404,7 +1770,7 @@
 
 
 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ReturnStatement");
 
   CodeForStatementPosition(node);
@@ -1427,7 +1793,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ WithEnterStatement");
   CodeForStatementPosition(node);
   LoadAndSpill(node->expression());
@@ -1438,7 +1804,7 @@
   }
 #ifdef DEBUG
   JumpTarget verified_true;
-  __ cmp(r0, Operand(cp));
+  __ cmp(r0, cp);
   verified_true.Branch(eq);
   __ stop("PushContext: r0 is expected to be the same as cp");
   verified_true.Bind();
@@ -1453,7 +1819,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ WithExitStatement");
   CodeForStatementPosition(node);
   // Pop context.
@@ -1468,7 +1834,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ SwitchStatement");
   CodeForStatementPosition(node);
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
@@ -1557,11 +1923,12 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ DoWhileStatement");
   CodeForStatementPosition(node);
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
   JumpTarget body(JumpTarget::BIDIRECTIONAL);
+  IncrementLoopNesting();
 
   // Label the top of the loop for the backward CFG edge.  If the test
   // is always true we can use the continue target, and if the test is
@@ -1622,6 +1989,7 @@
   if (node->break_target()->is_linked()) {
     node->break_target()->Bind();
   }
+  DecrementLoopNesting();
   ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
@@ -1630,7 +1998,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ WhileStatement");
   CodeForStatementPosition(node);
 
@@ -1640,6 +2008,7 @@
   if (info == ALWAYS_FALSE) return;
 
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  IncrementLoopNesting();
 
   // Label the top of the loop with the continue target for the backward
   // CFG edge.
@@ -1671,6 +2040,7 @@
   if (node->break_target()->is_linked()) {
     node->break_target()->Bind();
   }
+  DecrementLoopNesting();
   ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
@@ -1679,7 +2049,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ForStatement");
   CodeForStatementPosition(node);
   if (node->init() != NULL) {
@@ -1692,6 +2062,7 @@
   if (info == ALWAYS_FALSE) return;
 
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  IncrementLoopNesting();
 
   // If there is no update statement, label the top of the loop with the
   // continue target, otherwise with the loop target.
@@ -1746,6 +2117,7 @@
   if (node->break_target()->is_linked()) {
     node->break_target()->Bind();
   }
+  DecrementLoopNesting();
   ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
@@ -1754,7 +2126,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ForInStatement");
   CodeForStatementPosition(node);
 
@@ -1909,7 +2281,7 @@
 
   __ ldr(r0, frame_->ElementAt(0));  // load the current count
   __ ldr(r1, frame_->ElementAt(1));  // load the length
-  __ cmp(r0, Operand(r1));  // compare to the array length
+  __ cmp(r0, r1);  // compare to the array length
   node->break_target()->Branch(hs);
 
   __ ldr(r0, frame_->ElementAt(0));
@@ -1990,7 +2362,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ TryCatchStatement");
   CodeForStatementPosition(node);
 
@@ -2111,7 +2483,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ TryFinallyStatement");
   CodeForStatementPosition(node);
 
@@ -2295,7 +2667,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ DebuggerStatement");
   CodeForStatementPosition(node);
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2306,14 +2678,13 @@
 }
 
 
-void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
-  VirtualFrame::SpilledScope spilled_scope;
-  ASSERT(boilerplate->IsBoilerplate());
-
-  __ mov(r0, Operand(boilerplate));
+void CodeGenerator::InstantiateFunction(
+    Handle<SharedFunctionInfo> function_info) {
+  VirtualFrame::SpilledScope spilled_scope(frame_);
+  __ mov(r0, Operand(function_info));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
-  if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+  if (scope()->is_function_scope() && function_info->num_literals() == 0) {
     FastNewClosureStub stub;
     frame_->EmitPush(r0);
     frame_->CallStub(&stub, 1);
@@ -2332,31 +2703,31 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ FunctionLiteral");
 
-  // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script(), this);
+  // Build the function info and instantiate it.
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) {
     ASSERT(frame_->height() == original_height);
     return;
   }
-  InstantiateBoilerplate(boilerplate);
-  ASSERT(frame_->height() == original_height + 1);
+  InstantiateFunction(function_info);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
-void CodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void CodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
-  InstantiateBoilerplate(node->boilerplate());
-  ASSERT(frame_->height() == original_height + 1);
+  VirtualFrame::SpilledScope spilled_scope(frame_);
+  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
+  InstantiateFunction(node->shared_function_info());
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -2364,7 +2735,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ Conditional");
   JumpTarget then;
   JumpTarget else_;
@@ -2383,15 +2754,16 @@
     LoadAndSpill(node->else_expression());
     if (exit.is_linked()) exit.Bind();
   }
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  VirtualFrame::SpilledScope spilled_scope;
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
 
+    // JumpTargets do not yet support merging frames, so the frame must be
+    // spilled when jumping to these targets.
     JumpTarget slow;
     JumpTarget done;
 
@@ -2401,16 +2773,18 @@
     // perform a runtime call for all variables in the scope
     // containing the eval.
     if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-      LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
+      LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
       // If there was no control flow to slow, we can exit early.
       if (!slow.is_linked()) {
         frame_->EmitPush(r0);
         return;
       }
+      frame_->SpillAll();
 
       done.Jump();
 
     } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+      frame_->SpillAll();
       Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
       // Only generate the fast case for locals that rewrite to slots.
       // This rules out argument loads.
@@ -2433,6 +2807,7 @@
     }
 
     slow.Bind();
+    VirtualFrame::SpilledScope spilled_scope(frame_);
     frame_->EmitPush(cp);
     __ mov(r0, Operand(slot->var()->name()));
     frame_->EmitPush(r0);
@@ -2447,27 +2822,55 @@
     frame_->EmitPush(r0);
 
   } else {
-    // Special handling for locals allocated in registers.
-    __ ldr(r0, SlotOperand(slot, r2));
-    frame_->EmitPush(r0);
+    Register scratch = VirtualFrame::scratch0();
+    frame_->EmitPush(SlotOperand(slot, scratch));
     if (slot->var()->mode() == Variable::CONST) {
       // Const slots may contain 'the hole' value (the constant hasn't been
       // initialized yet) which needs to be converted into the 'undefined'
       // value.
       Comment cmnt(masm_, "[ Unhole const");
-      frame_->EmitPop(r0);
+      frame_->EmitPop(scratch);
       __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-      __ cmp(r0, ip);
-      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-      frame_->EmitPush(r0);
+      __ cmp(scratch, ip);
+      __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
+      frame_->EmitPush(scratch);
     }
   }
 }
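
The "Unhole const" sequence above is the whole const story in miniature: an
uninitialized const slot holds the hole sentinel, and any read must observe
undefined instead. A minimal sketch of that check, with a hypothetical Value
enum standing in for V8's tagged values:

    #include <cassert>

    enum class Value { TheHole, Undefined, Smi42 };  // Hypothetical stand-ins.

    // Reading a const slot: the hole (an uninitialized const) reads as
    // undefined; anything else is returned unchanged.
    Value LoadConstSlot(Value slot) {
      return slot == Value::TheHole ? Value::Undefined : slot;
    }

    int main() {
      assert(LoadConstSlot(Value::TheHole) == Value::Undefined);
      assert(LoadConstSlot(Value::Smi42) == Value::Smi42);
    }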
 
 
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+                                                  TypeofState state) {
+  LoadFromSlot(slot, state);
+
+  // Bail out quickly if we're not using lazy arguments allocation.
+  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+  // ... or if the slot isn't a non-parameter arguments slot.
+  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+  VirtualFrame::SpilledScope spilled_scope(frame_);
+
+  // Load the value from the top of the stack into r0, but leave it on
+  // the stack.
+  __ ldr(r0, MemOperand(sp, 0));
+
+  // If the loaded value is the sentinel that indicates that we
+  // haven't loaded the arguments object yet, we need to do it now.
+  JumpTarget exit;
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(r0, ip);
+  exit.Branch(ne);
+  frame_->Drop();
+  StoreArgumentsObject(false);
+  exit.Bind();
+}
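
LoadFromSlotCheckForArguments applies the same sentinel idea to lazy arguments
allocation: the slot holds the hole until the first read, which materializes
the real arguments object. A sketch of the invariant, with hypothetical types
and nullptr playing the role of the hole:

    #include <cassert>

    struct ArgumentsObject {};  // Hypothetical stand-in.

    // The slot stays nullptr (the sentinel) until the first load
    // materializes the object, mirroring StoreArgumentsObject(false) above.
    ArgumentsObject* LoadArguments(ArgumentsObject*& slot) {
      if (slot == nullptr) slot = new ArgumentsObject();
      return slot;
    }

    int main() {
      ArgumentsObject* slot = nullptr;
      ArgumentsObject* first = LoadArguments(slot);
      assert(first == LoadArguments(slot));  // Later loads reuse the object.
      delete first;
    }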
+
+
 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
   ASSERT(slot != NULL);
   if (slot->type() == Slot::LOOKUP) {
+    VirtualFrame::SpilledScope spilled_scope(frame_);
     ASSERT(slot->var()->is_dynamic());
 
     // For now, just do a runtime call.
@@ -2501,17 +2904,22 @@
 
   } else {
     ASSERT(!slot->var()->is_dynamic());
+    Register scratch = VirtualFrame::scratch0();
+    VirtualFrame::RegisterAllocationScope scope(this);
 
+    // The frame must be spilled when branching to this target.
     JumpTarget exit;
+
     if (init_state == CONST_INIT) {
       ASSERT(slot->var()->mode() == Variable::CONST);
       // Only the first const initialization must be executed (the slot
       // still contains 'the hole' value). When the assignment is
       // executed, the code is identical to a normal store (see below).
       Comment cmnt(masm_, "[ Init const");
-      __ ldr(r2, SlotOperand(slot, r2));
+      __ ldr(scratch, SlotOperand(slot, scratch));
       __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-      __ cmp(r2, ip);
+      __ cmp(scratch, ip);
+      frame_->SpillAll();
       exit.Branch(ne);
     }
 
@@ -2524,22 +2932,25 @@
     // initialize consts to 'the hole' value and by doing so, end up
     // calling this code.  r2 may be loaded with context; used below in
     // RecordWrite.
-    frame_->EmitPop(r0);
-    __ str(r0, SlotOperand(slot, r2));
-    frame_->EmitPush(r0);
+    Register tos = frame_->Peek();
+    __ str(tos, SlotOperand(slot, scratch));
     if (slot->type() == Slot::CONTEXT) {
       // Skip write barrier if the written value is a smi.
-      __ tst(r0, Operand(kSmiTagMask));
+      __ tst(tos, Operand(kSmiTagMask));
+      // We don't use tos any more after here.
+      VirtualFrame::SpilledScope spilled_scope(frame_);
       exit.Branch(eq);
-      // r2 is loaded with context when calling SlotOperand above.
+      // scratch is loaded with context when calling SlotOperand above.
       int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
       __ mov(r3, Operand(offset));
-      __ RecordWrite(r2, r3, r1);
+      // r1 could be identical to tos, but that doesn't matter.
+      __ RecordWrite(scratch, r3, r1);
     }
     // If we definitely did not jump over the assignment, we do not need
     // to bind the exit label.  Doing so can defeat peephole
     // optimization.
     if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
+      frame_->SpillAll();
       exit.Bind();
     }
   }
@@ -2548,16 +2959,17 @@
 
 void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                                       TypeofState typeof_state,
-                                                      Register tmp,
-                                                      Register tmp2,
                                                       JumpTarget* slow) {
   // Check that no extension objects have been created by calls to
   // eval from the current scope to the global scope.
+  Register tmp = frame_->scratch0();
+  Register tmp2 = frame_->scratch1();
   Register context = cp;
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
       if (s->calls_eval()) {
+        frame_->SpillAll();
         // Check that extension is NULL.
         __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
         __ tst(tmp2, tmp2);
@@ -2575,10 +2987,9 @@
   }
 
   if (s->is_eval_scope()) {
+    frame_->SpillAll();
     Label next, fast;
-    if (!context.is(tmp)) {
-      __ mov(tmp, Operand(context));
-    }
+    __ Move(tmp, context);
     __ bind(&next);
     // Terminate at global context.
     __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
@@ -2596,20 +3007,13 @@
     __ bind(&fast);
   }
 
-  // All extension objects were empty and it is safe to use a global
-  // load IC call.
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   // Load the global object.
   LoadGlobal();
-  // Setup the name register.
-  __ mov(r2, Operand(slot->var()->name()));
-  // Call IC stub.
-  if (typeof_state == INSIDE_TYPEOF) {
-    frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-  } else {
-    frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, 0);
-  }
-
+  // Set up the name register and call the load IC.
+  frame_->CallLoadIC(slot->var()->name(),
+                     typeof_state == INSIDE_TYPEOF
+                         ? RelocInfo::CODE_TARGET
+                         : RelocInfo::CODE_TARGET_CONTEXT);
   // Drop the global object. The result is in r0.
   frame_->Drop();
 }
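
The loop above walks the context chain and falls back to the slow path as soon
as any intervening context carries an extension object, meaning an eval could
have introduced shadowing bindings. The shape of that check, over a
hypothetical Context struct:

    struct Context {
      Context* previous;  // Enclosing context.
      void* extension;    // Non-null if an eval added bindings here.
      bool is_global;
    };

    // True when no context between here and the global context has an
    // extension object, so the global load IC is safe to use.
    bool NoExtensionsUpToGlobal(const Context* ctx) {
      for (; !ctx->is_global; ctx = ctx->previous) {
        if (ctx->extension != nullptr) return false;  // Slow path.
      }
      return true;
    }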
@@ -2619,10 +3023,9 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Slot");
-  LoadFromSlot(node, NOT_INSIDE_TYPEOF);
-  ASSERT(frame_->height() == original_height + 1);
+  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -2630,7 +3033,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ VariableProxy");
 
   Variable* var = node->var();
@@ -2640,9 +3042,9 @@
   } else {
     ASSERT(var->is_global());
     Reference ref(this, node);
-    ref.GetValueAndSpill();
+    ref.GetValue();
   }
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -2650,11 +3052,11 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Literal");
-  __ mov(r0, Operand(node->handle()));
-  frame_->EmitPush(r0);
-  ASSERT(frame_->height() == original_height + 1);
+  Register reg = frame_->GetTOSRegister();
+  __ mov(reg, Operand(node->handle()));
+  frame_->EmitPush(reg);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -2662,7 +3064,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ RegExp Literal");
 
   // Retrieve the literal array and check the allocated entry.
@@ -2698,7 +3100,7 @@
   done.Bind();
   // Push the literal.
   frame_->EmitPush(r2);
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -2706,22 +3108,24 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ObjectLiteral");
 
   // Load the function of this activation.
-  __ ldr(r2, frame_->Function());
+  __ ldr(r3, frame_->Function());
   // Literal array.
-  __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+  __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   // Literal index.
-  __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
+  __ mov(r2, Operand(Smi::FromInt(node->literal_index())));
   // Constant properties.
-  __ mov(r0, Operand(node->constant_properties()));
-  frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+  __ mov(r1, Operand(node->constant_properties()));
+  // Should the object literal have fast elements?
+  __ mov(r0, Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
+  frame_->EmitPushMultiple(4, r3.bit() | r2.bit() | r1.bit() | r0.bit());
   if (node->depth() > 1) {
-    frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+    frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
-    frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+    frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   }
   frame_->EmitPush(r0);  // save the result
   for (int i = 0; i < node->properties()->length(); i++) {
@@ -2777,7 +3181,7 @@
       }
     }
   }
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -2785,7 +3189,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ArrayLiteral");
 
   // Load the function of this activation.
@@ -2836,7 +3240,7 @@
     __ mov(r3, Operand(offset));
     __ RecordWrite(r1, r3, r2);
   }
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -2844,7 +3248,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   // Call runtime routine to allocate the catch extension object and
   // assign the exception value to the catch variable.
   Comment cmnt(masm_, "[ CatchExtensionObject");
@@ -2852,72 +3256,318 @@
   LoadAndSpill(node->value());
   frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
   frame_->EmitPush(r0);
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
+}
+
+
+void CodeGenerator::EmitSlotAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  Comment cmnt(masm(), "[ Variable Assignment");
+  Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL);
+  Slot* slot = var->slot();
+  ASSERT(slot != NULL);
+
+  // Evaluate the right-hand side.
+  if (node->is_compound()) {
+    // For a compound assignment the right-hand side is a binary operation
+    // between the current property value and the actual right-hand side.
+    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+
+    // Perform the binary operation.
+    Literal* literal = node->value()->AsLiteral();
+    bool overwrite_value =
+        (node->value()->AsBinaryOperation() != NULL &&
+         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+    if (literal != NULL && literal->handle()->IsSmi()) {
+      SmiOperation(node->binary_op(),
+                   literal->handle(),
+                   false,
+                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+    } else {
+      Load(node->value());
+      VirtualFrameBinaryOperation(
+          node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+    }
+  } else {
+    Load(node->value());
+  }
+
+  // Perform the assignment.
+  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
+    CodeForSourcePosition(node->position());
+    StoreToSlot(slot,
+                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
+  }
+  ASSERT_EQ(original_height + 1, frame_->height());
+}
+
+
+void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  Comment cmnt(masm(), "[ Named Property Assignment");
+  Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  Property* prop = node->target()->AsProperty();
+  ASSERT(var == NULL || (prop == NULL && var->is_global()));
+
+  // Initialize name and evaluate the receiver sub-expression if necessary.
+  // If the receiver is trivial, it is not placed on the stack at this point,
+  // but loaded whenever actually needed.
+  Handle<String> name;
+  bool is_trivial_receiver = false;
+  if (var != NULL) {
+    name = var->name();
+  } else {
+    Literal* lit = prop->key()->AsLiteral();
+    ASSERT_NOT_NULL(lit);
+    name = Handle<String>::cast(lit->handle());
+    // Do not materialize the receiver on the frame if it is trivial.
+    is_trivial_receiver = prop->obj()->IsTrivial();
+    if (!is_trivial_receiver) Load(prop->obj());
+  }
+
+  // Change to slow case at the beginning of an initialization block to
+  // avoid the quadratic behavior of repeatedly adding fast properties.
+  if (node->starts_initialization_block()) {
+    // An initialization block consists of assignments of the form
+    // expr.x = ..., so this is never an assignment to a variable and there
+    // must be a receiver object.
+    ASSERT_EQ(NULL, var);
+    if (is_trivial_receiver) {
+      Load(prop->obj());
+    } else {
+      frame_->Dup();
+    }
+    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+  }
+
+  // Change to fast case at the end of an initialization block. To prepare
+  // for that, add an extra copy of the receiver to the frame, so that it
+  // can be converted back to fast case after the assignment.
+  if (node->ends_initialization_block() && !is_trivial_receiver) {
+    frame_->Dup();
+  }
+
+  // Stack layout:
+  // [tos]   : receiver (only materialized if non-trivial)
+  // [tos+1] : receiver if at the end of an initialization block
+
+  // Evaluate the right-hand side.
+  if (node->is_compound()) {
+    // For a compound assignment the right-hand side is a binary operation
+    // between the current property value and the actual right-hand side.
+    if (is_trivial_receiver) {
+      Load(prop->obj());
+    } else if (var != NULL) {
+      LoadGlobal();
+    } else {
+      frame_->Dup();
+    }
+    EmitNamedLoad(name, var != NULL);
+    frame_->Drop();  // Receiver is left on the stack.
+    frame_->EmitPush(r0);
+
+    // Perform the binary operation.
+    Literal* literal = node->value()->AsLiteral();
+    bool overwrite_value =
+        (node->value()->AsBinaryOperation() != NULL &&
+         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+    if (literal != NULL && literal->handle()->IsSmi()) {
+      SmiOperation(node->binary_op(),
+                   literal->handle(),
+                   false,
+                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+    } else {
+      Load(node->value());
+      VirtualFrameBinaryOperation(
+          node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+    }
+  } else {
+    // For non-compound assignment just load the right-hand side.
+    Load(node->value());
+  }
+
+  // Stack layout:
+  // [tos]   : value
+  // [tos+1] : receiver (only materialized if non-trivial)
+  // [tos+2] : receiver if at the end of an initialization block
+
+  // Perform the assignment.  It is safe to ignore constants here.
+  ASSERT(var == NULL || var->mode() != Variable::CONST);
+  ASSERT_NE(Token::INIT_CONST, node->op());
+  if (is_trivial_receiver) {
+    // Load the receiver and swap with the value.
+    Load(prop->obj());
+    Register t0 = frame_->PopToRegister();
+    Register t1 = frame_->PopToRegister(t0);
+    frame_->EmitPush(t0);
+    frame_->EmitPush(t1);
+  }
+  CodeForSourcePosition(node->position());
+  bool is_contextual = (var != NULL);
+  EmitNamedStore(name, is_contextual);
+  frame_->EmitPush(r0);
+
+  // Change to fast case at the end of an initialization block.
+  if (node->ends_initialization_block()) {
+    ASSERT_EQ(NULL, var);
+    // The argument to the runtime call is the receiver.
+    if (is_trivial_receiver) {
+      Load(prop->obj());
+    } else {
+      // A copy of the receiver is below the value of the assignment. Swap
+      // the receiver and the value of the assignment expression.
+      Register t0 = frame_->PopToRegister();
+      Register t1 = frame_->PopToRegister(t0);
+      frame_->EmitPush(t0);
+      frame_->EmitPush(t1);
+    }
+    frame_->CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  // Stack layout:
+  // [tos]   : result
+
+  ASSERT_EQ(original_height + 1, frame_->height());
+}
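
The kToSlowProperties/kToFastProperties bracketing exists because adding
properties one by one to a fast-mode object copies the object layout on every
addition, which is quadratic over the block, while a dictionary-mode object
absorbs inserts cheaply and is converted back once at the end. A toy model of
the idea (deliberately not V8's real representation):

    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct ToyObject {
      std::vector<std::pair<std::string, int>> fast;  // Fixed layout.
      std::map<std::string, int> dict;                // Dictionary mode.
      bool is_fast = true;

      void ToSlowProperties() {
        dict.insert(fast.begin(), fast.end());
        fast.clear();
        is_fast = false;
      }
      void ToFastProperties() {
        fast.assign(dict.begin(), dict.end());
        dict.clear();
        is_fast = true;
      }
      void Set(const std::string& key, int value) {
        if (is_fast) {
          fast.emplace_back(key, value);  // Models the per-add layout copy.
        } else {
          dict[key] = value;              // Cheap dictionary insert.
        }
      }
    };

    // An initialization block { o.a = 1; o.b = 2; ... } thus compiles to:
    //   ToSlowProperties; N stores; ToFastProperties.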
+
+
+void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  Comment cmnt(masm_, "[ Keyed Property Assignment");
+  Property* prop = node->target()->AsProperty();
+  ASSERT_NOT_NULL(prop);
+
+  // Evaluate the receiver subexpression.
+  Load(prop->obj());
+
+  // Change to slow case at the beginning of an initialization block to
+  // avoid the quadratic behavior of repeatedly adding fast properties.
+  if (node->starts_initialization_block()) {
+    frame_->Dup();
+    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+  }
+
+  // Change to fast case at the end of an initialization block. To prepare
+  // for that, add an extra copy of the receiver to the frame, so that it
+  // can be converted back to fast case after the assignment.
+  if (node->ends_initialization_block()) {
+    frame_->Dup();
+  }
+
+  // Evaluate the key subexpression.
+  Load(prop->key());
+
+  // Stack layout:
+  // [tos]   : key
+  // [tos+1] : receiver
+  // [tos+2] : receiver if at the end of an initialization block
+
+  // Evaluate the right-hand side.
+  if (node->is_compound()) {
+    // For a compound assignment the right-hand side is a binary operation
+    // between the current property value and the actual right-hand side.
+    // Load of the current value leaves receiver and key on the stack.
+    EmitKeyedLoad();
+    frame_->EmitPush(r0);
+
+    // Perform the binary operation.
+    Literal* literal = node->value()->AsLiteral();
+    bool overwrite_value =
+        (node->value()->AsBinaryOperation() != NULL &&
+         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+    if (literal != NULL && literal->handle()->IsSmi()) {
+      SmiOperation(node->binary_op(),
+                   literal->handle(),
+                   false,
+                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+    } else {
+      Load(node->value());
+      VirtualFrameBinaryOperation(
+          node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+    }
+  } else {
+    // For non-compound assignment just load the right-hand side.
+    Load(node->value());
+  }
+
+  // Stack layout:
+  // [tos]   : value
+  // [tos+1] : key
+  // [tos+2] : receiver
+  // [tos+3] : receiver if at the end of an initialization block
+
+  // Perform the assignment.  It is safe to ignore constants here.
+  ASSERT(node->op() != Token::INIT_CONST);
+  CodeForSourcePosition(node->position());
+  frame_->PopToR0();
+  EmitKeyedStore(prop->key()->type());
+  frame_->Drop(2);  // Key and receiver are left on the stack.
+  frame_->EmitPush(r0);
+
+  // Stack layout:
+  // [tos]   : result
+  // [tos+1] : receiver if at the end of an initialization block
+
+  // Change to fast case at the end of an initialization block.
+  if (node->ends_initialization_block()) {
+    // The argument to the runtime call is the extra copy of the receiver,
+    // which is below the value of the assignment.  Swap the receiver and
+    // the value of the assignment expression.
+    Register t0 = frame_->PopToRegister();
+    Register t1 = frame_->PopToRegister(t0);
+    frame_->EmitPush(t0);
+    frame_->EmitPush(t1);
+    frame_->CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  // Stack layout:
+  // [tos]   : result
+
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
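
Both assignment emitters end with the same two-register idiom: pop the value
and the saved receiver, then push them back in reverse so the receiver is on
top as the argument to kToFastProperties. On a plain stack the idiom reads:

    #include <cassert>
    #include <stack>

    // PopToRegister()/PopToRegister(t0) followed by two pushes in reverse
    // order swaps the top two frame elements.
    void SwapTopTwo(std::stack<int>& frame) {
      int t0 = frame.top(); frame.pop();  // Value of the assignment.
      int t1 = frame.top(); frame.pop();  // Saved copy of the receiver.
      frame.push(t0);
      frame.push(t1);                     // Receiver ends up on top.
    }

    int main() {
      std::stack<int> frame;
      frame.push(7);   // Receiver copy.
      frame.push(42);  // Assignment value (TOS).
      SwapTopTwo(frame);
      assert(frame.top() == 7);  // Receiver on top for the runtime call.
    }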
 
 
 void CodeGenerator::VisitAssignment(Assignment* node) {
+  VirtualFrame::RegisterAllocationScope scope(this);
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Assignment");
 
-  { Reference target(this, node->target(), node->is_compound());
-    if (target.is_illegal()) {
-      // Fool the virtual frame into thinking that we left the assignment's
-      // value on the frame.
-      __ mov(r0, Operand(Smi::FromInt(0)));
-      frame_->EmitPush(r0);
-      ASSERT(frame_->height() == original_height + 1);
-      return;
-    }
+  Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  Property* prop = node->target()->AsProperty();
 
-    if (node->op() == Token::ASSIGN ||
-        node->op() == Token::INIT_VAR ||
-        node->op() == Token::INIT_CONST) {
-      LoadAndSpill(node->value());
+  if (var != NULL && !var->is_global()) {
+    EmitSlotAssignment(node);
 
-    } else {  // Assignment is a compound assignment.
-      // Get the old value of the lhs.
-      target.GetValueAndSpill();
-      Literal* literal = node->value()->AsLiteral();
-      bool overwrite =
-          (node->value()->AsBinaryOperation() != NULL &&
-           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
-      if (literal != NULL && literal->handle()->IsSmi()) {
-        SmiOperation(node->binary_op(),
-                     literal->handle(),
-                     false,
-                     overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
-        frame_->EmitPush(r0);
+  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
+             (var != NULL && var->is_global())) {
+    // Properties whose keys are property names and global variables are
+    // treated as named property references.  We do not need to consider
+    // global 'this' because it is not a valid left-hand side.
+    EmitNamedPropertyAssignment(node);
 
-      } else {
-        LoadAndSpill(node->value());
-        GenericBinaryOperation(node->binary_op(),
-                               overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
-        frame_->EmitPush(r0);
-      }
-    }
-    Variable* var = node->target()->AsVariableProxy()->AsVariable();
-    if (var != NULL &&
-        (var->mode() == Variable::CONST) &&
-        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
-      // Assignment ignored - leave the value on the stack.
-      UnloadReference(&target);
-    } else {
-      CodeForSourcePosition(node->position());
-      if (node->op() == Token::INIT_CONST) {
-        // Dynamic constant initializations must use the function context
-        // and initialize the actual constant declared. Dynamic variable
-        // initializations are simply assignments and use SetValue.
-        target.SetValue(CONST_INIT);
-      } else {
-        target.SetValue(NOT_CONST_INIT);
-      }
-    }
+  } else if (prop != NULL) {
+    // Other properties (including rewritten parameters for a function that
+    // uses arguments) are keyed property assignments.
+    EmitKeyedPropertyAssignment(node);
+
+  } else {
+    // Invalid left-hand side.
+    Load(node->target());
+    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+    // The runtime call doesn't actually return but the code generator will
+    // still generate code and expects a certain frame height.
+    frame_->EmitPush(r0);
   }
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
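
VisitAssignment is now a pure dispatcher over the shape of the left-hand side.
Its decision table, written out as a standalone function with hypothetical
flags in place of the AST accessors:

    enum class AssignKind { Slot, NamedProperty, KeyedProperty, Invalid };

    AssignKind ClassifyTarget(bool is_variable, bool is_global,
                              bool is_property, bool key_is_name) {
      if (is_variable && !is_global) return AssignKind::Slot;
      if ((is_property && key_is_name) || (is_variable && is_global)) {
        return AssignKind::NamedProperty;  // Globals are named references.
      }
      if (is_property) return AssignKind::KeyedProperty;
      return AssignKind::Invalid;  // e.g. '1 = x;': ThrowReferenceError.
    }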
 
 
@@ -2925,14 +3575,14 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ Throw");
 
   LoadAndSpill(node->exception());
   CodeForSourcePosition(node->position());
   frame_->CallRuntime(Runtime::kThrow, 1);
   frame_->EmitPush(r0);
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -2940,13 +3590,12 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Property");
 
   { Reference property(this, node);
-    property.GetValueAndSpill();
+    property.GetValue();
   }
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -2954,7 +3603,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ Call");
 
   Expression* function = node->expression();
@@ -3080,21 +3729,37 @@
       // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
       // ------------------------------------------------------------------
 
-      LoadAndSpill(property->obj());  // Receiver.
-      // Load the arguments.
-      int arg_count = args->length();
-      for (int i = 0; i < arg_count; i++) {
-        LoadAndSpill(args->at(i));
-      }
+      Handle<String> name = Handle<String>::cast(literal->handle());
 
-      // Set the name register and call the IC initialization code.
-      __ mov(r2, Operand(literal->handle()));
-      InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-      Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
-      CodeForSourcePosition(node->position());
-      frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
-      __ ldr(cp, frame_->Context());
-      frame_->EmitPush(r0);
+      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
+          name->IsEqualTo(CStrVector("apply")) &&
+          args->length() == 2 &&
+          args->at(1)->AsVariableProxy() != NULL &&
+          args->at(1)->AsVariableProxy()->IsArguments()) {
+        // Use the optimized Function.prototype.apply that avoids
+        // allocating lazily allocated arguments objects.
+        CallApplyLazy(property->obj(),
+                      args->at(0),
+                      args->at(1)->AsVariableProxy(),
+                      node->position());
+
+      } else {
+        LoadAndSpill(property->obj());  // Receiver.
+        // Load the arguments.
+        int arg_count = args->length();
+        for (int i = 0; i < arg_count; i++) {
+          LoadAndSpill(args->at(i));
+        }
+
+        // Set the name register and call the IC initialization code.
+        __ mov(r2, Operand(name));
+        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+        Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
+        CodeForSourcePosition(node->position());
+        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
+        __ ldr(cp, frame_->Context());
+        frame_->EmitPush(r0);
+      }
 
     } else {
       // -------------------------------------------
@@ -3103,7 +3768,7 @@
 
       LoadAndSpill(property->obj());
       LoadAndSpill(property->key());
-      EmitKeyedLoad(false);
+      EmitKeyedLoad();
       frame_->Drop();  // key
       // Put the function below the receiver.
       if (property->is_synthetic()) {
@@ -3137,7 +3802,7 @@
     CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
     frame_->EmitPush(r0);
   }
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -3145,7 +3810,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ CallNew");
 
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -3180,12 +3845,12 @@
 
   // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
   __ str(r0, frame_->Top());
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
   JumpTarget leave, null, function, non_function_constructor;
 
@@ -3245,7 +3910,7 @@
 
 
 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
   JumpTarget leave;
   LoadAndSpill(args->at(0));
@@ -3264,7 +3929,7 @@
 
 
 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 2);
   JumpTarget leave;
   LoadAndSpill(args->at(0));  // Load the object.
@@ -3289,7 +3954,7 @@
 
 
 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
   LoadAndSpill(args->at(0));
   frame_->EmitPop(r0);
@@ -3299,7 +3964,7 @@
 
 
 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
   ASSERT_EQ(args->length(), 3);
 #ifdef ENABLE_LOGGING_AND_PROFILING
@@ -3315,7 +3980,7 @@
 
 
 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
   LoadAndSpill(args->at(0));
   frame_->EmitPop(r0);
@@ -3324,91 +3989,86 @@
 }
 
 
-// This should generate code that performs a charCodeAt() call or returns
+// Generates the Math.pow method - currently just calls runtime.
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+  Load(args->at(0));
+  Load(args->at(1));
+  frame_->CallRuntime(Runtime::kMath_pow, 2);
+  frame_->EmitPush(r0);
+}
+
+
+// Generates the Math.sqrt method - currently just calls runtime.
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  frame_->CallRuntime(Runtime::kMath_sqrt, 1);
+  frame_->EmitPush(r0);
+}
+
+
+// This generates code that performs a charCodeAt() call or returns
 // undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It is not yet implemented on ARM, so it always goes to the slow case.
+// It can handle flat 8- and 16-bit strings, as well as cons strings where
+// the answer is found in the left-hand branch of the cons.  The slow case
+// will flatten the string, which ensures that the answer is in the left-hand
+// side the next time around.
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 2);
   Comment(masm_, "[ GenerateFastCharCodeAt");
 
   LoadAndSpill(args->at(0));
   LoadAndSpill(args->at(1));
-  frame_->EmitPop(r0);  // Index.
-  frame_->EmitPop(r1);  // String.
+  frame_->EmitPop(r1);  // Index.
+  frame_->EmitPop(r2);  // String.
 
-  Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
+  Label slow_case;
+  Label exit;
+  StringHelper::GenerateFastCharCodeAt(masm_,
+                                       r2,
+                                       r1,
+                                       r3,
+                                       r0,
+                                       &slow_case,
+                                       &slow_case,
+                                       &slow_case,
+                                       &slow_case);
+  __ jmp(&exit);
 
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &slow);  // The 'string' was a Smi.
-
-  ASSERT(kSmiTag == 0);
-  __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
-  __ b(ne, &slow);  // The index was negative or not a Smi.
-
-  __ bind(&try_again_with_new_string);
-  __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
-  __ b(ge, &slow);
-
-  // Now r2 has the string type.
-  __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
-  // Now r3 has the length of the string.  Compare with the index.
-  __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
-  __ b(le, &slow);
-
-  // Here we know the index is in range.  Check that string is sequential.
-  ASSERT_EQ(0, kSeqStringTag);
-  __ tst(r2, Operand(kStringRepresentationMask));
-  __ b(ne, &not_a_flat_string);
-
-  // Check whether it is an ASCII string.
-  ASSERT_EQ(0, kTwoByteStringTag);
-  __ tst(r2, Operand(kStringEncodingMask));
-  __ b(ne, &ascii_string);
-
-  // 2-byte string.  We can add without shifting since the Smi tag size is the
-  // log2 of the number of bytes in a two-byte character.
-  ASSERT_EQ(1, kSmiTagSize);
-  ASSERT_EQ(0, kSmiShiftSize);
-  __ add(r1, r1, Operand(r0));
-  __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
-  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-  __ jmp(&end);
-
-  __ bind(&ascii_string);
-  __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
-  __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
-  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-  __ jmp(&end);
-
-  __ bind(&not_a_flat_string);
-  __ and_(r2, r2, Operand(kStringRepresentationMask));
-  __ cmp(r2, Operand(kConsStringTag));
-  __ b(ne, &slow);
-
-  // ConsString.
-  // Check that the right hand side is the empty string (ie if this is really a
-  // flat string in a cons string).  If that is not the case we would rather go
-  // to the runtime system now, to flatten the string.
-  __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
-  __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
-  __ cmp(r2, Operand(r3));
-  __ b(ne, &slow);
-
-  // Get the first of the two strings.
-  __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
-  __ jmp(&try_again_with_new_string);
-
-  __ bind(&slow);
+  __ bind(&slow_case);
+  // Move the undefined value into the result register, which will
+  // trigger the slow case.
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
 
-  __ bind(&end);
+  __ bind(&exit);
   frame_->EmitPush(r0);
 }
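
The fast path only succeeds when the answer is reachable without flattening:
flat (sequential) strings, or cons strings whose content effectively lives in
the left branch. A toy version of that walk, assuming a simplified cons
representation:

    #include <cstddef>
    #include <string>

    struct ToyString {
      std::string flat;                 // Used when this is a flat string.
      const ToyString* left = nullptr;  // Non-null means cons string.
      const ToyString* right = nullptr;
    };

    // True plus the char code on the fast path; false means "go to the
    // runtime, which flattens the string for next time".
    bool FastCharCodeAt(const ToyString* s, std::size_t index, int* out) {
      while (s->left != nullptr) {
        if (!s->right->flat.empty()) return false;  // Not flat-in-left.
        s = s->left;
      }
      if (index >= s->flat.size()) return false;    // Out of range.
      *out = static_cast<unsigned char>(s->flat[index]);
      return true;
    }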
 
 
+void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+  Comment(masm_, "[ GenerateCharFromCode");
+  ASSERT(args->length() == 1);
+
+  Register code = r1;
+  Register scratch = ip;
+  Register result = r0;
+
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(code);
+
+  StringHelper::GenerateCharFromCode(masm_,
+                                     code,
+                                     scratch,
+                                     result,
+                                     CALL_FUNCTION);
+  frame_->EmitPush(result);
+}
+
+
 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
   LoadAndSpill(args->at(0));
   JumpTarget answer;
@@ -3427,7 +4087,7 @@
 
 
 void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
   LoadAndSpill(args->at(0));
   JumpTarget answer;
@@ -3448,7 +4108,7 @@
 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
   // This generates a fast version of:
   // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
   LoadAndSpill(args->at(0));
   frame_->EmitPop(r1);
@@ -3478,7 +4138,7 @@
 void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
   // This generates a fast version of:
   // (%_ClassOf(arg) === 'Function')
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
   LoadAndSpill(args->at(0));
   frame_->EmitPop(r0);
@@ -3491,7 +4151,7 @@
 
 
 void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
   LoadAndSpill(args->at(0));
   frame_->EmitPop(r0);
@@ -3505,7 +4165,7 @@
 
 
 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 0);
 
   // Get the frame pointer for the calling frame.
@@ -3527,22 +4187,31 @@
 
 
 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 0);
 
-  // Seed the result with the formal parameters count, which will be used
-  // in case no arguments adaptor frame is found below the current frame.
+  Label exit;
+
+  // Get the number of formal parameters.
   __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
 
-  // Call the shared stub to get to the arguments.length.
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
-  frame_->CallStub(&stub, 0);
+  // Check if the calling frame is an arguments adaptor frame.
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &exit);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
   frame_->EmitPush(r0);
 }
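
arguments.length no longer goes through the shared stub: the inline code peeks
one frame up and, only when the caller is an arguments adaptor frame, reads
the actual count stored there. The control flow, over a hypothetical frame
descriptor:

    #include <cstddef>

    struct Frame {
      const Frame* caller;
      bool is_arguments_adaptor;  // Adaptor frames carry a sentinel marker.
      std::size_t actual_args;    // Meaningful only for adaptor frames.
    };

    // The formal parameter count is the default; an adaptor frame overrides
    // it with the number of arguments actually passed at the call site.
    std::size_t ArgumentsLength(const Frame* current,
                                std::size_t formal_params) {
      const Frame* caller = current->caller;
      return caller->is_arguments_adaptor ? caller->actual_args
                                          : formal_params;
    }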
 
 
-void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
 
   // Satisfy contract with ArgumentsAccessStub:
@@ -3558,12 +4227,57 @@
 }
 
 
-void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+void CodeGenerator::GenerateRandomHeapNumber(
+    ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 0);
-  __ Call(ExternalReference::random_positive_smi_function().address(),
-          RelocInfo::RUNTIME_ENTRY);
-  frame_->EmitPush(r0);
+
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
+
+  __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
+
+  __ bind(&slow_allocate_heapnumber);
+  // To allocate a heap number, and ensure that it is not a smi, we
+  // call the runtime function Runtime::kNumberUnaryMinus on 0, returning
+  // the double -0.0.  A new, distinct heap number is returned each time.
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ push(r0);
+  __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+  __ mov(r4, Operand(r0));
+
+  __ bind(&heapnumber_allocated);
+
+  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
+  // by computing:
+  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
+  if (CpuFeatures::IsSupported(VFP3)) {
+    __ PrepareCallCFunction(0, r1);
+    __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+    CpuFeatures::Scope scope(VFP3);
+    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+    // Create this constant using mov/orr to avoid PC relative load.
+    __ mov(r1, Operand(0x41000000));
+    __ orr(r1, r1, Operand(0x300000));
+    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
+    __ vmov(d7, r0, r1);
+    // Move 0x4130000000000000 to VFP.
+    __ mov(r0, Operand(0));
+    __ vmov(d8, r0, r1);
+    // Subtract and store the result in the heap number.
+    __ vsub(d7, d7, d8);
+    __ sub(r0, r4, Operand(kHeapObjectTag));
+    __ vstr(d7, r0, HeapNumber::kValueOffset);
+    frame_->EmitPush(r4);
+  } else {
+    __ mov(r0, Operand(r4));
+    __ PrepareCallCFunction(1, r1);
+    __ CallCFunction(
+        ExternalReference::fill_heap_number_with_random_function(), 1);
+    frame_->EmitPush(r0);
+  }
 }
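
The VFP branch builds a double in [0, 1) without a floating-point division:
OR 32 random bits into the low mantissa of 1.0 x 2^20, then subtract
1.0 x 2^20. The same trick on plain integers, assuming IEEE-754 doubles, with
memcpy standing in for the vmov transfers:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    double RandomBitsToDouble(uint32_t random_bits) {
      const uint64_t kBase = 0x4130000000000000ULL;  // 1.0 x 2^20.
      uint64_t spliced = kBase | random_bits;  // 1.(20 0s)(32 bits) x 2^20.
      double a, b;
      std::memcpy(&a, &spliced, sizeof a);
      std::memcpy(&b, &kBase, sizeof b);
      return a - b;  // (2^20 + r * 2^-32) - 2^20 == 0.(32 random bits).
    }

    int main() {
      double d = RandomBitsToDouble(0xDEADBEEFu);
      assert(d >= 0.0 && d < 1.0);
    }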
 
 
@@ -3611,8 +4325,169 @@
   Load(args->at(1));
   Load(args->at(2));
   Load(args->at(3));
+  RegExpExecStub stub;
+  frame_->CallStub(&stub, 4);
+  frame_->EmitPush(r0);
+}
 
-  frame_->CallRuntime(Runtime::kRegExpExec, 4);
+
+void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
+  // No stub. This code only occurs a few times in regexp.js.
+  const int kMaxInlineLength = 100;
+  ASSERT_EQ(3, args->length());
+  Load(args->at(0));  // Size of array, smi.
+  Load(args->at(1));  // "index" property value.
+  Load(args->at(2));  // "input" property value.
+  {
+    VirtualFrame::SpilledScope spilled_scope(frame_);
+    Label slowcase;
+    Label done;
+    __ ldr(r1, MemOperand(sp, kPointerSize * 2));
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize == 1);
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(ne, &slowcase);
+    __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
+    __ b(hi, &slowcase);
+    // Smi-tagging is equivalent to multiplying by 2.
+    // Allocate RegExpResult followed by FixedArray with size in r2.
+    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+    // Elements:  [Map][Length][..elements..]
+    // Size of JSArray with two in-object properties and the header of a
+    // FixedArray.
+    int objects_size =
+        (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
+    __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
+    __ add(r2, r5, Operand(objects_size));
+    __ AllocateInNewSpace(r2,  // In: Size, in words.
+                          r0,  // Out: Start of allocation (tagged).
+                          r3,  // Scratch register.
+                          r4,  // Scratch register.
+                          &slowcase,
+                          TAG_OBJECT);
+    // r0: Start of allocated area, object-tagged.
+    // r1: Number of elements in array, as smi.
+    // r5: Number of elements, untagged.
+
+    // Set JSArray map to global.regexp_result_map().
+    // Set empty properties FixedArray.
+    // Set elements to point to FixedArray allocated right after the JSArray.
+    // Interleave operations for better latency.
+    __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+    __ add(r3, r0, Operand(JSRegExpResult::kSize));
+    __ mov(r4, Operand(Factory::empty_fixed_array()));
+    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+    __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
+    __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
+    __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+    __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+    // Set input, index and length fields from arguments.
+    __ ldm(ia_w, sp, static_cast<RegList>(r2.bit() | r4.bit()));
+    __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
+    __ add(sp, sp, Operand(kPointerSize));
+    __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
+    __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
+
+    // Fill out the elements FixedArray.
+    // r0: JSArray, tagged.
+    // r3: FixedArray, tagged.
+    // r5: Number of elements in array, untagged.
+
+    // Set map.
+    __ mov(r2, Operand(Factory::fixed_array_map()));
+    __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+    // Set FixedArray length.
+    __ str(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
+    // Fill contents of fixed-array with the-hole.
+    __ mov(r2, Operand(Factory::the_hole_value()));
+    __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    // Fill fixed array elements with hole.
+    // r0: JSArray, tagged.
+    // r2: the hole.
+    // r3: Start of elements in FixedArray.
+    // r5: Number of elements to fill.
+    Label loop;
+    __ tst(r5, Operand(r5));
+    __ bind(&loop);
+    __ b(le, &done);  // Jump if r5 is negative or zero.
+    __ sub(r5, r5, Operand(1), SetCC);
+    __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
+    __ jmp(&loop);
+
+    __ bind(&slowcase);
+    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+
+    __ bind(&done);
+  }
+  frame_->Forget(3);
+  frame_->EmitPush(r0);
+}
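
Several shift counts above lean on the smi encoding (kSmiTag == 0,
kSmiTagSize == 1): a smi is the integer shifted left one bit, so smi-tagging
is numerically a multiplication by 2 and an LSR by kSmiTagSize untags. In
plain C++:

    #include <cassert>
    #include <cstdint>

    int32_t SmiTag(int32_t n) { return n << 1; }      // Doubles the value.
    int32_t SmiUntag(int32_t smi) { return smi >> 1; }

    int main() {
      assert(SmiTag(21) == 42);
      assert(SmiUntag(SmiTag(21)) == 21);
      assert((SmiTag(21) & 1) == 0);  // A clear low bit marks a smi.
    }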
+
+
+class DeferredSearchCache: public DeferredCode {
+ public:
+  DeferredSearchCache(Register dst, Register cache, Register key)
+      : dst_(dst), cache_(cache), key_(key) {
+    set_comment("[ DeferredSearchCache");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_, cache_, key_;
+};
+
+
+void DeferredSearchCache::Generate() {
+  __ Push(cache_, key_);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+  if (!dst_.is(r0)) {
+    __ mov(dst_, r0);
+  }
+}
+
+
+void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      Top::global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    frame_->EmitPush(r0);
+    return;
+  }
+
+  Load(args->at(1));
+  frame_->EmitPop(r2);
+
+  __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
+  __ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));
+
+  DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);
+
+  const int kFingerOffset =
+      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
+  // r0 now holds finger offset as a smi.
+  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // r3 now points to the start of fixed array elements.
+  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+  // Note side effect of PreIndex: r3 now points to the key of the pair.
+  __ cmp(r2, r0);
+  deferred->Branch(ne);
+
+  __ ldr(r0, MemOperand(r3, kPointerSize));
+
+  deferred->BindExit();
   frame_->EmitPush(r0);
 }
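
A JSFunctionResultCache is a flat array of key/value pairs plus a finger
pointing at the most recent hit. The inline fast path only compares the key
under the finger; a miss takes DeferredSearchCache into
Runtime::kGetFromCache, which searches the array and moves the finger. The
lookup contract, on a toy cache:

    #include <cstddef>
    #include <vector>

    struct ToyCache {
      std::vector<int> entries;  // key0, value0, key1, value1, ...
      std::size_t finger = 0;    // Index of the most recently hit key.
    };

    // Inline fast path: a hit only if the key sits right under the finger.
    bool FastGetFromCache(const ToyCache& cache, int key, int* value) {
      if (cache.entries[cache.finger] != key) return false;  // Deferred.
      *value = cache.entries[cache.finger + 1];
      return true;
    }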
 
@@ -3623,7 +4498,128 @@
   // Load the argument on the stack and jump to the runtime.
   Load(args->at(0));
 
-  frame_->CallRuntime(Runtime::kNumberToString, 1);
+  NumberToStringStub stub;
+  frame_->CallStub(&stub, 1);
+  frame_->EmitPush(r0);
+}
+
+
+class DeferredSwapElements: public DeferredCode {
+ public:
+  DeferredSwapElements(Register object, Register index1, Register index2)
+      : object_(object), index1_(index1), index2_(index2) {
+    set_comment("[ DeferredSwapElements");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register object_, index1_, index2_;
+};
+
+
+void DeferredSwapElements::Generate() {
+  __ push(object_);
+  __ push(index1_);
+  __ push(index2_);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+}
+
+
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateSwapElements");
+
+  ASSERT_EQ(3, args->length());
+
+  Load(args->at(0));
+  Load(args->at(1));
+  Load(args->at(2));
+
+  Register index2 = r2;
+  Register index1 = r1;
+  Register object = r0;
+  Register tmp1 = r3;
+  Register tmp2 = r4;
+
+  frame_->EmitPop(index2);
+  frame_->EmitPop(index1);
+  frame_->EmitPop(object);
+
+  DeferredSwapElements* deferred =
+      new DeferredSwapElements(object, index1, index2);
+
+  // Fetch the map and check if array is in fast case.
+  // Check that object doesn't require security checks and
+  // has no indexed interceptor.
+  __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
+  deferred->Branch(lt);
+  __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
+  __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
+  deferred->Branch(nz);
+
+  // Check the object's elements are in fast case.
+  __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+  __ cmp(tmp2, ip);
+  deferred->Branch(ne);
+
+  // Smi-tagging is equivalent to multiplying by 2.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  // Check that both indices are smis.
+  __ mov(tmp2, index1);
+  __ orr(tmp2, tmp2, index2);
+  __ tst(tmp2, Operand(kSmiTagMask));
+  deferred->Branch(nz);
+
+  // Turn index1 and index2 into offsets into the fixed array pointed to
+  // by tmp1.
+  __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+  // Swap elements.
+  Register tmp3 = object;
+  object = no_reg;
+  __ ldr(tmp3, MemOperand(tmp1, index1));
+  __ ldr(tmp2, MemOperand(tmp1, index2));
+  __ str(tmp3, MemOperand(tmp1, index2));
+  __ str(tmp2, MemOperand(tmp1, index1));
+
+  Label done;
+  __ InNewSpace(tmp1, tmp2, eq, &done);
+  // Possible optimization: check that both values are smis
+  // (OR them together and test against the smi mask).
+
+  __ mov(tmp2, tmp1);
+  RecordWriteStub recordWrite1(tmp1, index1, tmp3);
+  __ CallStub(&recordWrite1);
+
+  RecordWriteStub recordWrite2(tmp2, index2, tmp3);
+  __ CallStub(&recordWrite2);
+
+  __ bind(&done);
+
+  deferred->BindExit();
+  __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
+  frame_->EmitPush(tmp1);
+}
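
GenerateSwapElements swaps inline only when every precondition holds; any
failing check branches to the deferred code, which simply calls
Runtime::kSwapElements. The bail-out predicate, collapsed into one function
over a hypothetical descriptor (not V8's real types):

    struct ObjectInfo {
      bool is_js_object;
      bool needs_access_check;       // Security checks required.
      bool has_indexed_interceptor;
      bool has_fast_elements;        // Elements are a plain FixedArray.
    };

    bool CanSwapInline(const ObjectInfo& o,
                       bool index1_is_smi, bool index2_is_smi) {
      return o.is_js_object &&
             !o.needs_access_check &&
             !o.has_indexed_interceptor &&
             o.has_fast_elements &&
             index1_is_smi && index2_is_smi;  // Else Runtime::kSwapElements.
    }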
+
+
+void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateCallFunction");
+
+  ASSERT(args->length() >= 2);
+
+  int n_args = args->length() - 2;  // for receiver and function.
+  Load(args->at(0));  // receiver
+  for (int i = 0; i < n_args; i++) {
+    Load(args->at(i + 1));
+  }
+  Load(args->at(n_args + 1));  // function
+  frame_->CallJSFunction(n_args);
   frame_->EmitPush(r0);
 }
 
@@ -3647,7 +4643,7 @@
 
 
 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -3655,7 +4651,7 @@
   LoadAndSpill(args->at(1));
   frame_->EmitPop(r0);
   frame_->EmitPop(r1);
-  __ cmp(r0, Operand(r1));
+  __ cmp(r0, r1);
   cc_reg_ = eq;
 }
 
@@ -3664,7 +4660,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   if (CheckForInlineRuntimeCall(node)) {
     ASSERT((has_cc() && frame_->height() == original_height) ||
            (!has_cc() && frame_->height() == original_height + 1));
@@ -3702,7 +4698,7 @@
     frame_->CallRuntime(function, arg_count);
     frame_->EmitPush(r0);
   }
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -3710,7 +4706,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ UnaryOperation");
 
   Token::Value op = node->op();
@@ -3841,7 +4837,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ CountOperation");
 
   bool is_postfix = node->is_postfix();
@@ -3866,10 +4862,10 @@
         __ mov(r0, Operand(Smi::FromInt(0)));
         frame_->EmitPush(r0);
       }
-      ASSERT(frame_->height() == original_height + 1);
+      ASSERT_EQ(original_height + 1, frame_->height());
       return;
     }
-    target.GetValueAndSpill();
+    target.GetValue();
     frame_->EmitPop(r0);
 
     JumpTarget slow;
@@ -3934,18 +4930,11 @@
 
   // Postfix: Discard the new value and use the old.
   if (is_postfix) frame_->EmitPop(r0);
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ BinaryOperation");
-  Token::Value op = node->op();
-
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
   // According to ECMA-262 section 11.11, page 58, the binary logical
   // operators must yield the result of one of the two expressions
   // before any ToBoolean() conversions. This means that the value
@@ -3957,8 +4946,7 @@
   // after evaluating the left hand side (due to the shortcut
   // semantics), but the compiler must (statically) know if the result
   // of compiling the binary operation is materialized or not.
-
-  if (op == Token::AND) {
+  if (node->op() == Token::AND) {
     JumpTarget is_true;
     LoadConditionAndSpill(node->left(),
                           &is_true,
@@ -4004,7 +4992,8 @@
       ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
     }
 
-  } else if (op == Token::OR) {
+  } else {
+    ASSERT(node->op() == Token::OR);
     JumpTarget is_false;
     LoadConditionAndSpill(node->left(),
                           true_target(),
@@ -4049,7 +5038,19 @@
       // Nothing to do.
       ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
     }
+  }
+}
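
As the comment block above notes, && and || yield one of the operand values,
not a boolean; only the choice of which side to evaluate uses ToBoolean. A
direct rendering of that rule with a toy Value type, deferring the right-hand
side behind a function pointer:

    #include <cassert>
    #include <string>

    struct Value { std::string payload; bool truthy; };

    Value LogicalAnd(const Value& left, Value (*right)()) {
      return left.truthy ? right() : left;  // Falsy left short-circuits.
    }

    Value LogicalOr(const Value& left, Value (*right)()) {
      return left.truthy ? left : right();  // Truthy left short-circuits.
    }

    int main() {
      Value empty{"", false};
      assert(LogicalOr(empty, [] { return Value{"fallback", true}; })
                 .payload == "fallback");
      assert(LogicalAnd(empty, [] { return Value{"unused", true}; })
                 .payload.empty());  // The left value itself is the result.
    }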
 
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  Comment cmnt(masm_, "[ BinaryOperation");
+
+  if (node->op() == Token::AND || node->op() == Token::OR) {
+    VirtualFrame::SpilledScope spilled_scope(frame_);
+    GenerateLogicalBooleanOperation(node);
   } else {
     // Optimize for the case where (at least) one of the expressions
     // is a literal small integer.
@@ -4065,31 +5066,31 @@
          node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
 
     if (rliteral != NULL && rliteral->handle()->IsSmi()) {
-      LoadAndSpill(node->left());
+      VirtualFrame::RegisterAllocationScope scope(this);
+      Load(node->left());
       SmiOperation(node->op(),
                    rliteral->handle(),
                    false,
                    overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
-
     } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
-      LoadAndSpill(node->right());
+      VirtualFrame::RegisterAllocationScope scope(this);
+      Load(node->right());
       SmiOperation(node->op(),
                    lliteral->handle(),
                    true,
                    overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
-
     } else {
+      VirtualFrame::RegisterAllocationScope scope(this);
       OverwriteMode overwrite_mode = NO_OVERWRITE;
       if (overwrite_left) {
         overwrite_mode = OVERWRITE_LEFT;
       } else if (overwrite_right) {
         overwrite_mode = OVERWRITE_RIGHT;
       }
-      LoadAndSpill(node->left());
-      LoadAndSpill(node->right());
-      GenericBinaryOperation(node->op(), overwrite_mode);
+      Load(node->left());
+      Load(node->right());
+      VirtualFrameBinaryOperation(node->op(), overwrite_mode);
     }
-    frame_->EmitPush(r0);
   }
   ASSERT(!has_valid_frame() ||
          (has_cc() && frame_->height() == original_height) ||
@@ -4101,10 +5102,10 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   __ ldr(r0, frame_->Function());
   frame_->EmitPush(r0);
-  ASSERT(frame_->height() == original_height + 1);
+  ASSERT_EQ(original_height + 1, frame_->height());
 }
 
 
@@ -4112,9 +5113,10 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ CompareOperation");
 
+  VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
+
   // Get the expressions from the node.
   Expression* left = node->left();
   Expression* right = node->right();
@@ -4131,10 +5133,12 @@
         right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
     // The 'null' value can only be equal to 'null' or 'undefined'.
     if (left_is_null || right_is_null) {
-      LoadAndSpill(left_is_null ? right : left);
-      frame_->EmitPop(r0);
+      Load(left_is_null ? right : left);
+      Register tos = frame_->PopToRegister();
+      // JumpTargets can't cope with register allocation yet.
+      frame_->SpillAll();
       __ LoadRoot(ip, Heap::kNullValueRootIndex);
-      __ cmp(r0, ip);
+      __ cmp(tos, ip);
 
       // The 'null' value is only equal to 'undefined' if using non-strict
       // comparisons.
@@ -4142,17 +5146,17 @@
         true_target()->Branch(eq);
 
         __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-        __ cmp(r0, Operand(ip));
+        __ cmp(tos, Operand(ip));
         true_target()->Branch(eq);
 
-        __ tst(r0, Operand(kSmiTagMask));
+        __ tst(tos, Operand(kSmiTagMask));
         false_target()->Branch(eq);
 
         // It can be an undetectable object.
-        __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
-        __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset));
-        __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
-        __ cmp(r0, Operand(1 << Map::kIsUndetectable));
+        __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
+        __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
+        __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
+        __ cmp(tos, Operand(1 << Map::kIsUndetectable));
       }
 
       cc_reg_ = eq;
@@ -4171,90 +5175,95 @@
        right->AsLiteral()->handle()->IsString())) {
     Handle<String> check(String::cast(*right->AsLiteral()->handle()));
 
-    // Load the operand, move it to register r1.
+    // Load the operand, move it to a register.
     LoadTypeofExpression(operation->expression());
-    frame_->EmitPop(r1);
+    Register tos = frame_->PopToRegister();
+
+    // JumpTargets can't cope with register allocation yet.
+    frame_->SpillAll();
+
+    Register scratch = VirtualFrame::scratch0();
 
     if (check->Equals(Heap::number_symbol())) {
-      __ tst(r1, Operand(kSmiTagMask));
+      __ tst(tos, Operand(kSmiTagMask));
       true_target()->Branch(eq);
-      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
       __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-      __ cmp(r1, ip);
+      __ cmp(tos, ip);
       cc_reg_ = eq;
 
     } else if (check->Equals(Heap::string_symbol())) {
-      __ tst(r1, Operand(kSmiTagMask));
+      __ tst(tos, Operand(kSmiTagMask));
       false_target()->Branch(eq);
 
-      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
 
       // It can be an undetectable string object.
-      __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
-      __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
-      __ cmp(r2, Operand(1 << Map::kIsUndetectable));
+      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
+      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
+      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
       false_target()->Branch(eq);
 
-      __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-      __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
+      __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
+      __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
       cc_reg_ = lt;
 
     } else if (check->Equals(Heap::boolean_symbol())) {
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-      __ cmp(r1, ip);
+      __ cmp(tos, ip);
       true_target()->Branch(eq);
       __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-      __ cmp(r1, ip);
+      __ cmp(tos, ip);
       cc_reg_ = eq;
 
     } else if (check->Equals(Heap::undefined_symbol())) {
       __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-      __ cmp(r1, ip);
+      __ cmp(tos, ip);
       true_target()->Branch(eq);
 
-      __ tst(r1, Operand(kSmiTagMask));
+      __ tst(tos, Operand(kSmiTagMask));
       false_target()->Branch(eq);
 
       // It can be an undetectable object.
-      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
-      __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
-      __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
-      __ cmp(r2, Operand(1 << Map::kIsUndetectable));
+      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
+      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
+      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
+      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
 
       cc_reg_ = eq;
 
     } else if (check->Equals(Heap::function_symbol())) {
-      __ tst(r1, Operand(kSmiTagMask));
+      __ tst(tos, Operand(kSmiTagMask));
       false_target()->Branch(eq);
-      Register map_reg = r2;
-      __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
+      Register map_reg = scratch;
+      __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
       true_target()->Branch(eq);
       // Regular expressions are callable so typeof == 'function'.
-      __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
+      __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
       cc_reg_ = eq;
 
     } else if (check->Equals(Heap::object_symbol())) {
-      __ tst(r1, Operand(kSmiTagMask));
+      __ tst(tos, Operand(kSmiTagMask));
       false_target()->Branch(eq);
 
       __ LoadRoot(ip, Heap::kNullValueRootIndex);
-      __ cmp(r1, ip);
+      __ cmp(tos, ip);
       true_target()->Branch(eq);
 
-      Register map_reg = r2;
-      __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
+      Register map_reg = scratch;
+      __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
       false_target()->Branch(eq);
 
       // It can be an undetectable object.
-      __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
-      __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
-      __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+      __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
+      __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
+      __ cmp(tos, Operand(1 << Map::kIsUndetectable));
       false_target()->Branch(eq);
 
-      __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
-      __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+      __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+      __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
       false_target()->Branch(lt);
-      __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+      __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
       cc_reg_ = le;
 
     } else {
@@ -4293,6 +5302,7 @@
       break;
 
     case Token::IN: {
+      VirtualFrame::SpilledScope scope(frame_);
       LoadAndSpill(left);
       LoadAndSpill(right);
       frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
@@ -4301,6 +5311,7 @@
     }
 
     case Token::INSTANCEOF: {
+      VirtualFrame::SpilledScope scope(frame_);
       LoadAndSpill(left);
       LoadAndSpill(right);
       InstanceofStub stub;
@@ -4319,13 +5330,352 @@
 }
 
 
-void CodeGenerator::EmitKeyedLoad(bool is_global) {
-  Comment cmnt(masm_, "[ Load from keyed Property");
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  RelocInfo::Mode rmode = is_global
-                          ? RelocInfo::CODE_TARGET_CONTEXT
-                          : RelocInfo::CODE_TARGET;
-  frame_->CallCodeObject(ic, rmode, 0);
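+// Deferred code objects for the inlined (patchable) property accesses below.
+// Each one is reached when the fast inline check fails; it updates the
+// counters and falls back to the generic inline cache stub.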
+class DeferredReferenceGetNamedValue: public DeferredCode {
+ public:
+  explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) {
+    set_comment("[ DeferredReferenceGetNamedValue");
+  }
+
+  virtual void Generate();
+
+ private:
+  Handle<String> name_;
+};
+
+
+void DeferredReferenceGetNamedValue::Generate() {
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
+  __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
+  __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
+
+  // Setup the registers and call load IC.
+  // On entry to this deferred code, r0 is assumed to already contain the
+  // receiver from the top of the stack.
+  __ mov(r2, Operand(name_));
+
+  // The rest of the instructions in the deferred code must be together.
+  { Assembler::BlockConstPoolScope block_const_pool(masm_);
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // The call must be followed by a nop(1) instruction to indicate that the
+    // in-object load has been inlined.
+    __ nop(PROPERTY_ACCESS_INLINED);
+
+    // Block the constant pool for one more instruction after leaving this
+    // constant pool block scope to include the branch instruction ending the
+    // deferred code.
+    __ BlockConstPoolFor(1);
+  }
+}
+
+
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceGetKeyedValue() {
+    set_comment("[ DeferredReferenceGetKeyedValue");
+  }
+
+  virtual void Generate();
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
+  __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
+  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
+
+  // The rest of the instructions in the deferred code must be together.
+  { Assembler::BlockConstPoolScope block_const_pool(masm_);
+    // Call keyed load IC. It has all arguments on the stack and the key in r0.
+    __ ldr(r0, MemOperand(sp, 0));
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // The call must be followed by a nop instruction to indicate that the
+    // keyed load has been inlined.
+    __ nop(PROPERTY_ACCESS_INLINED);
+
+    // Block the constant pool for one more instruction after leaving this
+    // constant pool block scope to include the branch instruction ending the
+    // deferred code.
+    __ BlockConstPoolFor(1);
+  }
+}
+
+
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetKeyedValue() {
+    set_comment("[ DeferredReferenceSetKeyedValue");
+  }
+
+  virtual void Generate();
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
+  __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
+  __ IncrementCounter(
+      &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
+
+  // The rest of the instructions in the deferred code must be together.
+  { Assembler::BlockConstPoolScope block_const_pool(masm_);
+    // Call the keyed store IC. It has the receiver and key on the stack and
+    // the value to store in r0.
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // The call must be followed by a nop instruction to indicate that the
+    // keyed store has been inlined.
+    __ nop(PROPERTY_ACCESS_INLINED);
+
+    // Block the constant pool for one more instruction after leaving this
+    // constant pool block scope to include the branch instruction ending the
+    // deferred code.
+    __ BlockConstPoolFor(1);
+  }
+}
+
+
+void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
+  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+    Comment cmnt(masm(), "[ Load from named Property");
+    // Setup the name register and call load IC.
+    frame_->CallLoadIC(name,
+                       is_contextual
+                           ? RelocInfo::CODE_TARGET_CONTEXT
+                           : RelocInfo::CODE_TARGET);
+  } else {
+    // Inline the in-object property case.
+    Comment cmnt(masm(), "[ Inlined named property load");
+
+    // Counter will be decremented in the deferred code. Placed here to avoid
+    // having it in the instruction stream below where patching will occur.
+    __ IncrementCounter(&Counters::named_load_inline, 1,
+                        frame_->scratch0(), frame_->scratch1());
+
+    // The following instructions are the inlined load of an in-object property.
+    // Parts of this code are patched, so the exact instructions generated
+    // must be fixed. Therefore the constant pool is blocked while generating
+    // this code.
+
+    // Load the receiver from the stack.
+    frame_->SpillAllButCopyTOSToR0();
+
+    DeferredReferenceGetNamedValue* deferred =
+        new DeferredReferenceGetNamedValue(name);
+
+#ifdef DEBUG
+    int kInlinedNamedLoadInstructions = 7;
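+    // The 7 instructions are: tst, branch, map load, constant-pool load (mov),
+    // cmp, branch, and the patched property load.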
+    Label check_inlined_codesize;
+    masm_->bind(&check_inlined_codesize);
+#endif
+
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      // Check that the receiver is a heap object.
+      __ tst(r0, Operand(kSmiTagMask));
+      deferred->Branch(eq);
+
+      // Check the map. The null map used below is patched by the inline cache
+      // code.
+      __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+      __ mov(r3, Operand(Factory::null_value()));
+      __ cmp(r2, r3);
+      deferred->Branch(ne);
+
+      // Initially use an invalid index. The index will be patched by the
+      // inline cache code.
+      __ ldr(r0, MemOperand(r0, 0));
+
+      // Make sure that the expected number of instructions is generated.
+      ASSERT_EQ(kInlinedNamedLoadInstructions,
+                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
+    }
+
+    deferred->BindExit();
+  }
+}
+
+
+void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
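+  // A contextual store consumes only the value; a named store also pops the
+  // receiver.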
+  int expected_height = frame_->height() - (is_contextual ? 1 : 2);
+#endif
+  frame_->CallStoreIC(name, is_contextual);
+
+  ASSERT_EQ(expected_height, frame_->height());
+}
+
+
+void CodeGenerator::EmitKeyedLoad() {
+  if (loop_nesting() == 0) {
+    Comment cmnt(masm_, "[ Load from keyed property");
+    frame_->CallKeyedLoadIC();
+  } else {
+    // Inline the keyed load.
+    Comment cmnt(masm_, "[ Inlined load from keyed property");
+
+    // Counter will be decremented in the deferred code. Placed here to avoid
+    // having it in the instruction stream below where patching will occur.
+    __ IncrementCounter(&Counters::keyed_load_inline, 1,
+                        frame_->scratch0(), frame_->scratch1());
+
+    // Load the receiver and key from the stack.
+    frame_->SpillAllButCopyTOSToR1R0();
+    Register receiver = r0;
+    Register key = r1;
+    VirtualFrame::SpilledScope spilled(frame_);
+
+    DeferredReferenceGetKeyedValue* deferred =
+        new DeferredReferenceGetKeyedValue();
+
+    // Check that the receiver is a heap object.
+    __ tst(receiver, Operand(kSmiTagMask));
+    deferred->Branch(eq);
+
+    // The following instructions are the patchable part of the inlined keyed
+    // load code. Therefore the exact number of instructions generated needs to
+    // be fixed, so the constant pool is blocked while generating this code.
+#ifdef DEBUG
+    int kInlinedKeyedLoadInstructions = 19;
+    Label check_inlined_codesize;
+    masm_->bind(&check_inlined_codesize);
+#endif
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      Register scratch1 = VirtualFrame::scratch0();
+      Register scratch2 = VirtualFrame::scratch1();
+      // Check the map. The null map used below is patched by the inline cache
+      // code.
+      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ mov(scratch2, Operand(Factory::null_value()));
+      __ cmp(scratch1, scratch2);
+      deferred->Branch(ne);
+
+      // Check that the key is a smi.
+      __ tst(key, Operand(kSmiTagMask));
+      deferred->Branch(ne);
+
+      // Get the elements array from the receiver and check that it
+      // is not a dictionary.
+      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+      __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+      __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+      __ cmp(scratch2, ip);
+      deferred->Branch(ne);
+
+      // Check that key is within bounds. Use unsigned comparison to handle
+      // negative keys.
+      __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+      __ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
+      deferred->Branch(ls);  // Unsigned less than or equal.
+
+      // Load and check that the result is not the hole (key is a smi).
+      __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+      __ add(scratch1,
+             scratch1,
+             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ ldr(r0,
+             MemOperand(scratch1, key, LSL,
+                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
+      __ cmp(r0, scratch2);
+      // This is the only branch to deferred where r0 and r1 do not contain the
+      // receiver and key.  We can't just load undefined here because we have to
+      // check the prototype.
+      deferred->Branch(eq);
+
+      // Make sure that the expected number of instructions is generated.
+      ASSERT_EQ(kInlinedKeyedLoadInstructions,
+                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
+    }
+
+    deferred->BindExit();
+  }
+}
+
+
+void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
+  VirtualFrame::SpilledScope scope(frame_);
+  // Generate inlined version of the keyed store if the code is in a loop
+  // and the key is likely to be a smi.
+  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
+    // Inline the keyed store.
+    Comment cmnt(masm_, "[ Inlined store to keyed property");
+
+    DeferredReferenceSetKeyedValue* deferred =
+        new DeferredReferenceSetKeyedValue();
+
+    // Counter will be decremented in the deferred code. Placed here to avoid
+    // having it in the instruction stream below where patching will occur.
+    __ IncrementCounter(&Counters::keyed_store_inline, 1,
+                        frame_->scratch0(), frame_->scratch1());
+
+    // Check that the value is a smi. As this inlined code does not set the
+    // write barrier it is only possible to store smi values.
+    __ tst(r0, Operand(kSmiTagMask));
+    deferred->Branch(ne);
+
+    // Load the key and receiver from the stack.
+    __ ldr(r1, MemOperand(sp, 0));
+    __ ldr(r2, MemOperand(sp, kPointerSize));
+
+    // Check that the key is a smi.
+    __ tst(r1, Operand(kSmiTagMask));
+    deferred->Branch(ne);
+
+    // Check that the receiver is a heap object.
+    __ tst(r2, Operand(kSmiTagMask));
+    deferred->Branch(eq);
+
+    // Check that the receiver is a JSArray.
+    __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
+    deferred->Branch(ne);
+
+    // Check that the key is within bounds. Both the key and the length of
+    // the JSArray are smis. Use unsigned comparison to handle negative keys.
+    __ ldr(r3, FieldMemOperand(r2, JSArray::kLengthOffset));
+    __ cmp(r3, r1);
+    deferred->Branch(ls);  // Unsigned less than or equal.
+
+    // The following instructions are the patchable part of the inlined keyed
+    // store code. Therefore the exact number of instructions generated needs
+    // to be fixed, so the constant pool is blocked while generating this code.
+#ifdef DEBUG
+    int kInlinedKeyedStoreInstructions = 7;
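+    // The 7 instructions are: elements load, map load, constant-pool load
+    // (mov), cmp, branch, add, and the patched str.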
+    Label check_inlined_codesize;
+    masm_->bind(&check_inlined_codesize);
+#endif
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      // Get the elements array from the receiver and check that it
+      // is not a dictionary.
+      __ ldr(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
+      __ ldr(r4, FieldMemOperand(r3, JSObject::kMapOffset));
+      // Read the fixed array map from the constant pool (not from the root
+      // array) so that the value can be patched.  When debugging, we patch this
+      // comparison to always fail so that we will hit the IC call in the
+      // deferred code which will allow the debugger to break for fast case
+      // stores.
+      __ mov(r5, Operand(Factory::fixed_array_map()));
+      __ cmp(r4, r5);
+      deferred->Branch(ne);
+
+      // Store the value.
+      __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ str(r0, MemOperand(r3, r1, LSL,
+                            kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
+
+      // Make sure that the expected number of instructions is generated.
+      ASSERT_EQ(kInlinedKeyedStoreInstructions,
+                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
+    }
+
+    deferred->BindExit();
+  } else {
+    frame()->CallKeyedStoreIC();
+  }
 }
 
 
@@ -4370,34 +5720,22 @@
       Comment cmnt(masm, "[ Load from Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
-      cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
+      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
       break;
     }
 
     case NAMED: {
-      VirtualFrame* frame = cgen_->frame();
-      Comment cmnt(masm, "[ Load from named Property");
-      Handle<String> name(GetName());
       Variable* var = expression_->AsVariableProxy()->AsVariable();
-      Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-      // Setup the name register.
-      __ mov(r2, Operand(name));
-      ASSERT(var == NULL || var->is_global());
-      RelocInfo::Mode rmode = (var == NULL)
-                            ? RelocInfo::CODE_TARGET
-                            : RelocInfo::CODE_TARGET_CONTEXT;
-      frame->CallCodeObject(ic, rmode, 0);
-      frame->EmitPush(r0);
+      bool is_global = var != NULL;
+      ASSERT(!is_global || var->is_global());
+      cgen_->EmitNamedLoad(GetName(), is_global);
+      cgen_->frame()->EmitPush(r0);
       break;
     }
 
     case KEYED: {
-      // TODO(181): Implement inlined version of array indexing once
-      // loop nesting is properly tracked on ARM.
       ASSERT(property != NULL);
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      ASSERT(var == NULL || var->is_global());
-      cgen_->EmitKeyedLoad(var != NULL);
+      cgen_->EmitKeyedLoad();
       cgen_->frame()->EmitPush(r0);
       break;
     }
@@ -4427,35 +5765,27 @@
       Comment cmnt(masm, "[ Store to Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       cgen_->StoreToSlot(slot, init_state);
-      cgen_->UnloadReference(this);
+      set_unloaded();
       break;
     }
 
     case NAMED: {
       Comment cmnt(masm, "[ Store to named Property");
-      // Call the appropriate IC code.
-      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-      Handle<String> name(GetName());
-
-      frame->EmitPop(r0);
-      frame->EmitPop(r1);
-      __ mov(r2, Operand(name));
-      frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+      cgen_->EmitNamedStore(GetName(), false);
       frame->EmitPush(r0);
       set_unloaded();
       break;
     }
 
     case KEYED: {
+      VirtualFrame::SpilledScope scope(frame);
       Comment cmnt(masm, "[ Store to keyed Property");
       Property* property = expression_->AsProperty();
       ASSERT(property != NULL);
       cgen_->CodeForSourcePosition(property->position());
 
-      // Call IC code.
-      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      frame->EmitPop(r0);  // value
-      frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+      frame->EmitPop(r0);  // Value.
+      cgen_->EmitKeyedStore(property->key()->type());
       frame->EmitPush(r0);
       cgen_->UnloadReference(this);
       break;
@@ -4468,11 +5798,11 @@
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
-  // Clone the boilerplate in new space. Set the context to the
-  // current context in cp.
+  // Create a new closure from the given function info in new
+  // space. Set the context to the current context in cp.
   Label gc;
 
-  // Pop the boilerplate function from the stack.
+  // Pop the function info from the stack.
   __ pop(r3);
 
   // Attempt to allocate new JSFunction in new space.
@@ -4490,27 +5820,24 @@
   __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
   __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
 
-  // Clone the rest of the boilerplate fields. We don't have to update
-  // the write barrier because the allocated object is in new space.
-  for (int offset = kPointerSize;
-       offset < JSFunction::kSize;
-       offset += kPointerSize) {
-    if (offset == JSFunction::kContextOffset) {
-      __ str(cp, FieldMemOperand(r0, offset));
-    } else {
-      __ ldr(r1, FieldMemOperand(r3, offset));
-      __ str(r1, FieldMemOperand(r0, offset));
-    }
-  }
+  // Initialize the rest of the function. We don't have to update the
+  // write barrier because the allocated object is in new space.
+  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
+  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
+  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
 
-  // Return result. The argument boilerplate has been popped already.
+  // Return result. The argument function info has been popped already.
   __ Ret();
 
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
-  __ push(cp);
-  __ push(r3);
-  __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+  __ Push(cp, r3);
+  __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
 }
 
 
@@ -4560,7 +5887,7 @@
 
   // Need to collect. Call into runtime system.
   __ bind(&gc);
-  __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
 }
 
 
@@ -4622,44 +5949,7 @@
   __ Ret();
 
   __ bind(&slow_case);
-  ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
-  __ TailCallRuntime(runtime, 3, 1);
-}
-
-
-// Count leading zeros in a 32 bit word.  On ARM5 and later it uses the clz
-// instruction.  On pre-ARM5 hardware this routine gives the wrong answer for 0
-// (31 instead of 32).
-static void CountLeadingZeros(
-    MacroAssembler* masm,
-    Register source,
-    Register scratch,
-    Register zeros) {
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
-  __ clz(zeros, source);  // This instruction is only supported after ARM5.
-#else
-  __ mov(zeros, Operand(0));
-  __ mov(scratch, source);
-  // Top 16.
-  __ tst(scratch, Operand(0xffff0000));
-  __ add(zeros, zeros, Operand(16), LeaveCC, eq);
-  __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
-  // Top 8.
-  __ tst(scratch, Operand(0xff000000));
-  __ add(zeros, zeros, Operand(8), LeaveCC, eq);
-  __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
-  // Top 4.
-  __ tst(scratch, Operand(0xf0000000));
-  __ add(zeros, zeros, Operand(4), LeaveCC, eq);
-  __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
-  // Top 2.
-  __ tst(scratch, Operand(0xc0000000));
-  __ add(zeros, zeros, Operand(2), LeaveCC, eq);
-  __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
-  // Top bit.
-  __ tst(scratch, Operand(0x80000000u));
-  __ add(zeros, zeros, Operand(1), LeaveCC, eq);
-#endif
+  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
 }
 
 
@@ -4726,25 +6016,27 @@
   __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
   // Subtract from 0 if source was negative.
   __ rsb(source_, source_, Operand(0), LeaveCC, ne);
+
+  // We have -1, 0 or 1, which we treat specially. Register source_ contains
+  // the absolute value: it is either equal to 1 (special case of -1 and 1),
+  // greater than 1 (not a special case) or less than 1 (special case of 0).
   __ cmp(source_, Operand(1));
   __ b(gt, &not_special);
 
-  // We have -1, 0 or 1, which we treat specially.
-  __ cmp(source_, Operand(0));
   // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
   static const uint32_t exponent_word_for_1 =
       HeapNumber::kExponentBias << HeapNumber::kExponentShift;
-  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
+  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
   // 1, 0 and -1 all have 0 for the second word.
   __ mov(mantissa, Operand(0));
   __ Ret();
 
   __ bind(&not_special);
-  // Count leading zeros.  Uses result2 for a scratch register on pre-ARM5.
+  // Count leading zeros.  Uses mantissa for a scratch register on pre-ARM5.
   // Gets the wrong answer for 0, but we already checked for that case above.
-  CountLeadingZeros(masm, source_, mantissa, zeros_);
+  __ CountLeadingZeros(source_, mantissa, zeros_);
   // Compute exponent and or it into the exponent register.
-  // We use result2 as a scratch register here.
+  // We use mantissa as a scratch register here.
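+  // The highest set bit of the absolute value is at bit (31 - zeros), so the
+  // biased exponent is (31 - zeros) + kExponentBias.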
   __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
   __ orr(exponent,
          exponent,
@@ -4763,45 +6055,6 @@
 }
 
 
-// This stub can convert a signed int32 to a heap number (double).  It does
-// not work for int32s that are in Smi range!  No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
- public:
-  WriteInt32ToHeapNumberStub(Register the_int,
-                             Register the_heap_number,
-                             Register scratch)
-      : the_int_(the_int),
-        the_heap_number_(the_heap_number),
-        scratch_(scratch) { }
-
- private:
-  Register the_int_;
-  Register the_heap_number_;
-  Register scratch_;
-
-  // Minor key encoding in 16 bits.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 14> {};
-
-  Major MajorKey() { return WriteInt32ToHeapNumber; }
-  int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
-    return  the_int_.code() +
-           (the_heap_number_.code() << 4) +
-           (scratch_.code() << 8);
-  }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
-  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
-};
-
-
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -4858,7 +6111,7 @@
   Label not_identical;
   Label heap_number, return_equal;
   Register exp_mask_reg = r5;
-  __ cmp(r0, Operand(r1));
+  __ cmp(r0, r1);
   __ b(ne, &not_identical);
 
   // The two objects are identical.  If we know that one of them isn't NaN then
@@ -4887,7 +6140,7 @@
           __ cmp(r4, Operand(ODDBALL_TYPE));
           __ b(ne, &return_equal);
           __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-          __ cmp(r0, Operand(r2));
+          __ cmp(r0, r2);
           __ b(ne, &return_equal);
           if (cc == le) {
             // undefined <= undefined should fail.
@@ -4984,7 +6237,7 @@
     CpuFeatures::Scope scope(VFP3);
     __ mov(r7, Operand(r1, ASR, kSmiTagSize));
     __ vmov(s15, r7);
-    __ vcvt(d7, s15);
+    __ vcvt_f64_s32(d7, s15);
     // Load the double from rhs, tagged HeapNumber r0, to d6.
     __ sub(r7, r0, Operand(kHeapObjectTag));
     __ vldr(d6, r7, HeapNumber::kValueOffset);
@@ -5027,7 +6280,7 @@
     __ vldr(d7, r7, HeapNumber::kValueOffset);
     __ mov(r7, Operand(r0, ASR, kSmiTagSize));
     __ vmov(s13, r7);
-    __ vcvt(d6, s13);
+    __ vcvt_f64_s32(d6, s13);
   } else {
     __ push(lr);
     // Load lhs to a double in r2, r3.
@@ -5129,8 +6382,10 @@
   } else {
     // Call a native function to do a comparison between two non-NaNs.
     // Call C routine that may not cause GC or other trouble.
-    __ mov(r5, Operand(ExternalReference::compare_doubles()));
-    __ Jump(r5);  // Tail call.
+    __ push(lr);
+    __ PrepareCallCFunction(4, r5);  // Two doubles count as 4 arguments.
+    __ CallCFunction(ExternalReference::compare_doubles(), 4);
+    __ pop(pc);  // Return.
   }
 }
 
@@ -5223,6 +6478,123 @@
 }
 
 
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                                         Register object,
+                                                         Register result,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         Register scratch3,
+                                                         bool object_is_smi,
+                                                         Label* not_found) {
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch3;
+
+  // Load the number string cache.
+  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+  // Divide length by two (length is not a smi).
+  __ mov(mask, Operand(mask, ASR, 1));
+  __ sub(mask, mask, Operand(1));  // Make mask.
+
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Label is_smi;
+  Label load_result_from_cache;
+  if (!object_is_smi) {
+    __ BranchOnSmi(object, &is_smi);
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      __ CheckMap(object,
+                  scratch1,
+                  Factory::heap_number_map(),
+                  not_found,
+                  true);
+
+      ASSERT_EQ(8, kDoubleSize);
+      __ add(scratch1,
+             object,
+             Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+      __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+      __ eor(scratch1, scratch1, Operand(scratch2));
+      __ and_(scratch1, scratch1, Operand(mask));
+
+      // Calculate address of entry in string cache: each entry consists
+      // of two pointer sized fields.
+      __ add(scratch1,
+             number_string_cache,
+             Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+      Register probe = mask;
+      __ ldr(probe,
+             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+      __ BranchOnSmi(probe, not_found);
+      __ sub(scratch2, object, Operand(kHeapObjectTag));
+      __ vldr(d0, scratch2, HeapNumber::kValueOffset);
+      __ sub(probe, probe, Operand(kHeapObjectTag));
+      __ vldr(d1, probe, HeapNumber::kValueOffset);
+      __ vcmp(d0, d1);
+      __ vmrs(pc);
+      __ b(ne, not_found);  // The cache did not contain this value.
+      __ b(&load_result_from_cache);
+    } else {
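+      // Without VFP3 we cannot compare doubles inline, so treat any non-smi
+      // argument as a cache miss.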
+      __ b(not_found);
+    }
+  }
+
+  __ bind(&is_smi);
+  Register scratch = scratch1;
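+  // (object ASR 1) drops the one-bit smi tag; the untagged integer value is
+  // the hash for smis.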
+  __ and_(scratch, mask, Operand(object, ASR, 1));
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  __ add(scratch,
+         number_string_cache,
+         Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+  // Check if the entry is the smi we are looking for.
+  Register probe = mask;
+  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  __ cmp(object, probe);
+  __ b(ne, not_found);
+
+  // Get the result from the cache.
+  __ bind(&load_result_from_cache);
+  __ ldr(result,
+         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+  __ IncrementCounter(&Counters::number_to_string_native,
+                      1,
+                      scratch1,
+                      scratch2);
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  __ ldr(r1, MemOperand(sp, 0));
+
+  // Generate code to look up the number in the number string cache.
+  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
+  __ add(sp, sp, Operand(1 * kPointerSize));
+  __ Ret();
+
+  __ bind(&runtime);
+  // Handle number to string in the runtime system if not found in the cache.
+  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  __ RecordWriteHelper(object_, offset_, scratch_);
+  __ Ret();
+}
+
+
 // On entry r0 (rhs) and r1 (lhs) are the values to be compared.
 // On exit r0 is 0, positive or negative to indicate the result of
 // the comparison.
@@ -5338,8 +6710,7 @@
 
   __ bind(&slow);
 
-  __ push(r1);
-  __ push(r0);
+  __ Push(r1, r0);
   // Figure out which native to call and setup the arguments.
   Builtins::JavaScript native;
   if (cc_ == eq) {
@@ -5363,90 +6734,257 @@
 }
 
 
-// Allocates a heap number or jumps to the label if the young space is full and
-// a scavenge is needed.
-static void AllocateHeapNumber(
-    MacroAssembler* masm,
-    Label* need_gc,       // Jump here if young space is full.
-    Register result,  // The tagged address of the new heap number.
-    Register scratch1,  // A scratch register.
-    Register scratch2) {  // Another scratch register.
-  // Allocate an object in the heap for the heap number and tag it as a heap
-  // object.
-  __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
-                        result,
-                        scratch1,
-                        scratch2,
-                        need_gc,
-                        TAG_OBJECT);
-
-  // Get heap number map and store it in the allocated object.
-  __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
-  __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
-}
-
-
 // We fall into this code if the operands were Smis, but the result was
 // not (e.g. overflow).  We branch into this code (to the not_smi label) if
 // the operands were not both Smi.  The operands are in r0 and r1.  In order
 // to call the C-implemented binary fp operation routines we need to end up
 // with the double precision floating point operands in r0 and r1 (for the
 // value in r1) and r2 and r3 (for the value in r0).
-static void HandleBinaryOpSlowCases(MacroAssembler* masm,
-                                    Label* not_smi,
-                                    const Builtins::JavaScript& builtin,
-                                    Token::Value operation,
-                                    OverwriteMode mode) {
-  Label slow, slow_pop_2_first, do_the_call;
-  Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
-  // Smi-smi case (overflow).
-  // Since both are Smis there is no heap number to overwrite, so allocate.
-  // The new heap number is in r5.  r6 and r7 are scratch.
-  AllocateHeapNumber(masm, &slow, r5, r6, r7);
+void GenericBinaryOpStub::HandleBinaryOpSlowCases(
+    MacroAssembler* masm,
+    Label* not_smi,
+    Register lhs,
+    Register rhs,
+    const Builtins::JavaScript& builtin) {
+  Label slow, slow_reverse, do_the_call;
+  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
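+  // VFP3 has no double-precision remainder instruction, so MOD always goes
+  // through the C routine.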
 
-  // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
-  // using registers d7 and d6 for the double values.
-  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
-      Token::MOD != operation;
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
-    __ vmov(s15, r7);
-    __ vcvt(d7, s15);
-    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
-    __ vmov(s13, r7);
-    __ vcvt(d6, s13);
-  } else {
-    // Write Smi from r0 to r3 and r2 in double format.  r6 is scratch.
-    __ mov(r7, Operand(r0));
-    ConvertToDoubleStub stub1(r3, r2, r7, r6);
-    __ push(lr);
-    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
-    // Write Smi from r1 to r1 and r0 in double format.  r6 is scratch.
-    __ mov(r7, Operand(r1));
-    ConvertToDoubleStub stub2(r1, r0, r7, r6);
-    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
-    __ pop(lr);
+  ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
+
+  if (ShouldGenerateSmiCode()) {
+    // Smi-smi case (overflow).
+    // Since both are Smis there is no heap number to overwrite, so allocate.
+    // The new heap number is in r5.  r6 and r7 are scratch.
+    __ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
+
+    // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
+    // using registers d7 and d6 for the double values.
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
+      __ vmov(s15, r7);
+      __ vcvt_f64_s32(d7, s15);
+      __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
+      __ vmov(s13, r7);
+      __ vcvt_f64_s32(d6, s13);
+    } else {
+      // Write Smi from rhs to r3 and r2 in double format.  r6 is scratch.
+      __ mov(r7, Operand(rhs));
+      ConvertToDoubleStub stub1(r3, r2, r7, r6);
+      __ push(lr);
+      __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+      // Write Smi from lhs to r1 and r0 in double format.  r6 is scratch.
+      __ mov(r7, Operand(lhs));
+      ConvertToDoubleStub stub2(r1, r0, r7, r6);
+      __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+      __ pop(lr);
+    }
+    __ jmp(&do_the_call);  // Tail call.  No return.
   }
 
-  __ jmp(&do_the_call);  // Tail call.  No return.
+  // We branch here if at least one of r0 and r1 is not a Smi.
+  __ bind(not_smi);
+
+  // After this point we have the left hand side in r1 and the right hand side
+  // in r0.
+  if (lhs.is(r0)) {
+    __ Swap(r0, r1, ip);
+  }
+
+  if (ShouldGenerateFPCode()) {
+    Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
+
+    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
+      switch (op_) {
+        case Token::ADD:
+        case Token::SUB:
+        case Token::MUL:
+        case Token::DIV:
+          GenerateTypeTransition(masm);
+          break;
+
+        default:
+          break;
+      }
+    }
+
+    if (mode_ == NO_OVERWRITE) {
+      // In the case where there is no chance of an overwritable float we may as
+      // well do the allocation immediately while r0 and r1 are untouched.
+      __ AllocateHeapNumber(r5, r6, r7, &slow);
+    }
+
+    // Move r0 to a double in r2-r3.
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
+    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+    __ b(ne, &slow);
+    if (mode_ == OVERWRITE_RIGHT) {
+      __ mov(r5, Operand(r0));  // Overwrite this heap number.
+    }
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      // Load the double from tagged HeapNumber r0 to d7.
+      __ sub(r7, r0, Operand(kHeapObjectTag));
+      __ vldr(d7, r7, HeapNumber::kValueOffset);
+    } else {
+      // Calling convention says that second double is in r2 and r3.
+      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+    }
+    __ jmp(&finished_loading_r0);
+    __ bind(&r0_is_smi);
+    if (mode_ == OVERWRITE_RIGHT) {
+      // We can't overwrite a Smi so get address of new heap number into r5.
+      __ AllocateHeapNumber(r5, r6, r7, &slow);
+    }
+
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      // Convert smi in r0 to double in d7.
+      __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+      __ vmov(s15, r7);
+      __ vcvt_f64_s32(d7, s15);
+    } else {
+      // Write Smi from r0 to r3 and r2 in double format.
+      __ mov(r7, Operand(r0));
+      ConvertToDoubleStub stub3(r3, r2, r7, r6);
+      __ push(lr);
+      __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+      __ pop(lr);
+    }
+
+    // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
+    // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
+    Label r1_is_not_smi;
+    if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
+      __ tst(r1, Operand(kSmiTagMask));
+      __ b(ne, &r1_is_not_smi);
+      GenerateTypeTransition(masm);
+      __ jmp(&r1_is_smi);
+    }
+
+    __ bind(&finished_loading_r0);
+
+    // Move r1 to a double in r0-r1.
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
+    __ bind(&r1_is_not_smi);
+    __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+    __ b(ne, &slow);
+    if (mode_ == OVERWRITE_LEFT) {
+      __ mov(r5, Operand(r1));  // Overwrite this heap number.
+    }
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      // Load the double from tagged HeapNumber r1 to d6.
+      __ sub(r7, r1, Operand(kHeapObjectTag));
+      __ vldr(d6, r7, HeapNumber::kValueOffset);
+    } else {
+      // Calling convention says that first double is in r0 and r1.
+      __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+      __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+    }
+    __ jmp(&finished_loading_r1);
+    __ bind(&r1_is_smi);
+    if (mode_ == OVERWRITE_LEFT) {
+      // We can't overwrite a Smi so get address of new heap number into r5.
+      __ AllocateHeapNumber(r5, r6, r7, &slow);
+    }
+
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      // Convert smi in r1 to double in d6.
+      __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+      __ vmov(s13, r7);
+      __ vcvt_f64_s32(d6, s13);
+    } else {
+      // Write Smi from r1 to r1 and r0 in double format.
+      __ mov(r7, Operand(r1));
+      ConvertToDoubleStub stub4(r1, r0, r7, r6);
+      __ push(lr);
+      __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+      __ pop(lr);
+    }
+
+    __ bind(&finished_loading_r1);
+
+    __ bind(&do_the_call);
+    // If we are inlining the operation using VFP3 instructions for
+    // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+    if (use_fp_registers) {
+      CpuFeatures::Scope scope(VFP3);
+      // ARMv7 VFP3 instructions to implement
+      // double precision, add, subtract, multiply, divide.
+
+      if (Token::MUL == op_) {
+        __ vmul(d5, d6, d7);
+      } else if (Token::DIV == op_) {
+        __ vdiv(d5, d6, d7);
+      } else if (Token::ADD == op_) {
+        __ vadd(d5, d6, d7);
+      } else if (Token::SUB == op_) {
+        __ vsub(d5, d6, d7);
+      } else {
+        UNREACHABLE();
+      }
+      __ sub(r0, r5, Operand(kHeapObjectTag));
+      __ vstr(d5, r0, HeapNumber::kValueOffset);
+      __ add(r0, r0, Operand(kHeapObjectTag));
+      __ mov(pc, lr);
+    } else {
+      // If we did not inline the operation, then the arguments are in:
+      // r0: Left value (least significant part of mantissa).
+      // r1: Left value (sign, exponent, top of mantissa).
+      // r2: Right value (least significant part of mantissa).
+      // r3: Right value (sign, exponent, top of mantissa).
+      // r5: Address of heap number for result.
+
+      __ push(lr);   // For later.
+      __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
+      // Call C routine that may not cause GC or other trouble. r5 is
+      // callee-saved.
+      __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+      // Store answer in the overwritable heap number.
+  #if !defined(USE_ARM_EABI)
+      // Double returned in fp coprocessor registers 0 and 1, encoded as
+      // register cr8.  Offsets must be divisible by 4 for the coprocessor, so
+      // we need to subtract the tag from r5.
+      __ sub(r4, r5, Operand(kHeapObjectTag));
+      __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
+  #else
+      // Double returned in registers 0 and 1.
+      __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+      __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
+  #endif
+      __ mov(r0, Operand(r5));
+      // And we are done.
+      __ pop(pc);
+    }
+  }
+
+
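+  // The slow path expects the left operand in r1 and the right in r0.  If
+  // this stub was specialized with the reverse register assignment, swap
+  // before falling into the slow case.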
+  if (lhs.is(r0)) {
+    __ b(&slow);
+    __ bind(&slow_reverse);
+    __ Swap(r0, r1, ip);
+  }
 
   // We jump to here if something goes wrong (one param is not a number of any
   // sort or new-space allocation fails).
   __ bind(&slow);
 
   // Push arguments to the stack
-  __ push(r1);
-  __ push(r0);
+  __ Push(r1, r0);
 
-  if (Token::ADD == operation) {
+  if (Token::ADD == op_) {
     // Test for string arguments before calling runtime.
     // r1 : first argument
     // r0 : second argument
     // sp[0] : second argument
     // sp[4] : first argument
 
-    Label not_strings, not_string1, string1;
+    Label not_strings, not_string1, string1, string1_smi2;
     __ tst(r1, Operand(kSmiTagMask));
     __ b(eq, &not_string1);
     __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
@@ -5454,13 +6992,24 @@
 
     // First argument is a string, test second.
     __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &string1);
+    __ b(eq, &string1_smi2);
     __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
     __ b(ge, &string1);
 
     // First and second argument are strings.
-    StringAddStub stub(NO_STRING_CHECK_IN_STUB);
-    __ TailCallStub(&stub);
+    StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+    __ TailCallStub(&string_add_stub);
+
+    __ bind(&string1_smi2);
+    // First argument is a string, second is a smi. Try to look up the number
+    // string for the smi in the number string cache.
+    NumberToStringStub::GenerateLookupNumberStringCache(
+        masm, r0, r2, r4, r5, r6, true, &string1);
+
+    // Replace second argument on stack and tailcall string add stub to make
+    // the result.
+    __ str(r2, MemOperand(sp, 0));
+    __ TailCallStub(&string_add_stub);
 
     // Only first argument is a string.
     __ bind(&string1);
@@ -5480,156 +7029,6 @@
   }
 
   __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
-
-  // We branch here if at least one of r0 and r1 is not a Smi.
-  __ bind(not_smi);
-  if (mode == NO_OVERWRITE) {
-    // In the case where there is no chance of an overwritable float we may as
-    // well do the allocation immediately while r0 and r1 are untouched.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
-  }
-
-  // Move r0 to a double in r2-r3.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
-  __ b(ne, &slow);
-  if (mode == OVERWRITE_RIGHT) {
-    __ mov(r5, Operand(r0));  // Overwrite this heap number.
-  }
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    // Load the double from tagged HeapNumber r0 to d7.
-    __ sub(r7, r0, Operand(kHeapObjectTag));
-    __ vldr(d7, r7, HeapNumber::kValueOffset);
-  } else {
-    // Calling convention says that second double is in r2 and r3.
-    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
-  }
-  __ jmp(&finished_loading_r0);
-  __ bind(&r0_is_smi);
-  if (mode == OVERWRITE_RIGHT) {
-    // We can't overwrite a Smi so get address of new heap number into r5.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
-  }
-
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    // Convert smi in r0 to double in d7.
-    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
-    __ vmov(s15, r7);
-    __ vcvt(d7, s15);
-  } else {
-    // Write Smi from r0 to r3 and r2 in double format.
-    __ mov(r7, Operand(r0));
-    ConvertToDoubleStub stub3(r3, r2, r7, r6);
-    __ push(lr);
-    __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
-    __ pop(lr);
-  }
-
-  __ bind(&finished_loading_r0);
-
-  // Move r1 to a double in r0-r1.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
-  __ b(ne, &slow);
-  if (mode == OVERWRITE_LEFT) {
-    __ mov(r5, Operand(r1));  // Overwrite this heap number.
-  }
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    // Load the double from tagged HeapNumber r1 to d6.
-    __ sub(r7, r1, Operand(kHeapObjectTag));
-    __ vldr(d6, r7, HeapNumber::kValueOffset);
-  } else {
-    // Calling convention says that first double is in r0 and r1.
-    __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
-    __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
-  }
-  __ jmp(&finished_loading_r1);
-  __ bind(&r1_is_smi);
-  if (mode == OVERWRITE_LEFT) {
-    // We can't overwrite a Smi so get address of new heap number into r5.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
-  }
-
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    // Convert smi in r1 to double in d6.
-    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
-    __ vmov(s13, r7);
-    __ vcvt(d6, s13);
-  } else {
-    // Write Smi from r1 to r1 and r0 in double format.
-    __ mov(r7, Operand(r1));
-    ConvertToDoubleStub stub4(r1, r0, r7, r6);
-    __ push(lr);
-    __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
-    __ pop(lr);
-  }
-
-  __ bind(&finished_loading_r1);
-
-  __ bind(&do_the_call);
-  // If we are inlining the operation using VFP3 instructions for
-  // add, subtract, multiply, or divide, the arguments are in d6 and d7.
-  if (use_fp_registers) {
-    CpuFeatures::Scope scope(VFP3);
-    // ARMv7 VFP3 instructions to implement
-    // double precision, add, subtract, multiply, divide.
-
-    if (Token::MUL == operation) {
-      __ vmul(d5, d6, d7);
-    } else if (Token::DIV == operation) {
-      __ vdiv(d5, d6, d7);
-    } else if (Token::ADD == operation) {
-      __ vadd(d5, d6, d7);
-    } else if (Token::SUB == operation) {
-      __ vsub(d5, d6, d7);
-    } else {
-      UNREACHABLE();
-    }
-    __ sub(r0, r5, Operand(kHeapObjectTag));
-    __ vstr(d5, r0, HeapNumber::kValueOffset);
-    __ add(r0, r0, Operand(kHeapObjectTag));
-    __ mov(pc, lr);
-    return;
-  }
-
-  // If we did not inline the operation, then the arguments are in:
-  // r0: Left value (least significant part of mantissa).
-  // r1: Left value (sign, exponent, top of mantissa).
-  // r2: Right value (least significant part of mantissa).
-  // r3: Right value (sign, exponent, top of mantissa).
-  // r5: Address of heap number for result.
-
-  __ push(lr);   // For later.
-  __ push(r5);   // Address of heap number that is answer.
-  __ AlignStack(0);
-  // Call C routine that may not cause GC or other trouble.
-  __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
-  __ Call(r5);
-  __ pop(r4);  // Address of heap number.
-  __ cmp(r4, Operand(Smi::FromInt(0)));
-  __ pop(r4, eq);  // Conditional pop instruction to get rid of alignment push.
-  // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
-  // Double returned in fp coprocessor register 0 and 1, encoded as register
-  // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
-  // substract the tag from r4.
-  __ sub(r5, r4, Operand(kHeapObjectTag));
-  __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
-#else
-  // Double returned in registers 0 and 1.
-  __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
-  __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
-#endif
-  __ mov(r0, Operand(r4));
-  // And we are done.
-  __ pop(pc);
 }
 
 
@@ -5688,7 +7087,7 @@
     // conversion using round to zero.
     __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
     __ vmov(d7, scratch2, scratch);
-    __ vcvt(s15, d7);
+    __ vcvt_s32_f64(s15, d7);
     __ vmov(dest, s15);
   } else {
     // Get the top bits of the mantissa.
@@ -5723,31 +7122,35 @@
 // by the ES spec.  If this is the case we do the bitwise op and see if the
 // result is a Smi.  If so, great, otherwise we try to find a heap number to
 // write the answer into (either by allocating or by overwriting).
-// On entry the operands are in r0 and r1.  On exit the answer is in r0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
+// On entry the operands are in lhs and rhs.  On exit the answer is in r0.
+void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
+                                                Register lhs,
+                                                Register rhs) {
   Label slow, result_not_a_smi;
-  Label r0_is_smi, r1_is_smi;
-  Label done_checking_r0, done_checking_r1;
+  Label rhs_is_smi, lhs_is_smi;
+  Label done_checking_rhs, done_checking_lhs;
 
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+  __ tst(lhs, Operand(kSmiTagMask));
+  __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
   __ b(ne, &slow);
-  GetInt32(masm, r1, r3, r5, r4, &slow);
-  __ jmp(&done_checking_r1);
-  __ bind(&r1_is_smi);
-  __ mov(r3, Operand(r1, ASR, 1));
-  __ bind(&done_checking_r1);
+  GetInt32(masm, lhs, r3, r5, r4, &slow);
+  __ jmp(&done_checking_lhs);
+  __ bind(&lhs_is_smi);
+  __ mov(r3, Operand(lhs, ASR, 1));
+  __ bind(&done_checking_lhs);
 
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+  __ tst(rhs, Operand(kSmiTagMask));
+  __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
   __ b(ne, &slow);
-  GetInt32(masm, r0, r2, r5, r4, &slow);
-  __ jmp(&done_checking_r0);
-  __ bind(&r0_is_smi);
-  __ mov(r2, Operand(r0, ASR, 1));
-  __ bind(&done_checking_r0);
+  GetInt32(masm, rhs, r2, r5, r4, &slow);
+  __ jmp(&done_checking_rhs);
+  __ bind(&rhs_is_smi);
+  __ mov(r2, Operand(rhs, ASR, 1));
+  __ bind(&done_checking_rhs);
+
+  ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
 
   // r0 and r1: Original operands (Smi or heap numbers).
   // r2 and r3: Signed int32 operands.
@@ -5787,20 +7190,20 @@
   __ bind(&result_not_a_smi);
   switch (mode_) {
     case OVERWRITE_RIGHT: {
-      __ tst(r0, Operand(kSmiTagMask));
+      __ tst(rhs, Operand(kSmiTagMask));
       __ b(eq, &have_to_allocate);
-      __ mov(r5, Operand(r0));
+      __ mov(r5, Operand(rhs));
       break;
     }
     case OVERWRITE_LEFT: {
-      __ tst(r1, Operand(kSmiTagMask));
+      __ tst(lhs, Operand(kSmiTagMask));
       __ b(eq, &have_to_allocate);
-      __ mov(r5, Operand(r1));
+      __ mov(r5, Operand(lhs));
       break;
     }
     case NO_OVERWRITE: {
       // Get a new heap number in r5.  r6 and r7 are scratch.
-      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+      __ AllocateHeapNumber(r5, r6, r7, &slow);
     }
     default: break;
   }
@@ -5820,14 +7223,13 @@
   if (mode_ != NO_OVERWRITE) {
     __ bind(&have_to_allocate);
     // Get a new heap number in r5.  r6 and r7 are scratch.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    __ AllocateHeapNumber(r5, r6, r7, &slow);
     __ jmp(&got_a_heap_number);
   }
 
   // If all else failed then we go to the runtime system.
   __ bind(&slow);
-  __ push(r1);  // restore stack
-  __ push(r0);
+  __ Push(lhs, rhs);  // Restore stack.
   switch (op_) {
     case Token::BIT_OR:
       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@@ -5957,115 +7359,134 @@
 
 
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  // r1 : x
-  // r0 : y
-  // result : r0
+  // lhs_ : x
+  // rhs_ : y
+  // r0   : result
 
-  // All ops need to know whether we are dealing with two Smis.  Set up r2 to
-  // tell us that.
-  __ orr(r2, r1, Operand(r0));  // r2 = x | y;
+  Register result = r0;
+  Register lhs = lhs_;
+  Register rhs = rhs_;
+
+  // This code can't cope with other register allocations yet.
+  ASSERT(result.is(r0) &&
+         ((lhs.is(r0) && rhs.is(r1)) ||
+          (lhs.is(r1) && rhs.is(r0))));
+
+  Register smi_test_reg = VirtualFrame::scratch0();
+  Register scratch = VirtualFrame::scratch1();
+
+  // All ops need to know whether we are dealing with two Smis.  Set up
+  // smi_test_reg to tell us that.
+  if (ShouldGenerateSmiCode()) {
+    __ orr(smi_test_reg, lhs, Operand(rhs));
+  }
 
   switch (op_) {
     case Token::ADD: {
       Label not_smi;
       // Fast path.
-      ASSERT(kSmiTag == 0);  // Adjust code below.
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &not_smi);
-      __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
-      // Return if no overflow.
-      __ Ret(vc);
-      __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
-
-      HandleBinaryOpSlowCases(masm,
-                              &not_smi,
-                              Builtins::ADD,
-                              Token::ADD,
-                              mode_);
+      if (ShouldGenerateSmiCode()) {
+        ASSERT(kSmiTag == 0);  // Adjust code below.
+        __ tst(smi_test_reg, Operand(kSmiTagMask));
+        __ b(ne, &not_smi);
+        __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
+        // Return if no overflow.
+        __ Ret(vc);
+        __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
+      }
+      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
       break;
     }
 
     case Token::SUB: {
       Label not_smi;
       // Fast path.
-      ASSERT(kSmiTag == 0);  // Adjust code below.
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &not_smi);
-      __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
-      // Return if no overflow.
-      __ Ret(vc);
-      __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
-
-      HandleBinaryOpSlowCases(masm,
-                              &not_smi,
-                              Builtins::SUB,
-                              Token::SUB,
-                              mode_);
+      if (ShouldGenerateSmiCode()) {
+        ASSERT(kSmiTag == 0);  // Adjust code below.
+        __ tst(smi_test_reg, Operand(kSmiTagMask));
+        __ b(ne, &not_smi);
+        if (lhs.is(r1)) {
+          __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
+          // Return if no overflow.
+          __ Ret(vc);
+          __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
+        } else {
+          __ sub(r0, r0, Operand(r1), SetCC);  // Subtract y optimistically.
+          // Return if no overflow.
+          __ Ret(vc);
+          __ add(r0, r0, Operand(r1));  // Revert optimistic subtract.
+        }
+      }
+      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
       break;
     }
 
     case Token::MUL: {
       Label not_smi, slow;
-      ASSERT(kSmiTag == 0);  // adjust code below
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &not_smi);
-      // Remove tag from one operand (but keep sign), so that result is Smi.
-      __ mov(ip, Operand(r0, ASR, kSmiTagSize));
-      // Do multiplication
-      __ smull(r3, r2, r1, ip);  // r3 = lower 32 bits of ip*r1.
-      // Go slow on overflows (overflow bit is not set).
-      __ mov(ip, Operand(r3, ASR, 31));
-      __ cmp(ip, Operand(r2));  // no overflow if higher 33 bits are identical
-      __ b(ne, &slow);
-      // Go slow on zero result to handle -0.
-      __ tst(r3, Operand(r3));
-      __ mov(r0, Operand(r3), LeaveCC, ne);
-      __ Ret(ne);
-      // We need -0 if we were multiplying a negative number with 0 to get 0.
-      // We know one of them was zero.
-      __ add(r2, r0, Operand(r1), SetCC);
-      __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
-      __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
-      // Slow case.  We fall through here if we multiplied a negative number
-      // with 0, because that would mean we should produce -0.
-      __ bind(&slow);
-
-      HandleBinaryOpSlowCases(masm,
-                              &not_smi,
-                              Builtins::MUL,
-                              Token::MUL,
-                              mode_);
+      if (ShouldGenerateSmiCode()) {
+        ASSERT(kSmiTag == 0);  // adjust code below
+        __ tst(smi_test_reg, Operand(kSmiTagMask));
+        Register scratch2 = smi_test_reg;
+        smi_test_reg = no_reg;
+        __ b(ne, &not_smi);
+        // Remove tag from one operand (but keep sign), so that result is Smi.
+        __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
+        // Do multiplication
+        // scratch = lower 32 bits of ip * lhs.
+        __ smull(scratch, scratch2, lhs, ip);
+        // Go slow on overflows (overflow bit is not set).
+        __ mov(ip, Operand(scratch, ASR, 31));
+        // No overflow if higher 33 bits are identical.
+        __ cmp(ip, Operand(scratch2));
+        __ b(ne, &slow);
+        // Go slow on zero result to handle -0.
+        __ tst(scratch, Operand(scratch));
+        __ mov(result, Operand(scratch), LeaveCC, ne);
+        __ Ret(ne);
+        // We need -0 if we were multiplying a negative number with 0 to get 0.
+        // We know one of them was zero.
+        __ add(scratch2, rhs, Operand(lhs), SetCC);
+        __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
+        __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
+        // Slow case.  We fall through here if we multiplied a negative number
+        // with 0, because that would mean we should produce -0.
+        __ bind(&slow);
+      }
+      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
       break;
     }
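
The smull idiom above can be restated in portable C++. A minimal sketch
(hypothetical helper, not part of this patch) of the invariant the stub relies
on: a signed 32x32->64 multiply fits in 32 bits exactly when the high word
equals the arithmetic sign-extension of the low word.

    #include <cstdint>

    // Returns true and stores the product if lhs * rhs fits in 32 bits,
    // mirroring the smull / ASR 31 / cmp sequence in the stub above.
    // (The stub additionally bails out on a zero result so -0 is handled.)
    bool MulFitsInt32(int32_t lhs, int32_t rhs, int32_t* result) {
      int64_t product = static_cast<int64_t>(lhs) * rhs;
      int32_t lo = static_cast<int32_t>(product);        // smull low word
      int32_t hi = static_cast<int32_t>(product >> 32);  // smull high word
      if ((lo >> 31) != hi) return false;  // Higher 33 bits differ: overflow.
      *result = lo;
      return true;
    }
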
 
     case Token::DIV:
     case Token::MOD: {
       Label not_smi;
-      if (specialized_on_rhs_) {
+      if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
         Label smi_is_unsuitable;
-        __ BranchOnNotSmi(r1, &not_smi);
+        __ BranchOnNotSmi(lhs, &not_smi);
         if (IsPowerOf2(constant_rhs_)) {
           if (op_ == Token::MOD) {
-            __ and_(r0,
-                    r1,
+            __ and_(rhs,
+                    lhs,
                     Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
                     SetCC);
             // We now have the answer, but if the input was negative we also
             // have the sign bit.  Our work is done if the result is
             // positive or zero:
+            if (!rhs.is(r0)) {
+              __ mov(r0, rhs, LeaveCC, pl);
+            }
             __ Ret(pl);
             // A mod of a negative left hand side must return a negative number.
             // Unfortunately if the answer is 0 then we must return -0.  And we
-            // already optimistically trashed r0 so we may need to restore it.
-            __ eor(r0, r0, Operand(0x80000000u), SetCC);
+            // already optimistically trashed rhs so we may need to restore it.
+            __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
             // Next two instructions are conditional on the answer being -0.
-            __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
+            __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
             __ b(eq, &smi_is_unsuitable);
             // We need to subtract the dividend.  Eg. -3 % 4 == -3.
-            __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_)));
+            __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
           } else {
             ASSERT(op_ == Token::DIV);
-            __ tst(r1,
+            __ tst(lhs,
                    Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
             __ b(ne, &smi_is_unsuitable);  // Go slow on negative or remainder.
             int shift = 0;
@@ -6074,12 +7495,12 @@
               d >>= 1;
               shift++;
             }
-            __ mov(r0, Operand(r1, LSR, shift));
+            __ mov(r0, Operand(lhs, LSR, shift));
             __ bic(r0, r0, Operand(kSmiTagMask));
           }
         } else {
           // Not a power of 2.
-          __ tst(r1, Operand(0x80000000u));
+          __ tst(lhs, Operand(0x80000000u));
           __ b(ne, &smi_is_unsuitable);
           // Find a fixed point reciprocal of the divisor so we can divide by
           // multiplying.
@@ -6095,40 +7516,42 @@
             shift++;
           }
           mul++;
-          __ mov(r2, Operand(mul));
-          __ umull(r3, r2, r2, r1);
-          __ mov(r2, Operand(r2, LSR, shift - 31));
-          // r2 is r1 / rhs.  r2 is not Smi tagged.
-          // r0 is still the known rhs.  r0 is Smi tagged.
-          // r1 is still the unkown lhs.  r1 is Smi tagged.
-          int required_r4_shift = 0;  // Including the Smi tag shift of 1.
-          // r4 = r2 * r0.
+          Register scratch2 = smi_test_reg;
+          smi_test_reg = no_reg;
+          __ mov(scratch2, Operand(mul));
+          __ umull(scratch, scratch2, scratch2, lhs);
+          __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
+          // scratch2 is lhs / rhs.  scratch2 is not Smi tagged.
+          // rhs is still the known rhs.  rhs is Smi tagged.
+          // lhs is still the unknown lhs.  lhs is Smi tagged.
+          int required_scratch_shift = 0;  // Including the Smi tag shift of 1.
+          // scratch = scratch2 * rhs.
           MultiplyByKnownInt2(masm,
-                              r4,
-                              r2,
-                              r0,
+                              scratch,
+                              scratch2,
+                              rhs,
                               constant_rhs_,
-                              &required_r4_shift);
-          // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs).
+                              &required_scratch_shift);
+          // scratch << required_scratch_shift is now the Smi tagged rhs *
+          // (lhs / rhs) where / indicates integer division.
           if (op_ == Token::DIV) {
-            __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
+            __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
             __ b(ne, &smi_is_unsuitable);  // There was a remainder.
-            __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+            __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
           } else {
             ASSERT(op_ == Token::MOD);
-            __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
+            __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
           }
         }
         __ Ret();
         __ bind(&smi_is_unsuitable);
-      } else {
-        __ jmp(&not_smi);
       }
-      HandleBinaryOpSlowCases(masm,
-                              &not_smi,
-                              op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
-                              op_,
-                              mode_);
+      HandleBinaryOpSlowCases(
+          masm,
+          &not_smi,
+          lhs,
+          rhs,
+          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
       break;
     }
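
The fixed-point reciprocal trick for a non-power-of-2 divisor can be verified
in isolation. A sketch under assumed values (d = 3 here; the stub performs
this search at code-generation time for constant_rhs_): pick mul close to
2^shift / d with mul in the upper half of the 32-bit range, and
(n * mul) >> shift then equals n / d across the smi range.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t d = 3;  // Known divisor (assumed for this demo).
      // Maximise precision: grow the shift until mul fills at least 31 bits.
      int shift = 32;
      double scale = 4294967296.0;  // 2^32
      uint32_t mul;
      while (true) {
        mul = static_cast<uint32_t>(scale / d);
        if (mul >= 0x7fffffffu) break;
        scale *= 2.0;
        shift++;
      }
      mul++;  // Round up so the quotient never comes out too small.
      for (uint32_t n = 0; n <= 100000; n++) {
        uint32_t q =
            static_cast<uint32_t>((static_cast<uint64_t>(n) * mul) >> shift);
        if (q != n / d) {
          printf("mismatch at n=%u\n", n);
          return 1;
        }
      }
      printf("d=%u: mul=%u shift=%d verified\n", d, mul, shift);
      return 0;
    }
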
 
@@ -6140,47 +7563,49 @@
     case Token::SHL: {
       Label slow;
       ASSERT(kSmiTag == 0);  // adjust code below
-      __ tst(r2, Operand(kSmiTagMask));
+      __ tst(smi_test_reg, Operand(kSmiTagMask));
       __ b(ne, &slow);
+      Register scratch2 = smi_test_reg;
+      smi_test_reg = no_reg;
       switch (op_) {
-        case Token::BIT_OR:  __ orr(r0, r0, Operand(r1)); break;
-        case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
-        case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
+        case Token::BIT_OR:  __ orr(result, rhs, Operand(lhs)); break;
+        case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
+        case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
         case Token::SAR:
           // Remove tags from right operand.
-          __ GetLeastBitsFromSmi(r2, r0, 5);
-          __ mov(r0, Operand(r1, ASR, r2));
+          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
+          __ mov(result, Operand(lhs, ASR, scratch2));
           // Smi tag result.
-          __ bic(r0, r0, Operand(kSmiTagMask));
+          __ bic(result, result, Operand(kSmiTagMask));
           break;
         case Token::SHR:
           // Remove tags from operands.  We can't do this on a 31 bit number
           // because then the 0s get shifted into bit 30 instead of bit 31.
-          __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
-          __ GetLeastBitsFromSmi(r2, r0, 5);
-          __ mov(r3, Operand(r3, LSR, r2));
+          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
+          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
+          __ mov(scratch, Operand(scratch, LSR, scratch2));
           // Unsigned shift is not allowed to produce a negative number, so
           // check the sign bit and the sign bit after Smi tagging.
-          __ tst(r3, Operand(0xc0000000));
+          __ tst(scratch, Operand(0xc0000000));
           __ b(ne, &slow);
           // Smi tag result.
-          __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
           break;
         case Token::SHL:
           // Remove tags from operands.
-          __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
-          __ GetLeastBitsFromSmi(r2, r0, 5);
-          __ mov(r3, Operand(r3, LSL, r2));
+          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
+          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
+          __ mov(scratch, Operand(scratch, LSL, scratch2));
           // Check that the signed result fits in a Smi.
-          __ add(r2, r3, Operand(0x40000000), SetCC);
+          __ add(scratch2, scratch, Operand(0x40000000), SetCC);
           __ b(mi, &slow);
-          __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
           break;
         default: UNREACHABLE();
       }
       __ Ret();
       __ bind(&slow);
-      HandleNonSmiBitwiseOp(masm);
+      HandleNonSmiBitwiseOp(masm, lhs, rhs);
       break;
     }
 
@@ -6188,6 +7613,51 @@
   }
   // This code should be unreachable.
   __ stop("Unreachable");
+
+  // Generate an unreachable reference to the DEFAULT stub so that it can be
+  // found at the end of this stub when clearing ICs at GC.
+  // TODO(kaznacheev): Check performance impact and get rid of this.
+  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
+    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
+    __ CallStub(&uninit);
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  __ Push(r1, r0);
+
+  // Internal frame is necessary to handle exceptions properly.
+  __ EnterInternalFrame();
+  // Call the stub proper to get the result in r0.
+  __ Call(&get_result);
+  __ LeaveInternalFrame();
+
+  __ push(r0);
+
+  __ mov(r0, Operand(Smi::FromInt(MinorKey())));
+  __ push(r0);
+  __ mov(r0, Operand(Smi::FromInt(op_)));
+  __ push(r0);
+  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
+  __ push(r0);
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+      6,
+      1);
+
+  // The entry point for the result calculation is assumed to be immediately
+  // after this sequence.
+  __ bind(&get_result);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  GenericBinaryOpStub stub(key, type_info);
+  return stub.GetCode();
 }
 
 
@@ -6196,7 +7666,7 @@
   // argument, so give it a Smi.
   __ mov(r0, Operand(Smi::FromInt(0)));
   __ push(r0);
-  __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
+  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
 
   __ StubReturn(1);
 }
@@ -6233,7 +7703,7 @@
       __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
       __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
     } else {
-      AllocateHeapNumber(masm, &slow, r1, r2, r3);
+      __ AllocateHeapNumber(r1, r2, r3, &slow);
       __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
       __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
       __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
@@ -6263,7 +7733,7 @@
       // Allocate a fresh heap number, but don't overwrite r0 until
       // we're sure we can do it without going through the slow case
       // that needs the value in r0.
-      AllocateHeapNumber(masm, &slow, r2, r3, r4);
+      __ AllocateHeapNumber(r2, r3, r4, &slow);
       __ mov(r0, Operand(r2));
     }
 
@@ -6404,7 +7874,8 @@
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
                               bool do_gc,
-                              bool always_allocate) {
+                              bool always_allocate,
+                              int frame_alignment_skew) {
   // r0: result parameter for PerformGC, if any
   // r4: number of arguments including receiver  (C callee-saved)
   // r5: pointer to builtin function  (C callee-saved)
@@ -6412,8 +7883,8 @@
 
   if (do_gc) {
     // Passing r0.
-    ExternalReference gc_reference = ExternalReference::perform_gc_function();
-    __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
+    __ PrepareCallCFunction(1, r1);
+    __ CallCFunction(ExternalReference::perform_gc_function(), 1);
   }
 
   ExternalReference scope_depth =
@@ -6430,6 +7901,37 @@
   __ mov(r0, Operand(r4));
   __ mov(r1, Operand(r6));
 
+  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+  int frame_alignment_mask = frame_alignment - 1;
+#if defined(V8_HOST_ARCH_ARM)
+  if (FLAG_debug_code) {
+    if (frame_alignment > kPointerSize) {
+      Label alignment_as_expected;
+      ASSERT(IsPowerOf2(frame_alignment));
+      __ sub(r2, sp, Operand(frame_alignment_skew));
+      __ tst(r2, Operand(frame_alignment_mask));
+      __ b(eq, &alignment_as_expected);
+      // Don't use Check here; it would call Runtime_Abort, re-entering here.
+      __ stop("Unexpected alignment");
+      __ bind(&alignment_as_expected);
+    }
+  }
+#endif
+
+  // Just before the call (jump) below, lr is pushed, so the actual skew at
+  // the call site is one pointer more than the current skew.
+  int alignment_before_call =
+      (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
+  if (alignment_before_call > 0) {
+    // Push until the alignment before the call is met.
+    __ mov(r2, Operand(0));
+    for (int i = alignment_before_call;
+        (i & frame_alignment_mask) != 0;
+        i += kPointerSize) {
+      __ push(r2);
+    }
+  }
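
The skew arithmetic is easier to follow with concrete numbers. A sketch
assuming an 8-byte EABI activation alignment and 4-byte pointers (the real
values come from ActivationFrameAlignment()):

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;
      const int frame_alignment = 8;  // Assumed activation alignment.
      const int mask = frame_alignment - 1;
      // The three skews passed by CEntryStub::Generate further below.
      const int skews[] = {-kPointerSize, 0, kPointerSize};
      for (int skew : skews) {
        // lr is pushed just before the jump, hence the +kPointerSize.
        int before_call = (skew + kPointerSize) & mask;
        int filler_pushes = 0;
        for (int i = before_call; (i & mask) != 0; i += kPointerSize) {
          filler_pushes++;  // One dummy word pushed per missing slot.
        }
        printf("skew=%2d -> %d filler push(es)\n", skew, filler_pushes);
      }
      return 0;
    }
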
+
   // TODO(1242173): To let the GC traverse the return address of the exit
   // frames, we need to know where the return address is. Right now,
   // we push it on the stack to be able to find it again, but we never
@@ -6437,10 +7939,15 @@
   // support moving the C entry code stub. This should be fixed, but currently
   // this is OK because the CEntryStub gets generated so early in the V8 boot
   // sequence that it is not moving ever.
-  masm->add(lr, pc, Operand(4));  // compute return address: (pc + 8) + 4
+  masm->add(lr, pc, Operand(4));  // Compute return address: (pc + 8) + 4
   masm->push(lr);
   masm->Jump(r5);
 
+  // Restore sp to its value from before the alignment pushes.
+  if (alignment_before_call > 0) {
+    __ add(sp, sp, Operand(alignment_before_call));
+  }
+
   if (always_allocate) {
     // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
     // though (contain the result).
@@ -6527,7 +8034,8 @@
                &throw_termination_exception,
                &throw_out_of_memory_exception,
                false,
-               false);
+               false,
+               -kPointerSize);
 
   // Do space-specific GC and retry runtime call.
   GenerateCore(masm,
@@ -6535,7 +8043,8 @@
                &throw_termination_exception,
                &throw_out_of_memory_exception,
                true,
-               false);
+               false,
+               0);
 
   // Do full GC and retry runtime call one final time.
   Failure* failure = Failure::InternalError();
@@ -6545,7 +8054,8 @@
                &throw_termination_exception,
                &throw_out_of_memory_exception,
                true,
-               true);
+               true,
+               kPointerSize);
 
   __ bind(&throw_out_of_memory_exception);
   GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
@@ -6591,7 +8101,7 @@
   __ mov(r6, Operand(Smi::FromInt(marker)));
   __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
   __ ldr(r5, MemOperand(r5));
-  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());
+  __ Push(r8, r7, r6, r5);
 
   // Setup frame pointer for the frame to be pushed.
   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -6738,26 +8248,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor;
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(eq, &adaptor);
-
-  // Nothing to do: The formal number of parameters has already been
-  // passed in register r0 by calling function. Just return it.
-  __ Jump(lr);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame and return it.
-  __ bind(&adaptor);
-  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ Jump(lr);
-}
-
-
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The displacement is the offset of the last parameter (if any)
   // relative to the frame pointer.
@@ -6805,7 +8295,7 @@
   // by calling the runtime system.
   __ bind(&slow);
   __ push(r1);
-  __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
+  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
 }
 
 
@@ -6908,7 +8398,354 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+  // Jump straight to the runtime if native RegExp is not selected at compile
+  // time, or if the regexp entry in generated code has been turned off,
+  // either by the runtime switch or at compilation.
+#ifndef V8_NATIVE_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else  // V8_NATIVE_REGEXP
+  if (!FLAG_regexp_entry_native) {
+    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+    return;
+  }
+
+  // Stack frame on entry.
+  //  sp[0]: last_match_info (expected JSArray)
+  //  sp[4]: previous index
+  //  sp[8]: subject string
+  //  sp[12]: JSRegExp object
+
+  static const int kLastMatchInfoOffset = 0 * kPointerSize;
+  static const int kPreviousIndexOffset = 1 * kPointerSize;
+  static const int kSubjectOffset = 2 * kPointerSize;
+  static const int kJSRegExpOffset = 3 * kPointerSize;
+
+  Label runtime, invoke_regexp;
+
+  // Allocation of registers for this function. These are in callee-saved
+  // registers and will be preserved by the call to the native RegExp code, as
+  // this code is called using the normal C calling convention. When called
+  // directly from generated code the native RegExp code will not do a GC, so
+  // the contents of these registers are safe to use after the call.
+  Register subject = r4;
+  Register regexp_data = r5;
+  Register last_match_info_elements = r6;
+
+  // Ensure that a RegExp stack is allocated.
+  ExternalReference address_of_regexp_stack_memory_address =
+      ExternalReference::address_of_regexp_stack_memory_address();
+  ExternalReference address_of_regexp_stack_memory_size =
+      ExternalReference::address_of_regexp_stack_memory_size();
+  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
+  __ ldr(r0, MemOperand(r0, 0));
+  __ tst(r0, Operand(r0));
+  __ b(eq, &runtime);
+
+  // Check that the first argument is a JSRegExp object.
+  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
+  ASSERT_EQ(0, kSmiTag);
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &runtime);
+  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+  __ b(ne, &runtime);
+
+  // Check that the RegExp has been compiled (data contains a fixed array).
+  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
+  if (FLAG_debug_code) {
+    __ tst(regexp_data, Operand(kSmiTagMask));
+    __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
+    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
+    __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
+  }
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+  __ b(ne, &runtime);
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the number of captures fit in the static offsets vector buffer.
+  __ ldr(r2,
+         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate the number of capture registers, (number_of_captures + 1) * 2.
+  // This uses the assumption that smis are 2 * their untagged value.
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  __ add(r2, r2, Operand(2));  // r2 was a smi.
+  // Check that the static offsets vector buffer is large enough.
+  __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
+  __ b(hi, &runtime);
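
The add-2-to-a-smi shortcut deserves a one-line proof: with tag 0 and a 1-bit
tag size a smi stores 2*n, so adding 2 to the tagged value is already
(n + 1) * 2, the capture register count. A self-contained check (constants
assumed, matching the ASSERTs above):

    #include <cassert>

    int main() {
      const int kSmiTagSize = 1;  // Assumed, as asserted above.
      for (int captures = 0; captures < 100; captures++) {
        int smi = captures << kSmiTagSize;  // Tagged value is 2 * captures.
        int register_count = smi + 2;       // What the stub computes.
        assert(register_count == (captures + 1) * 2);
      }
      return 0;
    }
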
+
+  // r2: Number of capture registers
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the second argument is a string.
+  __ ldr(subject, MemOperand(sp, kSubjectOffset));
+  __ tst(subject, Operand(kSmiTagMask));
+  __ b(eq, &runtime);
+  Condition is_string = masm->IsObjectStringType(subject, r0);
+  __ b(NegateCondition(is_string), &runtime);
+  // Get the length of the string to r3.
+  __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
+
+  // r2: Number of capture registers
+  // r3: Length of subject string as a smi
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the third argument is a positive smi less than the subject
+  // string length. A negative value will be greater (unsigned comparison).
+  __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &runtime);
+  __ cmp(r3, Operand(r0));
+  __ b(le, &runtime);
+
+  // r2: Number of capture registers
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the fourth object is a JSArray object.
+  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &runtime);
+  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+  __ b(ne, &runtime);
+  // Check that the JSArray is in fast case.
+  __ ldr(last_match_info_elements,
+         FieldMemOperand(r0, JSArray::kElementsOffset));
+  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+#if ANDROID
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+#else
+  __ LoadRoot(ip, kFixedArrayMapRootIndex);
+#endif
+  __ cmp(r0, ip);
+  __ b(ne, &runtime);
+  // Check that the last match info has space for the capture registers and the
+  // additional information.
+  __ ldr(r0,
+         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+  __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
+  __ cmp(r2, r0);
+  __ b(gt, &runtime);
+
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check the representation and encoding of the subject string.
+  Label seq_string;
+  const int kStringRepresentationEncodingMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+  __ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
+  // First check for sequential string.
+  ASSERT_EQ(0, kStringTag);
+  ASSERT_EQ(0, kSeqStringTag);
+  __ tst(r1, Operand(kIsNotStringMask | kStringRepresentationMask));
+  __ b(eq, &seq_string);
+
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check for flat cons string.
+  // A flat cons string is a cons string where the second part is the empty
+  // string. In that case the subject string is just the first part of the cons
+  // string. Also in this case the first part of the cons string is known to be
+  // a sequential string or an external string.
+  __ and_(r0, r0, Operand(kStringRepresentationMask));
+  __ cmp(r0, Operand(kConsStringTag));
+  __ b(ne, &runtime);
+  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
+  __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
+  __ cmp(r0, r1);
+  __ b(ne, &runtime);
+  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+  ASSERT_EQ(0, kSeqStringTag);
+  __ tst(r0, Operand(kStringRepresentationMask));
+  __ b(nz, &runtime);
+  __ and_(r1, r0, Operand(kStringRepresentationEncodingMask));
+
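
In outline, the flat-cons check above amounts to the following sketch
(hypothetical stand-in types, not the V8 object model): only a cons string
whose second half is the empty string may be unwrapped to its first half;
anything else goes to the runtime to be flattened.

    // Hypothetical stand-ins for the heap objects involved.
    struct Str {
      bool is_cons;
      bool is_empty;
      Str* first;   // Only meaningful when is_cons.
      Str* second;  // Only meaningful when is_cons.
    };

    // Returns the usable string, or nullptr for "go to runtime".
    Str* TryUnwrapFlatCons(Str* subject) {
      if (!subject->is_cons) return subject;
      if (!subject->second->is_empty) return nullptr;  // Needs flattening.
      return subject->first;  // Known to be sequential or external.
    }
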
+  __ bind(&seq_string);
+  // r1: subject string type & kStringRepresentationEncodingMask
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the irregexp code has been generated for an ASCII string. If
+  // it has, the field contains a code object; otherwise it contains the hole.
+#ifdef DEBUG
+  const int kSeqAsciiString = kStringTag | kSeqStringTag | kAsciiStringTag;
+  const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
+  CHECK_EQ(4, kSeqAsciiString);
+  CHECK_EQ(0, kSeqTwoByteString);
+#endif
+  // Find the code object based on the assumptions above.
+  __ mov(r3, Operand(r1, ASR, 2), SetCC);
+  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains the hole.
+  __ CompareObjectType(r7, r0, r0, CODE_TYPE);
+  __ b(ne, &runtime);
+
+  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
+  // r7: code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // Load used arguments before starting to push arguments for call to native
+  // RegExp code to avoid handling changing stack height.
+  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
+  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+
+  // r1: previous index
+  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
+  // r7: code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // All checks done. Now push arguments for native regexp code.
+  __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
+
+  static const int kRegExpExecuteArguments = 7;
+  __ push(lr);
+  __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
+
+  // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
+  __ mov(r0, Operand(1));
+  __ str(r0, MemOperand(sp, 2 * kPointerSize));
+
+  // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
+  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
+  __ ldr(r0, MemOperand(r0, 0));
+  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
+  __ ldr(r2, MemOperand(r2, 0));
+  __ add(r0, r0, Operand(r2));
+  __ str(r0, MemOperand(sp, 1 * kPointerSize));
+
+  // Argument 5 (sp[0]): static offsets vector buffer.
+  __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
+  __ str(r0, MemOperand(sp, 0 * kPointerSize));
+
+  // For arguments 4 and 3, get the string length, calculate the start of the
+  // string data, and calculate the shift of the index (0 for ASCII, 1 for
+  // two-byte).
+  __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
+  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+  ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize);
+  __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ eor(r3, r3, Operand(1));
+  // Argument 4 (r3): End of string data
+  // Argument 3 (r2): Start of string data
+  __ add(r2, r9, Operand(r1, LSL, r3));
+  __ add(r3, r9, Operand(r0, LSL, r3));
+
+  // Argument 2 (r1): Previous index.
+  // Already there
+
+  // Argument 1 (r0): Subject string.
+  __ mov(r0, subject);
+
+  // Locate the code entry and call it.
+  __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ CallCFunction(r7, kRegExpExecuteArguments);
+  __ pop(lr);
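
Backing up to arguments 3 and 4: after the eor, r3 holds 0 for ASCII and 1
for two-byte, so shifting a character index left by r3 scales it to a byte
offset. A sketch with an assumed header size:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kHeaderSize = 12;  // Assumed stand-in for the
                                         // sequential string header.
      uintptr_t string_data = 0x1000 + kHeaderSize;  // r9 in the stub.
      int previous_index = 5;                        // r1, untagged.
      int length = 11;                               // r0, untagged.
      for (int shift = 0; shift <= 1; shift++) {     // 0: ASCII, 1: two-byte.
        uintptr_t start = string_data + (previous_index << shift);  // r2.
        uintptr_t end = string_data + (length << shift);            // r3.
        int char_size = 1 << shift;
        assert(start == string_data + previous_index * char_size);
        assert(end == string_data + length * char_size);
      }
      return 0;
    }
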
+
+  // r0: result
+  // subject: subject string (callee saved)
+  // regexp_data: RegExp data (callee saved)
+  // last_match_info_elements: Last match info elements (callee saved)
+
+  // Check the result.
+  Label success;
+  __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+  __ b(eq, &success);
+  Label failure;
+  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
+  __ b(eq, &failure);
+  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  // If the result is not exception, it can only be retry. Handle that in the
+  // runtime system.
+  __ b(ne, &runtime);
+  // The result must now be exception. If there is no pending exception
+  // already, a stack overflow (on the backtrack stack) was detected in the
+  // RegExp code but the exception has not been created yet. Handle that in
+  // the runtime system.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+  __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
+  __ ldr(r0, MemOperand(r0, 0));
+  __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
+  __ ldr(r1, MemOperand(r1, 0));
+  __ cmp(r0, r1);
+  __ b(eq, &runtime);
+  __ bind(&failure);
+  // For failure and exception return null.
+  __ mov(r0, Operand(Factory::null_value()));
+  __ add(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  // Process the result from the native regexp code.
+  __ bind(&success);
+  __ ldr(r1,
+         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2.
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  __ add(r1, r1, Operand(2));  // r1 was a smi.
+
+  // r1: number of capture registers
+  // r4: subject string
+  // Store the capture count.
+  __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize));  // To smi.
+  __ str(r2, FieldMemOperand(last_match_info_elements,
+                             RegExpImpl::kLastCaptureCountOffset));
+  // Store last subject and last input.
+  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
+  __ mov(r2, Operand(RegExpImpl::kLastSubjectOffset));  // Ditto.
+  __ str(subject,
+         FieldMemOperand(last_match_info_elements,
+                         RegExpImpl::kLastSubjectOffset));
+  __ RecordWrite(r3, r2, r7);
+  __ str(subject,
+         FieldMemOperand(last_match_info_elements,
+                         RegExpImpl::kLastInputOffset));
+  __ mov(r3, last_match_info_elements);
+  __ mov(r2, Operand(RegExpImpl::kLastInputOffset));
+  __ RecordWrite(r3, r2, r7);
+
+  // Get the static offsets vector filled by the native regexp code.
+  ExternalReference address_of_static_offsets_vector =
+      ExternalReference::address_of_static_offsets_vector();
+  __ mov(r2, Operand(address_of_static_offsets_vector));
+
+  // r1: number of capture registers
+  // r2: offsets vector
+  Label next_capture, done;
+  // The capture register counter starts from the number of capture registers
+  // and counts down until wrapping after zero.
+  __ add(r0,
+         last_match_info_elements,
+         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+  __ bind(&next_capture);
+  __ sub(r1, r1, Operand(1), SetCC);
+  __ b(mi, &done);
+  // Read the value from the static offsets vector buffer.
+  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
+  // Store the smi value in the last match info.
+  __ mov(r3, Operand(r3, LSL, kSmiTagSize));
+  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
+  __ jmp(&next_capture);
+  __ bind(&done);
+
+  // Return last match info.
+  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+  __ add(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to execute the regexp.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif  // V8_NATIVE_REGEXP
 }
 
 
@@ -6970,62 +8807,207 @@
 }
 
 
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
 const char* CompareStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
   switch (cc_) {
-    case lt: return "CompareStub_LT";
-    case gt: return "CompareStub_GT";
-    case le: return "CompareStub_LE";
-    case ge: return "CompareStub_GE";
-    case ne: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_NE_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_NO_NAN";
-        } else {
-          return "CompareStub_NE";
-        }
-      }
-    }
-    case eq: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_EQ_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_NO_NAN";
-        } else {
-          return "CompareStub_EQ";
-        }
-      }
-    }
-    default: return "CompareStub";
+    case lt: cc_name = "LT"; break;
+    case gt: cc_name = "GT"; break;
+    case le: cc_name = "LE"; break;
+    case ge: cc_name = "GE"; break;
+    case eq: cc_name = "EQ"; break;
+    case ne: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
   }
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == eq || cc_ == ne)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s",
+               cc_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name);
+  return name_;
 }
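
The flattened naming scheme yields strings like CompareStub_GE or
CompareStub_EQ_STRICT_NO_NAN. A tiny demonstration of the format string
(flag combination assumed):

    #include <cstdio>

    int main() {
      char name[100];
      snprintf(name, sizeof(name), "CompareStub_%s%s%s%s",
               "EQ", "_STRICT", "_NO_NAN", "_NO_NUMBER");
      printf("%s\n", name);  // CompareStub_EQ_STRICT_NO_NAN_NO_NUMBER
      return 0;
    }
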
 
 
 int CompareStub::MinorKey() {
-  // Encode the three parameters in a unique 16 bit value.
-  ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16));
-  int nnn_value = (never_nan_nan_ ? 2 : 0);
-  if (cc_ != eq) nnn_value = 0;  // Avoid duplicate stubs.
-  return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0);
+  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+  // stubs the never NaN NaN condition is only taken into account if the
+  // condition is equals.
+  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 13));
+  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+         | IncludeNumberCompareField::encode(include_number_compare_);
 }
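
A sketch of the BitField packing MinorKey relies on; the field order and
widths below are assumptions for illustration, the real layout coming from
the ConditionField/StrictField/NeverNanNanField/IncludeNumberCompareField
declarations:

    #include <cassert>
    #include <cstdint>

    // Assumed layout: three flag bits, then the condition bits above them.
    static uint32_t Encode(uint32_t cond, bool strict, bool never_nan_nan,
                           bool include_number_compare) {
      return (strict ? 1u : 0u) |
             ((never_nan_nan ? 1u : 0u) << 1) |
             ((include_number_compare ? 1u : 0u) << 2) |
             (cond << 3);
    }

    int main() {
      // All the stub cache needs from MinorKey is that distinct parameter
      // combinations produce distinct keys.
      assert(Encode(1, true, false, true) != Encode(1, false, false, true));
      assert(Encode(1, true, true, true) != Encode(2, true, true, true));
      return 0;
    }
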
 
 
-void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
-                                            Register dest,
-                                            Register src,
-                                            Register count,
-                                            Register scratch,
-                                            bool ascii) {
+void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
+                                          Register object,
+                                          Register index,
+                                          Register scratch,
+                                          Register result,
+                                          Label* receiver_not_string,
+                                          Label* index_not_smi,
+                                          Label* index_out_of_range,
+                                          Label* slow_case) {
+  Label not_a_flat_string;
+  Label try_again_with_new_string;
+  Label ascii_string;
+  Label got_char_code;
+
+  // If the receiver is a smi trigger the non-string case.
+  __ BranchOnSmi(object, receiver_not_string);
+
+  // Fetch the instance type of the receiver into result register.
+  __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the non-string case.
+  __ tst(result, Operand(kIsNotStringMask));
+  __ b(ne, receiver_not_string);
+
+  // If the index is non-smi trigger the non-smi case.
+  __ BranchOnNotSmi(index, index_not_smi);
+
+  // Check for index out of range.
+  __ ldr(scratch, FieldMemOperand(object, String::kLengthOffset));
+  // Now scratch has the length of the string.  Compare with the index.
+  __ cmp(scratch, Operand(index));
+  __ b(ls, index_out_of_range);
+
+  __ bind(&try_again_with_new_string);
+  // ----------- S t a t e -------------
+  //  -- object  : string to access
+  //  -- result  : instance type of the string
+  //  -- scratch : non-negative index < length
+  // -----------------------------------
+
+  // We need special handling for non-flat strings.
+  ASSERT_EQ(0, kSeqStringTag);
+  __ tst(result, Operand(kStringRepresentationMask));
+  __ b(ne, &not_a_flat_string);
+
+  // Check for 1-byte or 2-byte string.
+  ASSERT_EQ(0, kTwoByteStringTag);
+  __ tst(result, Operand(kStringEncodingMask));
+  __ b(ne, &ascii_string);
+
+  // 2-byte string.  We can add without shifting since the Smi tag size is the
+  // log2 of the number of bytes in a two-byte character.
+  ASSERT_EQ(1, kSmiTagSize);
+  ASSERT_EQ(0, kSmiShiftSize);
+  __ add(scratch, object, Operand(index));
+  __ ldrh(result, FieldMemOperand(scratch, SeqTwoByteString::kHeaderSize));
+  __ jmp(&got_char_code);
+
+  // Handle non-flat strings.
+  __ bind(&not_a_flat_string);
+  __ and_(result, result, Operand(kStringRepresentationMask));
+  __ cmp(result, Operand(kConsStringTag));
+  __ b(ne, slow_case);
+
+  // ConsString.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ ldr(result, FieldMemOperand(object, ConsString::kSecondOffset));
+  __ LoadRoot(scratch, Heap::kEmptyStringRootIndex);
+  __ cmp(result, Operand(scratch));
+  __ b(ne, slow_case);
+
+  // Get the first of the two strings and load its instance type.
+  __ ldr(object, FieldMemOperand(object, ConsString::kFirstOffset));
+  __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+  __ jmp(&try_again_with_new_string);
+
+  // ASCII string.
+  __ bind(&ascii_string);
+  __ add(scratch, object, Operand(index, LSR, kSmiTagSize));
+  __ ldrb(result, FieldMemOperand(scratch, SeqAsciiString::kHeaderSize));
+
+  __ bind(&got_char_code);
+  __ mov(result, Operand(result, LSL, kSmiTagSize));
+}
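
One arithmetic detail in the two-byte path above: since the smi tag size (one
bit) equals log2 of the two-byte character size, the tagged index is already
the byte offset, so no shift is needed. A quick check with assumed constants:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSmiTagSize = 1;  // Assumed, as asserted in the stub above.
      for (int i = 0; i < 1000; i++) {
        int smi_index = i << kSmiTagSize;  // Tagged index: 2 * i.
        int byte_offset = static_cast<int>(i * sizeof(uint16_t));
        assert(smi_index == byte_offset);
      }
      return 0;
    }
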
+
+
+void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
+                                        Register code,
+                                        Register scratch,
+                                        Register result,
+                                        InvokeFlag flag) {
+  ASSERT(!code.is(result));
+
+  Label slow_case;
+  Label exit;
+
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiShiftSize == 0);
+  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+  __ tst(code, Operand(kSmiTagMask |
+                       ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+  __ b(nz, &slow_case);
+
+  ASSERT(kSmiTag == 0);
+  __ mov(result, Operand(Factory::single_character_string_cache()));
+  __ add(result, result, Operand(code, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(result, MemOperand(result, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ cmp(result, scratch);
+  __ b(eq, &slow_case);
+  __ b(&exit);
+
+  __ bind(&slow_case);
+  if (flag == CALL_FUNCTION) {
+    __ push(code);
+    __ CallRuntime(Runtime::kCharFromCode, 1);
+    if (!result.is(r0)) {
+      __ mov(result, r0);
+    }
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    ASSERT(result.is(r0));
+    __ push(code);
+    __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
+  }
+
+  __ bind(&exit);
+  if (flag == JUMP_FUNCTION) {
+    ASSERT(result.is(r0));
+    __ Ret();
+  }
+}
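
The single tst above folds two tests (is the value a smi, and is the char
code ASCII) into one mask. A sketch of the mask arithmetic with assumed tag
constants:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kSmiTagMask = 1;      // Assumed: tag 0, tag size 1.
      const uint32_t kMaxAsciiCharCode = 127;
      const uint32_t mask = kSmiTagMask | (~kMaxAsciiCharCode << 1);
      for (uint32_t code = 0; code <= kMaxAsciiCharCode; code++) {
        uint32_t smi = code << 1;          // Smi-tagged char code.
        assert((smi & mask) == 0);         // Fast path taken.
      }
      assert(((128u << 1) & mask) != 0);       // Out of range: slow path.
      assert((((5u << 1) | 1u) & mask) != 0);  // Not a smi: slow path.
      return 0;
    }
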
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+                                          Register dest,
+                                          Register src,
+                                          Register count,
+                                          Register scratch,
+                                          bool ascii) {
   Label loop;
   Label done;
   // This loop just copies one character at a time, as it is only used for very
@@ -7056,16 +9038,16 @@
 };
 
 
-void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm,
-                                                Register dest,
-                                                Register src,
-                                                Register count,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                Register scratch4,
-                                                Register scratch5,
-                                                int flags) {
+void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
+                                              Register dest,
+                                              Register src,
+                                              Register count,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3,
+                                              Register scratch4,
+                                              Register scratch5,
+                                              int flags) {
   bool ascii = (flags & COPY_ASCII) != 0;
   bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
 
@@ -7199,6 +9181,168 @@
 }
 
 
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                        Register c1,
+                                                        Register c2,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Register scratch4,
+                                                        Register scratch5,
+                                                        Label* not_found) {
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that both characters are not digits, as such strings have a
+  // different hash algorithm. Don't try to look for these in the symbol table.
+  Label not_array_index;
+  __ sub(scratch, c1, Operand(static_cast<int>('0')));
+  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+  __ b(hi, &not_array_index);
+  __ sub(scratch, c2, Operand(static_cast<int>('0')));
+  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+
+  // If the check failed, combine both characters into a single halfword.
+  // This is required by the contract of the method: code at the
+  // not_found branch expects this combination in the c1 register.
+  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
+  __ b(ls, not_found);
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  StringHelper::GenerateHashInit(masm, hash, c1);
+  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+  StringHelper::GenerateHashGetHash(masm, hash);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string.
+
+  // Load symbol table
+  // Load address of first element of the symbol table.
+  Register symbol_table = c2;
+  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+  // Load undefined value
+  Register undefined = scratch4;
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+  __ mov(mask, Operand(mask, ASR, 1));
+  __ sub(mask, mask, Operand(1));
+
+  // Calculate untagged address of the first element of the symbol table.
+  Register first_symbol_table_element = symbol_table;
+  __ add(first_symbol_table_element, symbol_table,
+         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+  // Registers
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string
+  // mask:  capacity mask
+  // first_symbol_table_element: address of the first element of
+  //                             the symbol table
+  // scratch: -
+
+  // Perform a number of probes in the symbol table.
+  static const int kProbes = 4;
+  Label found_in_symbol_table;
+  Label next_probe[kProbes];
+  for (int i = 0; i < kProbes; i++) {
+    Register candidate = scratch5;  // Scratch register contains candidate.
+
+    // Calculate entry in symbol table.
+    if (i > 0) {
+      __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+    } else {
+      __ mov(candidate, hash);
+    }
+
+    __ and_(candidate, candidate, Operand(mask));
+
+    // Load the entry from the symbol table.
+    ASSERT_EQ(1, SymbolTable::kEntrySize);
+    __ ldr(candidate,
+           MemOperand(first_symbol_table_element,
+                      candidate,
+                      LSL,
+                      kPointerSizeLog2));
+
+    // If entry is undefined no string with this hash can be found.
+    __ cmp(candidate, undefined);
+    __ b(eq, not_found);
+
+    // If length is not 2 the string is not a candidate.
+    __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
+    __ cmp(scratch, Operand(Smi::FromInt(2)));
+    __ b(ne, &next_probe[i]);
+
+    // Check that the candidate is a non-external ascii string.
+    __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
+                                              &next_probe[i]);
+
+    // Check if the two characters match.
+    // Assumes that word load is little endian.
+    __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+    __ cmp(chars, scratch);
+    __ b(eq, &found_in_symbol_table);
+    __ bind(&next_probe[i]);
+  }
+
+  // No matching 2 character string found by probing.
+  __ jmp(not_found);
+
+  // Scratch register contains result when we fall through to here.
+  Register result = scratch;
+  __ bind(&found_in_symbol_table);
+  __ Move(r0, result);
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+                                    Register hash,
+                                    Register character) {
+  // hash = character + (character << 10);
+  __ add(hash, character, Operand(character, LSL, 10));
+  // hash ^= hash >> 6;
+  __ eor(hash, hash, Operand(hash, ASR, 6));
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+                                            Register hash,
+                                            Register character) {
+  // hash += character;
+  __ add(hash, hash, Operand(character));
+  // hash += hash << 10;
+  __ add(hash, hash, Operand(hash, LSL, 10));
+  // hash ^= hash >> 6;
+  __ eor(hash, hash, Operand(hash, ASR, 6));
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+                                       Register hash) {
+  // hash += hash << 3;
+  __ add(hash, hash, Operand(hash, LSL, 3));
+  // hash ^= hash >> 11;
+  __ eor(hash, hash, Operand(hash, LSR, 11));
+  // hash += hash << 15;
+  __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
+
+  // if (hash == 0) hash = 27;
+  __ mov(hash, Operand(27), LeaveCC, eq);
+}
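
Together the three hash helpers compute the Jenkins one-at-a-time hash that the C++ StringHasher uses, so hashes produced in generated code agree with the ones under which symbols were inserted into the table. In scalar C++ the two character case is roughly (a sketch):

    uint32_t HashTwoCharacterString(uint8_t c1, uint8_t c2) {
      uint32_t hash = c1 + (c1 << 10);  // GenerateHashInit
      hash ^= hash >> 6;
      hash += c2;                       // GenerateHashAddCharacter
      hash += hash << 10;
      hash ^= hash >> 6;
      hash += hash << 3;                // GenerateHashGetHash
      hash ^= hash >> 11;
      hash += hash << 15;
      if (hash == 0) hash = 27;         // a zero hash is reserved
      return hash;
    }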
+
+
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -7234,11 +9378,14 @@
 
   __ sub(r2, r2, Operand(r3), SetCC);
   __ b(mi, &runtime);  // Fail if from > to.
-  // Handle sub-strings of length 2 and less in the runtime system.
+  // Special handling of sub-strings of length 1 and 2. One character strings
+  // are handled in the runtime system (looked up in the single character
+  // cache). Two character strings are looked up in the symbol table.
   __ cmp(r2, Operand(2));
-  __ b(le, &runtime);
+  __ b(lt, &runtime);
 
   // r2: length
+  // r3: from index (untagged smi)
   // r6: from (smi)
   // r7: to (smi)
 
@@ -7252,6 +9399,7 @@
 
   // r1: instance type
   // r2: length
+  // r3: from index (untagged smi)
   // r5: string
   // r6: from (smi)
   // r7: to (smi)
@@ -7278,15 +9426,17 @@
 
   // r1: instance type.
   // r2: length
+  // r3: from index (untagged smi)
   // r5: string
   // r6: from (smi)
   // r7: to (smi)
   __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
-  __ cmp(r4, Operand(r7, ASR, 1));
+  __ cmp(r4, Operand(r7));
   __ b(lt, &runtime);  // Fail if to > length.
 
   // r1: instance type.
   // r2: result string length.
+  // r3: from index (untagged smi)
   // r5: string.
   // r6: from offset (smi)
   // Check for flat ascii string.
@@ -7295,6 +9445,35 @@
   ASSERT_EQ(0, kTwoByteStringTag);
   __ b(eq, &non_ascii_flat);
 
+  Label result_longer_than_two;
+  __ cmp(r2, Operand(2));
+  __ b(gt, &result_longer_than_two);
+
+  // Sub string of length 2 requested.
+  // Get the two characters forming the sub string.
+  __ add(r5, r5, Operand(r3));
+  __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
+  __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
+
+  // Try to look up the two character string in the symbol table.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
+  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // r2: result string length.
+  // r3: two characters combined into halfword in little endian byte order.
+  __ bind(&make_two_character_string);
+  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
+  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&result_longer_than_two);
+
   // Allocate the result.
   __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
 
@@ -7313,8 +9492,8 @@
   // r2: result string length.
   // r5: first character of sub string to copy.
   ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
-  GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
-                             COPY_ASCII | DEST_ALWAYS_ALIGNED);
+  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+                                           COPY_ASCII | DEST_ALWAYS_ALIGNED);
   __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Ret();
@@ -7344,15 +9523,15 @@
   // r2: result length.
   // r5: first character of string to copy.
   ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
-  GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
-                             DEST_ALWAYS_ALIGNED);
+  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+                                           DEST_ALWAYS_ALIGNED);
   __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Ret();
 
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
 }
 
 
@@ -7371,9 +9550,13 @@
   Register length_delta = scratch3;
   __ mov(scratch1, scratch2, LeaveCC, gt);
   Register min_length = scratch1;
+  ASSERT(kSmiTag == 0);
   __ tst(min_length, Operand(min_length));
   __ b(eq, &compare_lengths);
 
+  // Untag smi.
+  __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
+
   // Setup registers so that we only need to increment one register
   // in the loop.
   __ add(scratch2, min_length,
@@ -7443,7 +9626,7 @@
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
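
Several hunks above reflect that string lengths are now stored as smis rather than untagged integers, hence the new Smi::FromInt comparisons and the ASR untagging. With kSmiTag == 0 and kSmiTagSize == 1 the scheme boils down to (sketch):

    int32_t SmiTag(int32_t value) { return value << 1; }  // Smi::FromInt
    int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // Operand(reg, ASR, kSmiTagSize)
    // Two smis compare equal exactly when their untagged values do, which
    // is why cmp(r4, Operand(r7, ASR, 1)) above became cmp(r4, Operand(r7))
    // once both sides are smis.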
 
 
@@ -7483,9 +9666,12 @@
     // Check if either of the strings are empty. In that case return the other.
     __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
     __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
-    __ cmp(r2, Operand(0));  // Test if first string is empty.
+    ASSERT(kSmiTag == 0);
+    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
     __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
-    __ cmp(r3, Operand(0), ne);  // Else test if second string is empty.
+    ASSERT(kSmiTag == 0);
+    // Else test if second string is empty.
+    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
     __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
 
     __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
@@ -7495,6 +9681,8 @@
     __ bind(&strings_not_empty);
   }
 
+  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
+  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
   // Both strings are non-empty.
   // r0: first string
   // r1: second string
@@ -7503,14 +9691,52 @@
   // r4: first string instance type (if string_check_)
   // r5: second string instance type (if string_check_)
   // Look at the length of the result of adding the two strings.
-  Label string_add_flat_result;
+  Label string_add_flat_result, longer_than_two;
   // Adding two lengths can't overflow.
   ASSERT(String::kMaxLength * 2 > String::kMaxLength);
   __ add(r6, r2, Operand(r3));
   // Use the runtime system when adding two one character strings, as it
   // contains optimizations for this specific case using the symbol table.
   __ cmp(r6, Operand(2));
-  __ b(eq, &string_add_runtime);
+  __ b(ne, &longer_than_two);
+
+  // Check that both strings are non-external ascii strings.
+  if (!string_check_) {
+    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+  }
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
+                                                  &string_add_runtime);
+
+  // Get the two characters forming the new string.
+  __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+  __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+
+  // Try to look up the two character string in the symbol table. If it is
+  // not found, just allocate a new one.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
+  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&make_two_character_string);
+  // The resulting string has length 2, and the first characters of the two
+  // strings are combined into a single halfword in register r2. We can
+  // therefore fill the resulting string with a single halfword store
+  // instruction instead of two loops (this assumes the processor is in
+  // little endian mode).
+  __ mov(r6, Operand(2));
+  __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
+  __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&longer_than_two);
   // Check if resulting string will be flat.
   __ cmp(r6, Operand(String::kMinNonFlatLength));
   __ b(lt, &string_add_flat_result);
@@ -7589,6 +9815,7 @@
 
   // Both strings are sequential ASCII strings. We also know that they are
   // short (since the sum of the lengths is less than kMinNonFlatLength).
+  // r6: length of resulting flat string
   __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
   // Locate first character of result.
   __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
@@ -7600,7 +9827,7 @@
   // r3: length of second string.
   // r6: first character of result.
   // r7: result string.
-  GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
+  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
 
   // Load second argument and locate first character.
   __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
@@ -7608,7 +9835,7 @@
   // r3: length of second string.
   // r6: next character of result.
   // r7: result string.
-  GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
   __ mov(r0, Operand(r7));
   __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
@@ -7639,7 +9866,7 @@
   // r3: length of second string.
   // r6: first character of result.
   // r7: result string.
-  GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
+  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
 
   // Locate first character of second argument.
   __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -7648,7 +9875,7 @@
   // r3: length of second string.
   // r6: next character of result (after copy of first string).
   // r7: result string.
-  GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
 
   __ mov(r0, Operand(r7));
   __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
@@ -7657,7 +9884,7 @@
 
   // Just jump to runtime to add the two strings.
   __ bind(&string_add_runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 }
 
 
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 22dd854..bb76b63 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -28,6 +28,8 @@
 #ifndef V8_ARM_CODEGEN_ARM_H_
 #define V8_ARM_CODEGEN_ARM_H_
 
+#include "ic-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -90,10 +92,6 @@
   // If the reference is not consumed, it is left in place under its value.
   void GetValue();
 
-  // Generate code to pop a reference, push the value of the reference,
-  // and then spill the stack frame.
-  inline void GetValueAndSpill();
-
   // Generate code to store the value on top of the expression stack in the
   // reference.  The reference is expected to be immediately below the value
   // on the expression stack.  The  value is stored in the location specified
@@ -146,6 +144,24 @@
 
 
 // -------------------------------------------------------------------------
+// Arguments allocation mode
+
+enum ArgumentsAllocationMode {
+  NO_ARGUMENTS_ALLOCATION,
+  EAGER_ARGUMENTS_ALLOCATION,
+  LAZY_ARGUMENTS_ALLOCATION
+};
+
+
+// Different nop operations are used by the code generator to detect certain
+// states of the generated code.
+enum NopMarkerTypes {
+  NON_MARKING_NOP = 0,
+  PROPERTY_ACCESS_INLINED
+};
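
The emitting side is not part of this hunk, but a marking nop has to execute as a no-op while staying recognizable when scanning generated code. A hypothetical illustration:

    // Hypothetical: a register-to-self move whose register number carries
    // the marker, e.g. something like "mov r1, r1" for the marker below.
    __ nop(PROPERTY_ACCESS_INLINED);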
+
+
+// -------------------------------------------------------------------------
 // CodeGenerator
 
 class CodeGenerator: public AstVisitor {
@@ -197,13 +213,17 @@
 
   static const int kUnknownIntValue = -1;
 
+  // If the name is an inline runtime function call return the number of
+  // expected arguments. Otherwise return -1.
+  static int InlineRuntimeCallArgumentsCount(Handle<String> name);
+
  private:
   // Construction/Destruction
   explicit CodeGenerator(MacroAssembler* masm);
 
   // Accessors
   inline bool is_eval();
-  Scope* scope();
+  inline Scope* scope();
 
   // Generating deferred code.
   void ProcessDeferred();
@@ -213,8 +233,10 @@
   JumpTarget* true_target() const  { return state_->true_target(); }
   JumpTarget* false_target() const  { return state_->false_target(); }
 
-  // We don't track loop nesting level on ARM yet.
-  int loop_nesting() const { return 0; }
+  // Track loop nesting level.
+  int loop_nesting() const { return loop_nesting_; }
+  void IncrementLoopNesting() { loop_nesting_++; }
+  void DecrementLoopNesting() { loop_nesting_--; }
 
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
@@ -237,6 +259,12 @@
   // Main code generation function
   void Generate(CompilationInfo* info);
 
+  // Returns the arguments allocation mode.
+  ArgumentsAllocationMode ArgumentsMode();
+
+  // Store the arguments object and allocate it if necessary.
+  void StoreArgumentsObject(bool initial);
+
   // The following are used by class Reference.
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);
@@ -280,16 +308,34 @@
 
   // Read a value from a slot and leave it on top of the expression stack.
   void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
   // Store the value on top of the stack to a slot.
   void StoreToSlot(Slot* slot, InitState init_state);
+
+  // Support for compiling assignment expressions.
+  void EmitSlotAssignment(Assignment* node);
+  void EmitNamedPropertyAssignment(Assignment* node);
+  void EmitKeyedPropertyAssignment(Assignment* node);
+
+  // Load a named property, returning it in r0. The receiver is passed on the
+  // stack, and remains there.
+  void EmitNamedLoad(Handle<String> name, bool is_contextual);
+
+  // Store to a named property. If the store is contextual, value is passed on
+  // the frame and consumed. Otherwise, receiver and value are passed on the
+  // frame and consumed. The result is returned in r0.
+  void EmitNamedStore(Handle<String> name, bool is_contextual);
+
   // Load a keyed property, leaving it in r0.  The receiver and key are
   // passed on the stack, and remain there.
-  void EmitKeyedLoad(bool is_global);
+  void EmitKeyedLoad();
+
+  // Store a keyed property. Key and receiver are on the stack and the value is
+  // in r0. Result is returned in r0.
+  void EmitKeyedStore(StaticType* key_type);
 
   void LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                          TypeofState typeof_state,
-                                         Register tmp,
-                                         Register tmp2,
                                          JumpTarget* slow);
 
   // Special code for typeof expressions: Unfortunately, we must
@@ -302,9 +348,15 @@
 
   void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
 
+  // Generate code that computes a shortcutting logical operation.
+  void GenerateLogicalBooleanOperation(BinaryOperation* node);
+
   void GenericBinaryOperation(Token::Value op,
                               OverwriteMode overwrite_mode,
                               int known_rhs = kUnknownIntValue);
+  void VirtualFrameBinaryOperation(Token::Value op,
+                                   OverwriteMode overwrite_mode,
+                                   int known_rhs = kUnknownIntValue);
   void Comparison(Condition cc,
                   Expression* left,
                   Expression* right,
@@ -319,6 +371,14 @@
                          CallFunctionFlags flags,
                          int position);
 
+  // An optimized implementation of expressions of the form
+  // x.apply(y, arguments).  We call x the applicand and y the receiver.
+  // The optimization avoids allocating an arguments object if possible.
+  void CallApplyLazy(Expression* applicand,
+                     Expression* receiver,
+                     VariableProxy* arguments,
+                     int position);
+
   // Control flow
   void Branch(bool if_true, JumpTarget* target);
   void CheckStack();
@@ -326,6 +386,7 @@
   struct InlineRuntimeLUT {
     void (CodeGenerator::*method)(ZoneList<Expression*>*);
     const char* name;
+    int nargs;
   };
 
   static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
@@ -343,8 +404,8 @@
   // name/value pairs.
   void DeclareGlobals(Handle<FixedArray> pairs);
 
-  // Instantiate the function boilerplate.
-  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+  // Instantiate the function based on the shared function info.
+  void InstantiateFunction(Handle<SharedFunctionInfo> function_info);
 
   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);
@@ -360,7 +421,7 @@
 
   // Support for arguments.length and arguments[?].
   void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+  void GenerateArguments(ZoneList<Expression*>* args);
 
   // Support for accessing the class and value fields of an object.
   void GenerateClassOf(ZoneList<Expression*>* args);
@@ -370,13 +431,16 @@
   // Fast support for charCodeAt(n).
   void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
 
+  // Fast support for string.charAt(n) and string[n].
+  void GenerateCharFromCode(ZoneList<Expression*>* args);
+
   // Fast support for object equality testing.
   void GenerateObjectEquals(ZoneList<Expression*>* args);
 
   void GenerateLog(ZoneList<Expression*>* args);
 
   // Fast support for Math.random().
-  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
 
   // Fast support for StringAdd.
   void GenerateStringAdd(ZoneList<Expression*>* args);
@@ -390,12 +454,25 @@
   // Support for direct calls from JavaScript to native RegExp code.
   void GenerateRegExpExec(ZoneList<Expression*>* args);
 
+  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+
+  // Support for fast native caches.
+  void GenerateGetFromCache(ZoneList<Expression*>* args);
+
   // Fast support for number to string.
   void GenerateNumberToString(ZoneList<Expression*>* args);
 
-  // Fast call to sine function.
+  // Fast swapping of elements.
+  void GenerateSwapElements(ZoneList<Expression*>* args);
+
+  // Fast call for custom callbacks.
+  void GenerateCallFunction(ZoneList<Expression*>* args);
+
+  // Fast call to math functions.
+  void GenerateMathPow(ZoneList<Expression*>* args);
   void GenerateMathSin(ZoneList<Expression*>* args);
   void GenerateMathCos(ZoneList<Expression*>* args);
+  void GenerateMathSqrt(ZoneList<Expression*>* args);
 
   // Simple condition analysis.
   enum ConditionAnalysis {
@@ -431,6 +508,7 @@
   RegisterAllocator* allocator_;
   Condition cc_reg_;
   CodeGenState* state_;
+  int loop_nesting_;
 
   // Jump targets
   BreakTarget function_return_;
@@ -457,37 +535,68 @@
  public:
   GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
+                      Register lhs,
+                      Register rhs,
                       int constant_rhs = CodeGenerator::kUnknownIntValue)
       : op_(op),
         mode_(mode),
+        lhs_(lhs),
+        rhs_(rhs),
         constant_rhs_(constant_rhs),
         specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+        runtime_operands_type_(BinaryOpIC::DEFAULT),
+        name_(NULL) { }
+
+  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        lhs_(LhsRegister(RegisterBits::decode(key))),
+        rhs_(RhsRegister(RegisterBits::decode(key))),
+        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
+        runtime_operands_type_(type_info),
         name_(NULL) { }
 
  private:
   Token::Value op_;
   OverwriteMode mode_;
+  Register lhs_;
+  Register rhs_;
   int constant_rhs_;
   bool specialized_on_rhs_;
+  BinaryOpIC::TypeInfo runtime_operands_type_;
   char* name_;
 
   static const int kMaxKnownRhs = 0x40000000;
+  static const int kKnownRhsKeyBits = 6;
 
-  // Minor key encoding in 16 bits.
+  // Minor key encoding in 17 bits.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 6> {};
-  class KnownIntBits: public BitField<int, 8, 8> {};
+  class TypeInfoBits: public BitField<int, 8, 2> {};
+  class RegisterBits: public BitField<bool, 10, 1> {};
+  class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
+    ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+           (lhs_.is(r1) && rhs_.is(r0)));
+    // Encode the parameters in a unique 17 bit value.
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
-           | KnownIntBits::encode(MinorKeyForKnownInt());
+           | KnownIntBits::encode(MinorKeyForKnownInt())
+           | TypeInfoBits::encode(runtime_operands_type_)
+           | RegisterBits::encode(lhs_.is(r0));
   }
 
   void Generate(MacroAssembler* masm);
-  void HandleNonSmiBitwiseOp(MacroAssembler* masm);
+  void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
+  void HandleBinaryOpSlowCases(MacroAssembler* masm,
+                               Label* not_smi,
+                               Register lhs,
+                               Register rhs,
+                               const Builtins::JavaScript& builtin);
+  void GenerateTypeTransition(MacroAssembler* masm);
 
   static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
     if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
@@ -511,9 +620,45 @@
       key++;
       d >>= 1;
     }
+    ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
     return key;
   }
 
+  int KnownBitsForMinorKey(int key) {
+    if (!key) return 0;
+    if (key <= 11) return key - 1;
+    int d = 1;
+    while (key != 12) {
+      key--;
+      d <<= 1;
+    }
+    return d;
+  }
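
MinorKeyForKnownInt (only partially visible in this hunk) and KnownBitsForMinorKey are inverses over the 6-bit key space: key 0 means no specialized constant, keys 1..11 encode the constants 0..10, and larger keys encode powers of two. A standalone check of the decode direction shown above:

    #include <cassert>
    // Copy of KnownBitsForMinorKey above, for illustration only.
    static int KnownBitsForMinorKey(int key) {
      if (!key) return 0;
      if (key <= 11) return key - 1;
      int d = 1;
      while (key != 12) {
        key--;
        d <<= 1;
      }
      return d;
    }
    int main() {
      assert(KnownBitsForMinorKey(0) == 0);    // no specialized constant
      assert(KnownBitsForMinorKey(1) == 0);    // keys 1..11 decode to 0..10
      assert(KnownBitsForMinorKey(11) == 10);
      assert(KnownBitsForMinorKey(16) == 16);  // larger keys decode to powers of two
      return 0;
    }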
+
+  Register LhsRegister(bool lhs_is_r0) {
+    return lhs_is_r0 ? r0 : r1;
+  }
+
+  Register RhsRegister(bool lhs_is_r0) {
+    return lhs_is_r0 ? r1 : r0;
+  }
+
+  bool ShouldGenerateSmiCode() {
+    return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
+        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+        runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  bool ShouldGenerateFPCode() {
+    return runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return BinaryOpIC::ToState(runtime_operands_type_);
+  }
+
   const char* GetName();
 
 #ifdef DEBUG
@@ -530,34 +675,99 @@
 };
 
 
-class StringStubBase: public CodeStub {
+class StringHelper : public AllStatic {
  public:
+  // Generates fast code for getting a char code out of a string
+  // object at the given index. May bail out for four reasons (in the
+  // listed order):
+  //   * Receiver is not a string (receiver_not_string label).
+  //   * Index is not a smi (index_not_smi label).
+  //   * Index is out of range (index_out_of_range label).
+  //   * Some other reason (slow_case label). In this case it's
+  //     guaranteed that the above conditions are not violated,
+  //     e.g. it's safe to assume the receiver is a string and the
+  //     index is a non-negative smi < length.
+  // When successful, object, index, and scratch are clobbered.
+  // Otherwise, scratch and result are clobbered.
+  static void GenerateFastCharCodeAt(MacroAssembler* masm,
+                                     Register object,
+                                     Register index,
+                                     Register scratch,
+                                     Register result,
+                                     Label* receiver_not_string,
+                                     Label* index_not_smi,
+                                     Label* index_out_of_range,
+                                     Label* slow_case);
+
+  // Generates code for creating a one-char string from the given char
+  // code. May do a runtime call, so any register can be clobbered
+  // and, if the given invoke flag specifies a call, an internal frame
+  // is required. In tail call mode the result must be in the r0 register.
+  static void GenerateCharFromCode(MacroAssembler* masm,
+                                   Register code,
+                                   Register scratch,
+                                   Register result,
+                                   InvokeFlag flag);
+
   // Generate code for copying characters using a simple loop. This should only
   // be used in places where the number of characters is small and the
   // additional setup and checking in GenerateCopyCharactersLong adds too much
   // overhead. Copying of overlapping regions is not supported.
   // Dest register ends at the position after the last character written.
-  void GenerateCopyCharacters(MacroAssembler* masm,
-                              Register dest,
-                              Register src,
-                              Register count,
-                              Register scratch,
-                              bool ascii);
+  static void GenerateCopyCharacters(MacroAssembler* masm,
+                                     Register dest,
+                                     Register src,
+                                     Register count,
+                                     Register scratch,
+                                     bool ascii);
 
   // Generate code for copying a large number of characters. This function
   // is allowed to spend extra time setting up conditions to make copying
   // faster. Copying of overlapping regions is not supported.
   // Dest register ends at the position after the last character written.
-  void GenerateCopyCharactersLong(MacroAssembler* masm,
-                                  Register dest,
-                                  Register src,
-                                  Register count,
-                                  Register scratch1,
-                                  Register scratch2,
-                                  Register scratch3,
-                                  Register scratch4,
-                                  Register scratch5,
-                                  int flags);
+  static void GenerateCopyCharactersLong(MacroAssembler* masm,
+                                         Register dest,
+                                         Register src,
+                                         Register count,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Register scratch4,
+                                         Register scratch5,
+                                         int flags);
+
+
+  // Probe the symbol table for a two character string. If the string is not
+  // found by probing, a jump to the label not_found is performed. This jump
+  // does not guarantee that the string is not in the symbol table. If the
+  // string is found, the code falls through with the string in register r0.
+  // The contents of both the c1 and c2 registers are modified. On exit, c1
+  // is guaranteed to contain a halfword whose low and high bytes equal the
+  // initial contents of c1 and c2 respectively.
+  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                   Register c1,
+                                                   Register c2,
+                                                   Register scratch1,
+                                                   Register scratch2,
+                                                   Register scratch3,
+                                                   Register scratch4,
+                                                   Register scratch5,
+                                                   Label* not_found);
+
+  // Generate string hash.
+  static void GenerateHashInit(MacroAssembler* masm,
+                               Register hash,
+                               Register character);
+
+  static void GenerateHashAddCharacter(MacroAssembler* masm,
+                                       Register hash,
+                                       Register character);
+
+  static void GenerateHashGetHash(MacroAssembler* masm,
+                                  Register hash);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
 };
 
 
@@ -568,7 +778,7 @@
 };
 
 
-class StringAddStub: public StringStubBase {
+class StringAddStub: public CodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) {
     string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -585,7 +795,7 @@
 };
 
 
-class SubStringStub: public StringStubBase {
+class SubStringStub: public CodeStub {
  public:
   SubStringStub() {}
 
@@ -620,6 +830,117 @@
 };
 
 
+// This stub can convert a signed int32 to a heap number (double).  It does
+// not work for int32s that are in Smi range!  No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+  WriteInt32ToHeapNumberStub(Register the_int,
+                             Register the_heap_number,
+                             Register scratch)
+      : the_int_(the_int),
+        the_heap_number_(the_heap_number),
+        scratch_(scratch) { }
+
+ private:
+  Register the_int_;
+  Register the_heap_number_;
+  Register scratch_;
+
+  // Minor key encoding in 16 bits.
+  class IntRegisterBits: public BitField<int, 0, 4> {};
+  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
+  class ScratchRegisterBits: public BitField<int, 8, 4> {};
+
+  Major MajorKey() { return WriteInt32ToHeapNumber; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return IntRegisterBits::encode(the_int_.code())
+           | HeapNumberRegisterBits::encode(the_heap_number_.code())
+           | ScratchRegisterBits::encode(scratch_.code());
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+  NumberToStringStub() { }
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the object register is found in the cache, the generated code falls
+  // through with the result in the result register. The object and result
+  // registers can be the same. If the number is not found in the cache, the
+  // code jumps to the label not_found; only the content of the object
+  // register is guaranteed to be unchanged.
+  static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                              Register object,
+                                              Register result,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3,
+                                              bool object_is_smi,
+                                              Label* not_found);
+
+ private:
+  Major MajorKey() { return NumberToString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("NumberToStringStub\n");
+  }
+#endif
+};
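
The generated lookup has to mirror whatever layout the runtime uses for the number string cache. As a sketch of the contract documented above, using plain arrays in place of heap objects (the pair layout and the masked index are assumptions for illustration, not the real cache implementation):

    #include <cstddef>
    struct CacheEntry { int key; const char* value; };
    // 'capacity' is assumed to be a power of two.
    const char* Lookup(const CacheEntry* cache, int capacity, int number) {
      int index = number & (capacity - 1);
      if (cache[index].key == number) return cache[index].value;
      return NULL;  // "not_found": only 'number' is guaranteed unchanged
    }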
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+  RecordWriteStub(Register object, Register offset, Register scratch)
+      : object_(object), offset_(offset), scratch_(scratch) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register object_;
+  Register offset_;
+  Register scratch_;
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
+           " (scratch reg %d)\n",
+           object_.code(), offset_.code(), scratch_.code());
+  }
+#endif
+
+  // Minor key encoding in 12 bits. 4 bits for each of the three
+  // registers (object, offset and scratch) OOOOAAAASSSS.
+  class ScratchBits: public BitField<uint32_t, 0, 4> {};
+  class OffsetBits: public BitField<uint32_t, 4, 4> {};
+  class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    // Encode the registers.
+    return ObjectBits::encode(object_.code()) |
+           OffsetBits::encode(offset_.code()) |
+           ScratchBits::encode(scratch_.code());
+  }
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 89ff7c0..2e37120 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -81,9 +81,27 @@
 };
 
 
-const char* VFPRegisters::Name(int reg) {
+const char* VFPRegisters::Name(int reg, bool is_double) {
   ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
-  return names_[reg];
+  return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
+}
+
+
+int VFPRegisters::Number(const char* name, bool* is_double) {
+  for (int i = 0; i < kNumVFPRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      if (i < kNumVFPSingleRegisters) {
+        *is_double = false;
+        return i;
+      } else {
+        *is_double = true;
+        return i - kNumVFPSingleRegisters;
+      }
+    }
+  }
+
+  // No register with the requested name found.
+  return kNoRegister;
 }
 
 
@@ -104,7 +122,7 @@
     i++;
   }
 
-  // No register with the reguested name found.
+  // No register with the requested name found.
   return kNoRegister;
 }
 
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 8a32c95..5eed13f 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -72,6 +72,11 @@
 # define CAN_USE_THUMB_INSTRUCTIONS 1
 #endif
 
+// Using blx may yield better code, so use it when required or when available.
+#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
+#define USE_BLX 1
+#endif
+
 namespace assembler {
 namespace arm {
 
@@ -79,7 +84,10 @@
 static const int kNumRegisters = 16;
 
 // VFP support.
-static const int kNumVFPRegisters = 48;
+static const int kNumVFPSingleRegisters = 32;
+static const int kNumVFPDoubleRegisters = 16;
+static const int kNumVFPRegisters =
+    kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
 
 // PC is register 15.
 static const int kPCRegister = 15;
@@ -143,24 +151,19 @@
 };
 
 
-// Some special instructions encoded as a TEQ with S=0 (bit 20).
-enum Opcode9Bits {
+// The values of bits 7-4 for some type 0 miscellaneous instructions.
+enum MiscInstructionsBits74 {
+  // With bits 22-21 01.
   BX   =  1,
   BXJ  =  2,
   BLX  =  3,
-  BKPT =  7
-};
+  BKPT =  7,
 
-
-// Some special instructions encoded as a CMN with S=0 (bit 20).
-enum Opcode11Bits {
+  // With bits 22-21 11.
   CLZ  =  1
 };
 
 
-// S
-
-
 // Shifter types for Data-processing operands as defined in section A5.1.2.
 enum Shift {
   no_shift = -1,
@@ -249,6 +252,14 @@
   inline int RtField() const { return Bits(15, 12); }
   inline int PField() const { return Bit(24); }
   inline int UField() const { return Bit(23); }
+  inline int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); }
+  inline int Opc2Field() const { return Bits(19, 16); }
+  inline int Opc3Field() const { return Bits(7, 6); }
+  inline int SzField() const { return Bit(8); }
+  inline int VLField() const { return Bit(20); }
+  inline int VCField() const { return Bit(8); }
+  inline int VAField() const { return Bits(23, 21); }
+  inline int VBField() const { return Bits(6, 5); }
 
   // Fields used in Data processing instructions
   inline Opcode OpcodeField() const {
@@ -294,6 +305,12 @@
   // as well as multiplications).
   inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
 
+  // Test for miscellaneous instructions encodings of type 0 instructions.
+  inline bool IsMiscType0() const { return (Bit(24) == 1)
+                                           && (Bit(23) == 0)
+                                           && (Bit(20) == 0)
+                                           && (Bit(7) == 0); }
+
   // Special accessors that test for existence of a value.
   inline bool HasS()    const { return SField() == 1; }
   inline bool HasB()    const { return BField() == 1; }
@@ -339,7 +356,12 @@
 class VFPRegisters {
  public:
   // Return the name of the register.
-  static const char* Name(int reg);
+  static const char* Name(int reg, bool is_double);
+
+  // Look up the register number for the name provided.
+  // Sets the flag pointed to by is_double to true if the
+  // register is double-precision.
+  static int Number(const char* name, bool* is_double);
 
  private:
   static const char* names_[kNumVFPRegisters];
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 55f31d4..d50c203 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // CPU specific code for arm independent of OS goes here.
-#if defined(__arm__)
+#ifdef __arm__
 #include <sys/syscall.h>  // for cache flushing.
 #endif
 
@@ -35,6 +35,10 @@
 #include "cpu.h"
 #include "macro-assembler.h"
 
+#ifndef __arm__
+#include "simulator-arm.h"  // for cache flushing.
+#endif
+
 namespace v8 {
 namespace internal {
 
@@ -46,9 +50,11 @@
 void CPU::FlushICache(void* start, size_t size) {
 #if !defined (__arm__)
   // Not generating ARM instructions for C-code. This means that we are
-  // building an ARM emulator based target. No I$ flushes are necessary.
+  // building an ARM emulator based target.  We should notify the simulator
+  // that the Icache was flushed.
   // None of this code ends up in the snapshot so there are no issues
   // around whether or not to generate the code when building snapshots.
+  assembler::arm::Simulator::FlushICache(start, size);
 #else
   // Ideally, we would call
   //   syscall(__ARM_NR_cacheflush, start,
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index e6b61b4..d02ba76 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -46,13 +46,23 @@
   //   add sp, sp, #4
   //   bx lr
   // to a call to the debug break return code.
+  // #if USE_BLX
+  //   ldr ip, [pc, #0]
+  //   blx ip
+  // #else
   //   mov lr, pc
   //   ldr pc, [pc, #-4]
+  // #endif
   //   <debug break return code entry point address>
   //   bkpt 0
   CodePatcher patcher(rinfo()->pc(), 4);
+#ifdef USE_BLX
+  patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
+  patcher.masm()->blx(v8::internal::ip);
+#else
   patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
   patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
+#endif
   patcher.Emit(Debug::debug_break_return()->entry());
   patcher.masm()->bkpt(0);
 }
@@ -123,9 +133,9 @@
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC load (from ic-arm.cc).
   // ----------- S t a t e -------------
-  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
+  //  -- r0    : receiver
   //  -- [sp]  : receiver
   // -----------------------------------
   // Registers r0 and r2 contain objects that need to be pushed on the
@@ -151,9 +161,10 @@
 void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
+  //  -- r0     : key
   //  -- sp[0]  : key
   //  -- sp[4]  : receiver
-  Generate_DebugBreakCallHelper(masm, 0);
+  Generate_DebugBreakCallHelper(masm, r0.bit());
 }
 
 
@@ -206,8 +217,23 @@
 }
 
 
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+  masm->Abort("LiveEdit frame dropping is not supported on arm");
+}
+
+void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+  masm->Abort("LiveEdit frame dropping is not supported on arm");
+}
+
 #undef __
 
+
+void Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
+                                   Handle<Code> code) {
+  UNREACHABLE();
+}
+const int Debug::kFrameDropperFrameSize = -1;
+
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 } }  // namespace v8::internal
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 127c160..4ba3094 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -34,10 +34,9 @@
 //   NameConverter converter;
 //   Disassembler d(converter);
 //   for (byte* pc = begin; pc < end;) {
-//     char buffer[128];
-//     buffer[0] = '\0';
+//     v8::internal::EmbeddedVector<char, 256> buffer;
 //     byte* prev_pc = pc;
-//     pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+//     pc += d.InstructionDecode(buffer, pc);
 //     printf("%p    %08x      %s\n",
 //            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
 //   }
@@ -129,6 +128,10 @@
   void DecodeTypeVFP(Instr* instr);
   void DecodeType6CoprocessorIns(Instr* instr);
 
+  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
+  void DecodeVCMP(Instr* instr);
+  void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
+  void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
 
   const disasm::NameConverter& converter_;
   v8::internal::Vector<char> out_buffer_;
@@ -181,12 +184,12 @@
 
 // Print the VFP S register name according to the active name converter.
 void Decoder::PrintSRegister(int reg) {
-  Print(assembler::arm::VFPRegisters::Name(reg));
+  Print(assembler::arm::VFPRegisters::Name(reg, false));
 }
 
 // Print the  VFP D register name according to the active name converter.
 void Decoder::PrintDRegister(int reg) {
-  Print(assembler::arm::VFPRegisters::Name(reg + 32));
+  Print(assembler::arm::VFPRegisters::Name(reg, true));
 }
 
 
@@ -445,6 +448,14 @@
         out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                             "%d", instr->ShiftAmountField());
         return 8;
+      } else if (format[3] == '0') {
+        // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
+        ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                            "%d",
+                                            (instr->Bits(19, 8) << 4) +
+                                                instr->Bits(3, 0));
+        return 15;
       }
       // 'off8: 8-bit offset for extra load and store instructions
       ASSERT(STRING_STARTS_WITH(format, "off8"));
@@ -646,6 +657,34 @@
       }
       return;
     }
+  } else if ((type == 0) && instr->IsMiscType0()) {
+    if (instr->Bits(22, 21) == 1) {
+      switch (instr->Bits(7, 4)) {
+        case BX:
+          Format(instr, "bx'cond 'rm");
+          break;
+        case BLX:
+          Format(instr, "blx'cond 'rm");
+          break;
+        case BKPT:
+          Format(instr, "bkpt 'off0to3and8to19");
+          break;
+        default:
+          Unknown(instr);  // not used by V8
+          break;
+      }
+    } else if (instr->Bits(22, 21) == 3) {
+      switch (instr->Bits(7, 4)) {
+        case CLZ:
+          Format(instr, "clz'cond 'rd, 'rm");
+          break;
+        default:
+          Unknown(instr);  // not used by V8
+          break;
+      }
+    } else {
+      Unknown(instr);  // not used by V8
+    }
   } else {
     switch (instr->OpcodeField()) {
       case AND: {
@@ -692,17 +731,9 @@
         if (instr->HasS()) {
           Format(instr, "teq'cond 'rn, 'shift_op");
         } else {
-          switch (instr->Bits(7, 4)) {
-            case BX:
-              Format(instr, "bx'cond 'rm");
-              break;
-            case BLX:
-              Format(instr, "blx'cond 'rm");
-              break;
-            default:
-              Unknown(instr);  // not used by V8
-              break;
-          }
+          // Other instructions matching this pattern are handled in the
+          // miscellaneous instructions part above.
+          UNREACHABLE();
         }
         break;
       }
@@ -718,14 +749,9 @@
         if (instr->HasS()) {
           Format(instr, "cmn'cond 'rn, 'shift_op");
         } else {
-          switch (instr->Bits(7, 4)) {
-            case CLZ:
-              Format(instr, "clz'cond 'rd, 'rm");
-              break;
-            default:
-              Unknown(instr);  // not used by V8
-              break;
-          }
+          // Other instructions matching this pattern are handled in the
+          // miscellaneous instructions part above.
+          UNREACHABLE();
         }
         break;
       }
@@ -930,85 +956,61 @@
 // VMRS
 void Decoder::DecodeTypeVFP(Instr* instr) {
   ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+  ASSERT(instr->Bits(11, 9) == 0x5);
 
-  if (instr->Bit(23) == 1) {
-    if ((instr->Bits(21, 19) == 0x7) &&
-        (instr->Bits(18, 16) == 0x5) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 1) &&
-        (instr->Bit(6) == 1) &&
-        (instr->Bit(4) == 0)) {
-      Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
-    } else if ((instr->Bits(21, 19) == 0x7) &&
-               (instr->Bits(18, 16) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(7) == 1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
-    } else if ((instr->Bit(21) == 0x0) &&
-               (instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
+  if (instr->Bit(4) == 0) {
+    if (instr->Opc1Field() == 0x7) {
+      // Other data processing instructions
+      if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+        DecodeVCVTBetweenDoubleAndSingle(instr);
+      } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() >> 1) == 0x6) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCMP(instr);
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
+    } else if (instr->Opc1Field() == 0x3) {
+      if (instr->SzField() == 0x1) {
+        if (instr->Opc3Field() & 0x1) {
+          Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
+        } else {
+          Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
+        }
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
+    } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
+      if (instr->SzField() == 0x1) {
+        Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
+    } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
+      if (instr->SzField() == 0x1) {
         Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
-    } else if ((instr->Bits(21, 20) == 0x3) &&
-               (instr->Bits(19, 16) == 0x4) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0x1) &&
-               (instr->Bit(4) == 0x0)) {
-      Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
-    } else if ((instr->Bits(23, 20) == 0xF) &&
-               (instr->Bits(19, 16) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(7, 5) == 0x0) &&
-               (instr->Bit(4) == 0x1)    &&
-               (instr->Bits(3, 0) == 0x0)) {
-        if (instr->Bits(15, 12) == 0xF)
-          Format(instr, "vmrs'cond APSR, FPSCR");
-        else
-          Unknown(instr);  // Not used by V8.
-    } else {
-      Unknown(instr);  // Not used by V8.
-    }
-  } else if (instr->Bit(21) == 1) {
-    if ((instr->Bit(20) == 0x1) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 0x1) &&
-        (instr->Bit(6) == 0) &&
-        (instr->Bit(4) == 0)) {
-      Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
-    } else if ((instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
-      Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
     } else {
       Unknown(instr);  // Not used by V8.
     }
   } else {
-    if ((instr->Bit(20) == 0x0) &&
-        (instr->Bits(11, 8) == 0xA) &&
-        (instr->Bits(6, 5) == 0x0) &&
-        (instr->Bit(4) == 1) &&
-        (instr->Bits(3, 0) == 0x0)) {
-      Format(instr, "vmov'cond 'Sn, 'rt");
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(6, 5) == 0x0) &&
-               (instr->Bit(4) == 1) &&
-               (instr->Bits(3, 0) == 0x0)) {
-      Format(instr, "vmov'cond 'rt, 'Sn");
+    if ((instr->VCField() == 0x0) &&
+        (instr->VAField() == 0x0)) {
+      DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+    } else if ((instr->VLField() == 0x1) &&
+               (instr->VCField() == 0x0) &&
+               (instr->VAField() == 0x7) &&
+               (instr->Bits(19, 16) == 0x1)) {
+      if (instr->Bits(15, 12) == 0xF)
+        Format(instr, "vmrs'cond APSR, FPSCR");
+      else
+        Unknown(instr);  // Not used by V8.
     } else {
       Unknown(instr);  // Not used by V8.
     }
@@ -1016,6 +1018,94 @@
 }
 
 
+void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
+  ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
+         (instr->VAField() == 0x0));
+
+  bool to_arm_register = (instr->VLField() == 0x1);
+
+  if (to_arm_register) {
+    Format(instr, "vmov'cond 'rt, 'Sn");
+  } else {
+    Format(instr, "vmov'cond 'Sn, 'rt");
+  }
+}
+
+
+void Decoder::DecodeVCMP(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+         (instr->Opc3Field() & 0x1));
+
+  // Comparison.
+  bool dp_operation = (instr->SzField() == 1);
+  bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
+
+  if (dp_operation && !raise_exception_for_qnan) {
+    Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+  } else {
+    Unknown(instr);  // Not used by V8.
+  }
+}
+
+
+void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+
+  bool double_to_single = (instr->SzField() == 1);
+
+  if (double_to_single) {
+    Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
+  } else {
+    Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm");
+  }
+}
+
+
+void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
+         (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+
+  bool to_integer = (instr->Bit(18) == 1);
+  bool dp_operation = (instr->SzField() == 1);
+  if (to_integer) {
+    bool unsigned_integer = (instr->Bit(16) == 0);
+
+    if (dp_operation) {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm");
+      } else {
+        Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
+      }
+    } else {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm");
+      } else {
+        Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm");
+      }
+    }
+  } else {
+    bool unsigned_integer = (instr->Bit(7) == 0);
+
+    if (dp_operation) {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm");
+      } else {
+        Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
+      }
+    } else {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm");
+      } else {
+        Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm");
+      }
+    }
+  }
+}
+
+
 // Decode Type 6 coprocessor instructions.
 // Dm = vmov(Rt, Rt2)
 // <Rt, Rt2> = vmov(Dm)
@@ -1024,9 +1114,27 @@
 void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
-  if (instr->CoprocessorField() != 0xB) {
-    Unknown(instr);  // Not used by V8.
-  } else {
+  if (instr->CoprocessorField() == 0xA) {
+    switch (instr->OpcodeField()) {
+      case 0x8:
+        if (instr->HasL()) {
+          Format(instr, "vldr'cond 'Sd, ['rn - 4*'off8]");
+        } else {
+          Format(instr, "vstr'cond 'Sd, ['rn - 4*'off8]");
+        }
+        break;
+      case 0xC:
+        if (instr->HasL()) {
+          Format(instr, "vldr'cond 'Sd, ['rn + 4*'off8]");
+        } else {
+          Format(instr, "vstr'cond 'Sd, ['rn + 4*'off8]");
+        }
+        break;
+      default:
+        Unknown(instr);  // Not used by V8.
+        break;
+    }
+  } else if (instr->CoprocessorField() == 0xB) {
     switch (instr->OpcodeField()) {
       case 0x2:
         // Load and store double to two GP registers
@@ -1056,6 +1164,8 @@
         Unknown(instr);  // Not used by V8.
         break;
     }
+  } else {
+    UNIMPLEMENTED();  // Not used by V8.
   }
 }
 
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index aa7128f..5dedc29 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -29,6 +29,7 @@
 
 #include "codegen-inl.h"
 #include "fast-codegen.h"
+#include "scopes.h"
 
 namespace v8 {
 namespace internal {
@@ -39,6 +40,7 @@
 Register FastCodeGenerator::accumulator1() { return r1; }
 Register FastCodeGenerator::scratch0() { return r3; }
 Register FastCodeGenerator::scratch1() { return r4; }
+Register FastCodeGenerator::scratch2() { return r5; }
 Register FastCodeGenerator::receiver_reg() { return r2; }
 Register FastCodeGenerator::context_reg() { return cp; }
 
@@ -99,7 +101,7 @@
 
   if (needs_write_barrier) {
     __ mov(scratch1(), Operand(offset));
-    __ RecordWrite(scratch0(), scratch1(), ip);
+    __ RecordWrite(scratch0(), scratch1(), scratch2());
   }
 
   if (destination().is(accumulator1())) {
@@ -179,6 +181,7 @@
 void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
   info_ = compilation_info;
+  Comment cmnt(masm_, "[ function compiled by fast code generator");
 
   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 670de09..e9bdfe5 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -32,6 +32,7 @@
 #include "debug.h"
 #include "full-codegen.h"
 #include "parser.h"
+#include "scopes.h"
 
 namespace v8 {
 namespace internal {
@@ -56,6 +57,7 @@
   ASSERT(info_ == NULL);
   info_ = info;
   SetFunctionPosition(function());
+  Comment cmnt(masm_, "[ function compiled by full code generator");
 
   if (mode == PRIMARY) {
     int locals_count = scope()->num_stack_slots();
@@ -123,7 +125,7 @@
       __ add(r2, fp,
              Operand(StandardFrameConstants::kCallerSPOffset + offset));
       __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
-      __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+      __ Push(r3, r2, r1);
 
       // Arguments to ArgumentsAccessStub:
       //   function, receiver address, parameter count.
@@ -192,36 +194,34 @@
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
 
+#ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
     Label check_exit_codesize;
     masm_->bind(&check_exit_codesize);
-
-    // Calculate the exact length of the return sequence and make sure that
-    // the constant pool is not emitted inside of the return sequence.
-    int num_parameters = scope()->num_parameters();
-    int32_t sp_delta = (num_parameters + 1) * kPointerSize;
-    int return_sequence_length = Assembler::kJSReturnSequenceLength;
-    if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
-      // Additional mov instruction generated.
-      return_sequence_length++;
+#endif
+    // Make sure that the constant pool is not emitted inside of the return
+    // sequence.
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      // Here we use masm_-> instead of the __ macro to keep the code coverage
+      // tool from instrumenting these instructions, as we rely on the code size.
+      int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+      CodeGenerator::RecordPositions(masm_, position);
+      __ RecordJSReturn();
+      masm_->mov(sp, fp);
+      masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
+      masm_->add(sp, sp, Operand(sp_delta));
+      masm_->Jump(lr);
     }
-    masm_->BlockConstPoolFor(return_sequence_length);
 
-    CodeGenerator::RecordPositions(masm_, position);
-    __ RecordJSReturn();
-    __ mov(sp, fp);
-    __ ldm(ia_w, sp, fp.bit() | lr.bit());
-    __ add(sp, sp, Operand(sp_delta));
-    __ Jump(lr);
-
+#ifdef DEBUG
     // Check that the size of the code used for returning matches what is
-    // expected by the debugger. The add instruction above is an addressing
-    // mode 1 instruction where there are restrictions on which immediate values
-    // can be encoded in the instruction and which immediate values requires
-    // use of an additional instruction for moving the immediate to a temporary
-    // register.
-    ASSERT_EQ(return_sequence_length,
-              masm_->InstructionsGeneratedSince(&check_exit_codesize));
+    // expected by the debugger. If the sp_delta above cannot be encoded in the
+    // add instruction, the add will generate two instructions.
+    int return_sequence_length =
+        masm_->InstructionsGeneratedSince(&check_exit_codesize);
+    CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
+          return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
+#endif
   }
 }
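Note: the rewrite above replaces the hand-counted BlockConstPoolFor() with a
scope object. A minimal sketch of the RAII idiom, with the bookkeeping stood
in by a hypothetical assembler (method names assumed from the diff; the real
Assembler tracks nesting similarly):

    // While a scope is alive the assembler must not emit its constant pool,
    // so the JS return sequence keeps a predictable instruction count and
    // the debugger can patch it in place.
    struct AssemblerSketch {
      int const_pool_blocked_nesting = 0;
      void StartBlockConstPool() { ++const_pool_blocked_nesting; }
      void EndBlockConstPool() { --const_pool_blocked_nesting; }
      bool IsConstPoolBlocked() const { return const_pool_blocked_nesting > 0; }
    };

    class BlockConstPoolScope {
     public:
      explicit BlockConstPoolScope(AssemblerSketch* a) : a_(a) {
        a_->StartBlockConstPool();
      }
      ~BlockConstPoolScope() { a_->EndBlockConstPool(); }
     private:
      AssemblerSketch* a_;
    };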
 
@@ -664,15 +664,14 @@
 void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
-  // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script(), this);
+  // Build the shared function info and instantiate the function based
+  // on it.
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(expr, script(), this);
   if (HasStackOverflow()) return;
 
-  ASSERT(boilerplate->IsBoilerplate());
-
   // Create a new closure.
-  __ mov(r0, Operand(boilerplate));
+  __ mov(r0, Operand(function_info));
   __ stm(db_w, sp, cp.bit() | r0.bit());
   __ CallRuntime(Runtime::kNewClosure, 2);
   Apply(context_, r0);
@@ -697,8 +696,8 @@
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in r2 and the global
     // object on the stack.
-    __ ldr(ip, CodeGenerator::GlobalObject());
-    __ push(ip);
+    __ ldr(r0, CodeGenerator::GlobalObject());
+    __ push(r0);
     __ mov(r2, Operand(var->name()));
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -729,7 +728,7 @@
     ASSERT_NOT_NULL(object_slot);
 
     // Load the object.
-    Move(r2, object_slot);
+    Move(r1, object_slot);
 
     // Assert that the key is a smi.
     Literal* key_literal = property->key()->AsLiteral();
@@ -737,12 +736,12 @@
     ASSERT(key_literal->handle()->IsSmi());
 
     // Load the key.
-    __ mov(r1, Operand(key_literal->handle()));
+    __ mov(r0, Operand(key_literal->handle()));
 
     // Push both as arguments to ic.
-    __ stm(db_w, sp, r2.bit() | r1.bit());
+    __ Push(r1, r0);
 
-    // Do a keyed property load.
+    // Call keyed load IC. It has all arguments on the stack and the key in r0.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
 
@@ -772,7 +771,7 @@
   __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
   __ mov(r2, Operand(expr->pattern()));
   __ mov(r1, Operand(expr->flags()));
-  __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
+  __ Push(r4, r3, r2, r1);
   __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   __ bind(&done);
   Apply(context_, r0);
@@ -781,15 +780,16 @@
 
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  __ ldr(r2, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
-  __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
-  __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
-  __ mov(r0, Operand(expr->constant_properties()));
-  __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
+  __ ldr(r3, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+  __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+  __ mov(r1, Operand(expr->constant_properties()));
+  __ mov(r0, Operand(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
+  __ Push(r3, r2, r1, r0);
   if (expr->depth() > 1) {
-    __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
-    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -860,7 +860,7 @@
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
   __ mov(r1, Operand(expr->constant_elements()));
-  __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+  __ Push(r3, r2, r1);
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else {
@@ -997,6 +997,7 @@
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
   __ mov(r2, Operand(key->handle()));
+  __ ldr(r0, MemOperand(sp, 0));
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
 }
@@ -1004,6 +1005,8 @@
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
+  // Call keyed load IC. It has all arguments on the stack and the key in r0.
+  __ ldr(r0, MemOperand(sp, 0));
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
 }
@@ -1012,7 +1015,7 @@
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      Expression::Context context) {
   __ pop(r1);
-  GenericBinaryOpStub stub(op, NO_OVERWRITE);
+  GenericBinaryOpStub stub(op, NO_OVERWRITE, r1, r0);
   __ CallStub(&stub);
   Apply(context, r0);
 }
@@ -1246,6 +1249,9 @@
       VisitForValue(prop->key(), kStack);
       // Record source code position for IC call.
       SetSourcePosition(prop->position());
+      // Call keyed load IC. It has all arguments on the stack and the key in
+      // r0.
+      __ ldr(r0, MemOperand(sp, 0));
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
       __ Call(ic, RelocInfo::CODE_TARGET);
       // Load receiver object into r1.
@@ -1605,7 +1611,7 @@
     __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
   }
   __ mov(r1, Operand(Smi::FromInt(count_value)));
-  GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+  GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
   __ CallStub(&stub);
   __ bind(&done);
 
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 2a1fef9..5b1915f 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -27,7 +27,9 @@
 
 #include "v8.h"
 
+#include "assembler-arm.h"
 #include "codegen-inl.h"
+#include "disasm.h"
 #include "ic-inl.h"
 #include "runtime.h"
 #include "stub-cache.h"
@@ -42,7 +44,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 // Helper function used from LoadIC/CallIC GenerateNormal.
 static void GenerateDictionaryLoad(MacroAssembler* masm,
                                    Label* miss,
@@ -60,17 +61,18 @@
   //      dictionary.
   //
   // r2 - holds the name of the property and is unchanged.
+  // r4 - used as temporary.
 
   Label done;
 
   // Check for the absence of an interceptor.
   // Load the map into t0.
   __ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
-  // Test the has_named_interceptor bit in the map.
-  __ ldr(r3, FieldMemOperand(t0, Map::kInstanceAttributesOffset));
-  __ tst(r3, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
-  // Jump to miss if the interceptor bit is set.
-  __ b(ne, miss);
+
+  // Bail out if the receiver has a named interceptor.
+  __ ldrb(r3, FieldMemOperand(t0, Map::kBitFieldOffset));
+  __ tst(r3, Operand(1 << Map::kHasNamedInterceptor));
+  __ b(nz, miss);
 
   // Bail out if we have a JS global proxy object.
   __ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -107,25 +109,25 @@
   static const int kProbes = 4;
   for (int i = 0; i < kProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
-    __ ldr(t1, FieldMemOperand(r2, String::kHashFieldOffset));
+    __ ldr(r4, FieldMemOperand(r2, String::kHashFieldOffset));
     if (i > 0) {
       // Add the probe offset (i + i * i) left shifted to avoid right shifting
       // the hash in a separate instruction. The value hash + i + i * i is right
       // shifted in the following and instruction.
       ASSERT(StringDictionary::GetProbeOffset(i) <
              1 << (32 - String::kHashFieldOffset));
-      __ add(t1, t1, Operand(
+      __ add(r4, r4, Operand(
           StringDictionary::GetProbeOffset(i) << String::kHashShift));
     }
-    __ and_(t1, r3, Operand(t1, LSR, String::kHashShift));
+    __ and_(r4, r3, Operand(r4, LSR, String::kHashShift));
 
     // Scale the index by multiplying by the element size.
     ASSERT(StringDictionary::kEntrySize == 3);
-    __ add(t1, t1, Operand(t1, LSL, 1));  // t1 = t1 * 3
+    __ add(r4, r4, Operand(r4, LSL, 1));  // r4 = r4 * 3
 
     // Check if the key is identical to the name.
-    __ add(t1, t0, Operand(t1, LSL, 2));
-    __ ldr(ip, FieldMemOperand(t1, kElementsStartOffset));
+    __ add(r4, t0, Operand(r4, LSL, 2));
+    __ ldr(ip, FieldMemOperand(r4, kElementsStartOffset));
     __ cmp(r2, Operand(ip));
     if (i != kProbes - 1) {
       __ b(eq, &done);
@@ -135,13 +137,102 @@
   }
 
   // Check that the value is a normal property.
-  __ bind(&done);  // t1 == t0 + 4*index
-  __ ldr(r3, FieldMemOperand(t1, kElementsStartOffset + 2 * kPointerSize));
+  __ bind(&done);  // r4 == t0 + 4*index
+  __ ldr(r3, FieldMemOperand(r4, kElementsStartOffset + 2 * kPointerSize));
   __ tst(r3, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
   __ b(ne, miss);
 
   // Get the value at the masked, scaled index and return.
-  __ ldr(t1, FieldMemOperand(t1, kElementsStartOffset + 1 * kPointerSize));
+  __ ldr(t1, FieldMemOperand(r4, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+                                         Label* miss,
+                                         Register elements,
+                                         Register key,
+                                         Register t0,
+                                         Register t1,
+                                         Register t2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver and is unchanged.
+  //
+  // key      - holds the smi key on entry and is unchanged if a branch is
+  //            performed to the miss label.
+  //
+  // Scratch registers:
+  //
+  // t0 - holds the untagged key on entry and holds the hash once computed.
+  //      Holds the result on exit if the load succeeded.
+  //
+  // t1 - used to hold the capacity mask of the dictionary.
+  //
+  // t2 - used for the index into the dictionary.
+  Label done;
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  __ mvn(t1, Operand(t0));
+  __ add(t0, t1, Operand(t0, LSL, 15));
+  // hash = hash ^ (hash >> 12);
+  __ eor(t0, t0, Operand(t0, LSR, 12));
+  // hash = hash + (hash << 2);
+  __ add(t0, t0, Operand(t0, LSL, 2));
+  // hash = hash ^ (hash >> 4);
+  __ eor(t0, t0, Operand(t0, LSR, 4));
+  // hash = hash * 2057;
+  __ mov(t1, Operand(2057));
+  __ mul(t0, t0, t1);
+  // hash = hash ^ (hash >> 16);
+  __ eor(t0, t0, Operand(t0, LSR, 16));
+
+  // Compute the capacity mask.
+  __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+  __ mov(t1, Operand(t1, ASR, kSmiTagSize));  // convert smi to int
+  __ sub(t1, t1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  static const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use t2 for index calculations and keep the hash intact in t0.
+    __ mov(t2, t0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
+    }
+    __ and_(t2, t2, Operand(t1));
+
+    // Scale the index by multiplying by the element size.
+    ASSERT(NumberDictionary::kEntrySize == 3);
+    __ add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
+
+    // Check if the key matches the entry.
+    __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
+    __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
+    __ cmp(key, Operand(ip));
+    if (i != kProbes - 1) {
+      __ b(eq, &done);
+    } else {
+      __ b(ne, miss);
+    }
+  }
+
+  __ bind(&done);
+  // Check that the value is a normal property.
+  // t2: elements + (index * kPointerSize)
+  const int kDetailsOffset =
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
+  __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+  __ b(ne, miss);
+
+  // Get the value at the masked, scaled index and return.
+  const int kValueOffset =
+      NumberDictionary::kElementsStartOffset + kPointerSize;
+  __ ldr(t0, FieldMemOperand(t2, kValueOffset));
 }
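Note: the mvn/add/eor/mul sequence above is an instruction-by-instruction
translation of the C hash it must stay in sync with (per the comment,
ComputeIntegerHash in utils.h); restated here for reference:

    #include <stdint.h>

    static inline uint32_t ComputeIntegerHash(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);  // the mvn/add pair
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // mov #2057 / mul
      hash = hash ^ (hash >> 16);
      return hash;
    }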
 
 
@@ -149,12 +240,11 @@
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r0, MemOperand(sp, 0));
-
   StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -165,12 +255,11 @@
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r0, MemOperand(sp, 0));
-
   StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
   // Cache miss: Jump to runtime.
   __ bind(&miss);
@@ -182,13 +271,11 @@
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss;
 
-  // Load receiver.
-  __ ldr(r0, MemOperand(sp, 0));
-
   StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -261,7 +348,8 @@
 static void GenerateNormalHelper(MacroAssembler* masm,
                                  int argc,
                                  bool is_global_object,
-                                 Label* miss) {
+                                 Label* miss,
+                                 Register scratch) {
   // Search dictionary - put result in register r1.
   GenerateDictionaryLoad(masm, miss, r0, r1);
 
@@ -270,7 +358,7 @@
   __ b(eq, miss);
 
   // Check that the value is a JSFunction.
-  __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
+  __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
   __ b(ne, miss);
 
   // Patch the receiver with the global proxy if necessary.
@@ -319,7 +407,7 @@
   __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
   __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
   __ b(ne, &miss);
-  GenerateNormalHelper(masm, argc, true, &miss);
+  GenerateNormalHelper(masm, argc, true, &miss, r4);
 
   // Accessing non-global object: Check for access to global proxy.
   Label global_proxy, invoke;
@@ -332,7 +420,7 @@
   __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
   __ b(ne, &miss);
   __ bind(&invoke);
-  GenerateNormalHelper(masm, argc, false, &miss);
+  GenerateNormalHelper(masm, argc, false, &miss, r4);
 
   // Global object access: Check access rights.
   __ bind(&global_proxy);
@@ -357,7 +445,7 @@
   __ EnterInternalFrame();
 
   // Push the receiver and the name of the function.
-  __ stm(db_w, sp, r2.bit() | r3.bit());
+  __ Push(r3, r2);
 
   // Call the entry.
   __ mov(r0, Operand(2));
@@ -399,10 +487,10 @@
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
 
-  __ ldr(r0, MemOperand(sp, 0));
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
                                          NOT_IN_LOOP,
@@ -418,11 +506,11 @@
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss, probe, global;
 
-  __ ldr(r0, MemOperand(sp, 0));
   // Check that the receiver isn't a smi.
   __ tst(r0, Operand(kSmiTagMask));
   __ b(eq, &miss);
@@ -461,34 +549,130 @@
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
 
-  __ ldr(r3, MemOperand(sp, 0));
-  __ stm(db_w, sp, r2.bit() | r3.bit());
+  __ mov(r3, r0);
+  __ Push(r3, r2);
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
-// TODO(181): Implement map patching once loop nesting is tracked on the
-// ARM platform so we can generate inlined fast-case code loads in
-// loops.
-void LoadIC::ClearInlinedVersion(Address address) {}
+static inline bool IsInlinedICSite(Address address,
+                                   Address* inline_end_address) {
+  // If the instruction after the call site is not the pseudo instruction nop1
+  // then this is not related to an inlined in-object property load. The nop1
+  // instruction is located just after the call to the IC in the deferred code
+  // handling the miss in the inlined code. After the nop1 instruction there is
+  // a branch instruction for jumping back from the deferred code.
+  Address address_after_call = address + Assembler::kCallTargetAddressOffset;
+  Instr instr_after_call = Assembler::instr_at(address_after_call);
+  if (!Assembler::IsNop(instr_after_call, PROPERTY_ACCESS_INLINED)) {
+    return false;
+  }
+  Address address_after_nop = address_after_call + Assembler::kInstrSize;
+  Instr instr_after_nop = Assembler::instr_at(address_after_nop);
+  ASSERT(Assembler::IsBranch(instr_after_nop));
+
+  // Find the end of the inlined code for handling the load.
+  int b_offset =
+      Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
+  ASSERT(b_offset < 0);  // Jumping back from deferred code.
+  *inline_end_address = address_after_nop + b_offset;
+
+  return true;
+}
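Note: a host-side restatement of the address arithmetic in IsInlinedICSite.
kPcLoadDelta is the usual 8-byte ARM PC read-ahead; the value of
kCallTargetAddressOffset is an assumption here (the real constants live in
assembler-arm.h):

    #include <stdint.h>

    typedef unsigned char* Address;
    const int kInstrSize = 4;                         // one ARM instruction
    const int kCallTargetAddressOffset = kInstrSize;  // assumption
    const int kPcLoadDelta = 8;                       // ARM PC read-ahead

    // branch_offset is the signed offset decoded from the branch that
    // follows the marker nop (Assembler::GetBranchOffset in the real code).
    static Address InlineEndAddress(Address call_site, int branch_offset) {
      Address after_call = call_site + kCallTargetAddressOffset;
      Address after_nop = after_call + kInstrSize;
      return after_nop + branch_offset + kPcLoadDelta;  // offset is negative
    }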
+
+
+void LoadIC::ClearInlinedVersion(Address address) {
+  // Reset the map check of the inlined in-object property load (if present) to
+  // guarantee failure by holding an invalid map (the null value). The offset
+  // can be patched to anything.
+  PatchInlinedLoad(address, Heap::null_value(), 0);
+}
+
+
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  return false;
+  // Find the end of the inlined code for handling the load if this is an
+  // inlined IC call site.
+  Address inline_end_address;
+  if (!IsInlinedICSite(address, &inline_end_address)) return false;
+
+  // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
+  // The immediate must be representable in 12 bits.
+  ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
+  Address ldr_property_instr_address =
+      inline_end_address - Assembler::kInstrSize;
+  ASSERT(Assembler::IsLdrRegisterImmediate(
+      Assembler::instr_at(ldr_property_instr_address)));
+  Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
+  ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
+      ldr_property_instr, offset - kHeapObjectTag);
+  Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
+
+  // Indicate that code has changed.
+  CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
+
+  // Patch the map check.
+  Address ldr_map_instr_address =
+      inline_end_address - 4 * Assembler::kInstrSize;
+  Assembler::set_target_address_at(ldr_map_instr_address,
+                                   reinterpret_cast<Address>(map));
+  return true;
 }
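Note: PatchInlinedLoad above rewrites the 12-bit immediate of the inlined
"ldr r0, [r1, #+XXX]" in place. The encoding detail it relies on, sketched
under the assumption of a positive offset with the U bit already set (A32
LDR-immediate keeps imm12 in bits 11:0):

    #include <stdint.h>

    static inline uint32_t SetLdrImmediateOffset(uint32_t instr, int offset) {
      // Valid for 0 <= offset < 4096; the ASSERT on the maximum in-object
      // property offset above guarantees this.
      return (instr & ~0xFFFu) | (static_cast<uint32_t>(offset) & 0xFFFu);
    }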
 
-void KeyedLoadIC::ClearInlinedVersion(Address address) {}
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+  // Reset the map check of the inlined keyed load (if present) to
+  // guarantee failure by holding an invalid map (the null value).
+  PatchInlinedLoad(address, Heap::null_value());
+}
+
+
 bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  return false;
+  Address inline_end_address;
+  if (!IsInlinedICSite(address, &inline_end_address)) return false;
+
+  // Patch the map check.
+  Address ldr_map_instr_address =
+      inline_end_address - 18 * Assembler::kInstrSize;
+  Assembler::set_target_address_at(ldr_map_instr_address,
+                                   reinterpret_cast<Address>(map));
+  return true;
 }
 
-void KeyedStoreIC::ClearInlinedVersion(Address address) {}
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
+
+void KeyedStoreIC::ClearInlinedVersion(Address address) {
+  // Insert null as the elements map to check for.  This will make
+  // sure that the elements fast-case map check fails so that control
+  // flows to the IC instead of the inlined version.
+  PatchInlinedStore(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {
+  // Restore the fast-case elements map check so that the inlined
+  // version can be used again.
+  PatchInlinedStore(address, Heap::fixed_array_map());
+}
+
+
 bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  return false;
+  // Find the end of the inlined code for handling the store if this is an
+  // inlined IC call site.
+  Address inline_end_address;
+  if (!IsInlinedICSite(address, &inline_end_address)) return false;
+
+  // Patch the map check.
+  Address ldr_map_instr_address =
+      inline_end_address - 5 * Assembler::kInstrSize;
+  Assembler::set_target_address_at(ldr_map_instr_address,
+                                   reinterpret_cast<Address>(map));
+  return true;
 }
 
 
@@ -498,41 +682,45 @@
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
+  //  -- r0     : key
   //  -- sp[0]  : key
   //  -- sp[4]  : receiver
   // -----------------------------------
 
-  __ ldm(ia, sp, r2.bit() | r3.bit());
-  __ stm(db_w, sp, r2.bit() | r3.bit());
+  __ ldr(r1, MemOperand(sp, kPointerSize));
+  __ Push(r1, r0);
 
-  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
+  //  -- r0     : key
   //  -- sp[0]  : key
   //  -- sp[4]  : receiver
   // -----------------------------------
 
-  __ ldm(ia, sp, r2.bit() | r3.bit());
-  __ stm(db_w, sp, r2.bit() | r3.bit());
+  __ ldr(r1, MemOperand(sp, kPointerSize));
+  __ Push(r1, r0);
 
-  __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
+  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
 }
 
 
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
+  //  -- r0     : key
   //  -- sp[0]  : key
   //  -- sp[4]  : receiver
   // -----------------------------------
-  Label slow, fast;
+  Label slow, fast, check_pixel_array, check_number_dictionary;
 
-  // Get the key and receiver object from the stack.
-  __ ldm(ia, sp, r0.bit() | r1.bit());
+  // Get the object from the stack.
+  __ ldr(r1, MemOperand(sp, kPointerSize));
 
   // Check that the object isn't a smi.
   __ BranchOnSmi(r1, &slow);
@@ -553,6 +741,8 @@
 
   // Check that the key is a smi.
   __ BranchOnNotSmi(r0, &slow);
+  // Save key in r2 in case we want it for the number dictionary case.
+  __ mov(r2, r0);
   __ mov(r0, Operand(r0, ASR, kSmiTagSize));
 
   // Get the elements array of the object.
@@ -561,19 +751,12 @@
   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
   __ cmp(r3, ip);
-  __ b(ne, &slow);
+  __ b(ne, &check_pixel_array);
   // Check that the key (index) is within bounds.
   __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
-  __ cmp(r0, Operand(r3));
-  __ b(lo, &fast);
-
-  // Slow case: Push extra copies of the arguments (2).
-  __ bind(&slow);
-  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
-  GenerateRuntimeGetProperty(masm);
-
+  __ cmp(r0, r3);
+  __ b(hs, &slow);
   // Fast case: Do the load.
-  __ bind(&fast);
   __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -581,39 +764,407 @@
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
   __ b(eq, &slow);
-
   __ Ret();
+
+  // Check whether the elements is a pixel array.
+  __ bind(&check_pixel_array);
+  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
+  __ cmp(r3, ip);
+  __ b(ne, &check_number_dictionary);
+  __ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset));
+  __ cmp(r0, ip);
+  __ b(hs, &slow);
+  __ ldr(ip, FieldMemOperand(r1, PixelArray::kExternalPointerOffset));
+  __ ldrb(r0, MemOperand(ip, r0));
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Tag result as smi.
+  __ Ret();
+
+  __ bind(&check_number_dictionary);
+  // Check whether the elements is a number dictionary.
+  // r0: untagged index
+  // r1: elements
+  // r2: key
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r3, ip);
+  __ b(ne, &slow);
+  GenerateNumberDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4);
+  __ Ret();
+
+  // Slow case: Push extra copies of the arguments (2).
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
+  __ ldr(r0, MemOperand(sp, 0));
+  GenerateRuntimeGetProperty(masm);
 }
 
 
 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
+  //  -- r0     : key
   //  -- sp[0]  : key
   //  -- sp[4]  : receiver
   // -----------------------------------
+  Label miss;
+  Label index_not_smi;
+  Label index_out_of_range;
+  Label slow_char_code;
+  Label got_char_code;
 
+  // Get the object from the stack.
+  __ ldr(r1, MemOperand(sp, kPointerSize));
+
+  Register object = r1;
+  Register index = r0;
+  Register code = r2;
+  Register scratch = r3;
+
+  StringHelper::GenerateFastCharCodeAt(masm,
+                                       object,
+                                       index,
+                                       scratch,
+                                       code,
+                                       &miss,  // When not a string.
+                                       &index_not_smi,
+                                       &index_out_of_range,
+                                       &slow_char_code);
+
+  // If we didn't bail out, the code register contains the smi-tagged
+  // char code.
+  __ bind(&got_char_code);
+  StringHelper::GenerateCharFromCode(masm, code, scratch, r0, JUMP_FUNCTION);
+#ifdef DEBUG
+  __ Abort("Unexpected fall-through from char from code tail call");
+#endif
+
+  // Check if key is a heap number.
+  __ bind(&index_not_smi);
+  __ CheckMap(index, scratch, Factory::heap_number_map(), &miss, true);
+
+  // Push receiver and key on the stack (now that we know they are a
+  // string and a number), and call runtime.
+  __ bind(&slow_char_code);
+  __ EnterInternalFrame();
+  __ Push(object, index);
+  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+  ASSERT(!code.is(r0));
+  __ mov(code, r0);
+  __ LeaveInternalFrame();
+
+  // Check if the runtime call returned NaN char code. If yes, return
+  // undefined. Otherwise, we can continue.
+  if (FLAG_debug_code) {
+    __ BranchOnSmi(code, &got_char_code);
+    __ ldr(scratch, FieldMemOperand(code, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(scratch, ip);
+    __ Assert(eq, "StringCharCodeAt must return smi or heap number");
+  }
+  __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+  __ cmp(code, scratch);
+  __ b(ne, &got_char_code);
+  __ bind(&index_out_of_range);
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  __ Ret();
+
+  __ bind(&miss);
   GenerateGeneric(masm);
 }
 
 
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register hiword,
+                                Register loword,
+                                Register scratch,
+                                int leading_zeroes) {
+  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+  const int mantissa_shift_for_hi_word =
+      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+  const int mantissa_shift_for_lo_word =
+      kBitsPerInt - mantissa_shift_for_hi_word;
+
+  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+  if (mantissa_shift_for_hi_word > 0) {
+    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
+  } else {
+    __ mov(loword, Operand(0));
+    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
+  }
+
+  // If the least significant bit of the biased exponent was not 1, it was
+  // corrupted by the most significant bit of the mantissa, so fix it up.
+  if (!(biased_exponent & 1)) {
+    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+  }
+}
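Note: a host-side rendering of GenerateUInt2Double for the two cases it is
used with below (0 and 1 leading zeroes, so the hi-word shift is positive).
The IEEE 754 constants are written out (bias 1023, 20 mantissa bits in the
top word); as stated above, 0 is not handled:

    #include <stdint.h>

    static void UInt2Double(uint32_t value, int leading_zeroes,
                            uint32_t* hiword, uint32_t* loword) {
      const int kBias = 1023;
      const int kTopMantissaBits = 20;
      const int meaningful_bits = 32 - leading_zeroes - 1;
      const uint32_t biased_exponent = kBias + meaningful_bits;
      const int hi_shift = meaningful_bits - kTopMantissaBits;  // > 0 here
      *loword = value << (32 - hi_shift);
      *hiword = (biased_exponent << kTopMantissaBits) | (value >> hi_shift);
      // The implicit leading 1 of the mantissa lands on exponent bit 0;
      // clear it again if the biased exponent has a 0 there (the bic above).
      if (!(biased_exponent & 1)) *hiword &= ~(1u << kTopMantissaBits);
    }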
+
+
 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
                                         ExternalArrayType array_type) {
-  // TODO(476): port specialized code.
-  GenerateGeneric(masm);
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- r0     : key
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  // -----------------------------------
+  Label slow, failed_allocation;
+
+  // Get the object from the stack.
+  __ ldr(r1, MemOperand(sp, kPointerSize));
+
+  // r0: key
+  // r1: receiver object
+
+  // Check that the object isn't a smi
+  __ BranchOnSmi(r1, &slow);
+
+  // Check that the key is a smi.
+  __ BranchOnNotSmi(r0, &slow);
+
+  // Check that the object is a JS object. Load map into r2.
+  __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &slow);
+
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.
+  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ b(ne, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // r0: index (as a smi)
+  // r1: JSObject
+  __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
+  __ cmp(r2, ip);
+  __ b(ne, &slow);
+
+  // Check that the index is in range.
+  __ ldr(ip, FieldMemOperand(r1, ExternalArray::kLengthOffset));
+  __ cmp(r1, Operand(r0, ASR, kSmiTagSize));
+  // Unsigned comparison catches both negative and too-large values.
+  __ b(lo, &slow);
+
+  // r0: index (smi)
+  // r1: elements array
+  __ ldr(r1, FieldMemOperand(r1, ExternalArray::kExternalPointerOffset));
+  // r1: base pointer of external storage
+
+  // We do not untag the smi key; instead we work with it
+  // as if it were premultiplied by 2.
+  ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+
+  switch (array_type) {
+    case kExternalByteArray:
+      __ ldrsb(r0, MemOperand(r1, r0, LSR, 1));
+      break;
+    case kExternalUnsignedByteArray:
+      __ ldrb(r0, MemOperand(r1, r0, LSR, 1));
+      break;
+    case kExternalShortArray:
+      __ ldrsh(r0, MemOperand(r1, r0, LSL, 0));
+      break;
+    case kExternalUnsignedShortArray:
+      __ ldrh(r0, MemOperand(r1, r0, LSL, 0));
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ ldr(r0, MemOperand(r1, r0, LSL, 1));
+      break;
+    case kExternalFloatArray:
+      if (CpuFeatures::IsSupported(VFP3)) {
+        CpuFeatures::Scope scope(VFP3);
+        __ add(r0, r1, Operand(r0, LSL, 1));
+        __ vldr(s0, r0, 0);
+      } else {
+        __ ldr(r0, MemOperand(r1, r0, LSL, 1));
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
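Note: the LSR 1 / LSL 0 / LSL 1 scalings in the switch above fall out of
keeping the key smi-tagged: with kSmiTag == 0 and kSmiTagSize == 1, the smi
is already index * 2. Equivalent offset math:

    // Byte offset into the external array for a smi key, by element size:
    // bytes: smi >> 1, halfwords: smi, words: smi << 1.
    static inline int SmiKeyToByteOffset(int smi_key, int element_size) {
      int index = smi_key >> 1;     // untag
      return index * element_size;
    }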
+  // For integer array types:
+  // r0: value
+  // For the floating-point array type:
+  // s0: value (if VFP3 is supported)
+  // r0: value (if VFP3 is not supported)
+
+  if (array_type == kExternalIntArray) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
+    __ cmp(r0, Operand(0xC0000000));
+    __ b(mi, &box_int);
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ Ret();
+
+    __ bind(&box_int);
+
+    __ mov(r1, r0);
+    // Allocate a HeapNumber for the int and perform int-to-double
+    // conversion.
+    __ AllocateHeapNumber(r0, r3, r4, &slow);
+
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      __ vmov(s0, r1);
+      __ vcvt_f64_s32(d0, s0);
+      __ sub(r1, r0, Operand(kHeapObjectTag));
+      __ vstr(d0, r1, HeapNumber::kValueOffset);
+      __ Ret();
+    } else {
+      WriteInt32ToHeapNumberStub stub(r1, r0, r3);
+      __ TailCallStub(&stub);
+    }
+  } else if (array_type == kExternalUnsignedIntArray) {
+    // The test is different for unsigned int values. Since we need
+    // the value to be in the range of a positive smi, we can't
+    // handle either of the top two bits being set in the value.
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      Label box_int, done;
+      __ tst(r0, Operand(0xC0000000));
+      __ b(ne, &box_int);
+
+      __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+      __ Ret();
+
+      __ bind(&box_int);
+      __ vmov(s0, r0);
+      __ AllocateHeapNumber(r0, r1, r2, &slow);
+
+      __ vcvt_f64_u32(d0, s0);
+      __ sub(r1, r0, Operand(kHeapObjectTag));
+      __ vstr(d0, r1, HeapNumber::kValueOffset);
+      __ Ret();
+    } else {
+      // Check whether unsigned integer fits into smi.
+      Label box_int_0, box_int_1, done;
+      __ tst(r0, Operand(0x80000000));
+      __ b(ne, &box_int_0);
+      __ tst(r0, Operand(0x40000000));
+      __ b(ne, &box_int_1);
+
+      // Tag integer as smi and return it.
+      __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+      __ Ret();
+
+      __ bind(&box_int_0);
+      // Integer does not have leading zeros.
+      GenerateUInt2Double(masm, r0, r1, r2, 0);
+      __ b(&done);
+
+      __ bind(&box_int_1);
+      // Integer has one leading zero.
+      GenerateUInt2Double(masm, r0, r1, r2, 1);
+
+      __ bind(&done);
+      // Integer was converted to double in registers r0:r1.
+      // Wrap it into a HeapNumber.
+      __ AllocateHeapNumber(r2, r3, r5, &slow);
+
+      __ str(r0, FieldMemOperand(r2, HeapNumber::kExponentOffset));
+      __ str(r1, FieldMemOperand(r2, HeapNumber::kMantissaOffset));
+
+      __ mov(r0, r2);
+
+      __ Ret();
+    }
+  } else if (array_type == kExternalFloatArray) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      __ AllocateHeapNumber(r0, r1, r2, &slow);
+      __ vcvt_f64_f32(d0, s0);
+      __ sub(r1, r0, Operand(kHeapObjectTag));
+      __ vstr(d0, r1, HeapNumber::kValueOffset);
+      __ Ret();
+    } else {
+      __ AllocateHeapNumber(r3, r1, r2, &slow);
+      // VFP is not available, do manual single to double conversion.
+
+      // r0: floating point value (binary32)
+
+      // Extract mantissa to r1.
+      __ and_(r1, r0, Operand(kBinary32MantissaMask));
+
+      // Extract exponent to r2.
+      __ mov(r2, Operand(r0, LSR, kBinary32MantissaBits));
+      __ and_(r2, r2, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+      Label exponent_rebiased;
+      __ teq(r2, Operand(0x00));
+      __ b(eq, &exponent_rebiased);
+
+      __ teq(r2, Operand(0xff));
+      __ mov(r2, Operand(0x7ff), LeaveCC, eq);
+      __ b(eq, &exponent_rebiased);
+
+      // Rebias exponent.
+      __ add(r2,
+             r2,
+             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+      __ bind(&exponent_rebiased);
+      __ and_(r0, r0, Operand(kBinary32SignMask));
+      __ orr(r0, r0, Operand(r2, LSL, HeapNumber::kMantissaBitsInTopWord));
+
+      // Shift mantissa.
+      static const int kMantissaShiftForHiWord =
+          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+      static const int kMantissaShiftForLoWord =
+          kBitsPerInt - kMantissaShiftForHiWord;
+
+      __ orr(r0, r0, Operand(r1, LSR, kMantissaShiftForHiWord));
+      __ mov(r1, Operand(r1, LSL, kMantissaShiftForLoWord));
+
+      __ str(r0, FieldMemOperand(r3, HeapNumber::kExponentOffset));
+      __ str(r1, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
+      __ mov(r0, r3);
+      __ Ret();
+    }
+
+  } else {
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ Ret();
+  }
+
+  // Slow case: Load key and receiver from the stack and jump to the runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1);
+  __ ldr(r0, MemOperand(sp, 0));
+  GenerateRuntimeGetProperty(masm);
 }
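Note: the cmp #0xC0000000 (signed) and tst #0xC0000000 (unsigned) tests
above both ask whether the loaded 32-bit value fits the 31-bit smi payload;
restated on the host side:

    #include <stdint.h>

    static inline bool Int32FitsSmi(int32_t v) {
      return v >= -(1 << 30) && v < (1 << 30);
    }
    static inline bool Uint32FitsSmi(uint32_t v) {
      return (v & 0xC0000000u) == 0;  // top two bits must be clear
    }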
 
 
 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
+  //  -- r0     : key
   //  -- sp[0]  : key
   //  -- sp[4]  : receiver
   // -----------------------------------
   Label slow;
 
-  // Get the key and receiver object from the stack.
-  __ ldm(ia, sp, r0.bit() | r1.bit());
+  // Get the object from the stack.
+  __ ldr(r1, MemOperand(sp, kPointerSize));
 
   // Check that the receiver isn't a smi.
   __ BranchOnSmi(r1, &slow);
@@ -632,11 +1183,10 @@
   __ b(ne, &slow);
 
   // Everything is fine, call runtime.
-  __ push(r1);  // receiver
-  __ push(r0);  // key
+  __ Push(r1, r0);  // Receiver, key.
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(
+  __ TailCallExternalReference(ExternalReference(
         IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
 
   __ bind(&slow);
@@ -653,9 +1203,10 @@
   // -----------------------------------
 
   __ ldm(ia, sp, r2.bit() | r3.bit());
-  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+  __ Push(r3, r2, r0);
 
-  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+  __ TailCallExternalReference(ref, 3, 1);
 }
 
 
@@ -667,9 +1218,9 @@
   //  -- sp[1]  : receiver
   // -----------------------------------
   __ ldm(ia, sp, r1.bit() | r3.bit());  // r0 == value, r1 == key, r3 == object
-  __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
+  __ Push(r3, r1, r0);
 
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
 }
 
 
@@ -680,7 +1231,7 @@
   //  -- sp[0]  : key
   //  -- sp[1]  : receiver
   // -----------------------------------
-  Label slow, fast, array, extra, exit;
+  Label slow, fast, array, extra, exit, check_pixel_array;
 
   // Get the key and the object from the stack.
   __ ldm(ia, sp, r1.bit() | r3.bit());  // r1 = key, r3 = receiver
@@ -713,7 +1264,7 @@
   __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
   __ cmp(r2, ip);
-  __ b(ne, &slow);
+  __ b(ne, &check_pixel_array);
   // Untag the key (for checking against untagged length in the fixed array).
   __ mov(r1, Operand(r1, ASR, kSmiTagSize));
   // Compute address to store into and check array bounds.
@@ -728,6 +1279,37 @@
   __ bind(&slow);
   GenerateRuntimeSetProperty(masm);
 
+  // Check whether the elements is a pixel array.
+  // r0: value
+  // r1: index (as a smi), zero-extended.
+  // r3: elements array
+  __ bind(&check_pixel_array);
+  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
+  __ cmp(r2, ip);
+  __ b(ne, &slow);
+  // Check that the value is a smi. If a conversion is needed, call into the
+  // runtime to convert and clamp.
+  __ BranchOnNotSmi(r0, &slow);
+  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // Untag the key.
+  __ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset));
+  __ cmp(r1, Operand(ip));
+  __ b(hs, &slow);
+  __ mov(r4, r0);  // Save the value.
+  __ mov(r0, Operand(r0, ASR, kSmiTagSize));  // Untag the value.
+  {  // Clamp the value to [0..255].
+    Label done;
+    __ tst(r0, Operand(0xFFFFFF00));
+    __ b(eq, &done);
+    __ mov(r0, Operand(0), LeaveCC, mi);  // 0 if negative.
+    __ mov(r0, Operand(255), LeaveCC, pl);  // 255 if positive.
+    __ bind(&done);
+  }
+  __ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset));
+  __ strb(r0, MemOperand(r2, r1));
+  __ mov(r0, Operand(r4));  // Return the original value.
+  __ Ret();
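Note: the conditional moves above clamp to [0..255] without extra branches:
the tst falls through for in-range values, and the mi/pl moves pick 0 or 255
otherwise. The same logic in C++:

    #include <stdint.h>

    static inline uint8_t ClampToByte(int32_t value) {
      if ((value & ~0xFF) == 0) return static_cast<uint8_t>(value);
      return value < 0 ? 0 : 255;  // mi -> 0, pl -> 255
    }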
+
+
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
   // element to the array by writing to array[array.length].
@@ -790,10 +1372,376 @@
 }
 
 
+// Convert the int passed in register ival to an IEEE 754 single-precision
+// floating-point value and store it in register fval.
+// If VFP3 is available use it for conversion.
+static void ConvertIntToFloat(MacroAssembler* masm,
+                              Register ival,
+                              Register fval,
+                              Register scratch1,
+                              Register scratch2) {
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, ival);
+    __ vcvt_f32_s32(s0, s0);
+    __ vmov(fval, s0);
+  } else {
+    Label not_special, done;
+    // Move the sign bit from the source to the destination.  This works
+    // because the sign bit of a binary32 value has the same position and
+    // polarity as the 2's complement sign bit of a 32-bit integer.
+    ASSERT(kBinary32SignMask == 0x80000000u);
+
+    __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
+    // Negate value if it is negative.
+    __ rsb(ival, ival, Operand(0), LeaveCC, ne);
+
+    // We have -1, 0 or 1, which we treat specially. Register ival contains
+    // absolute value: it is either equal to 1 (special case of -1 and 1),
+    // greater than 1 (not a special case) or less than 1 (special case of 0).
+    __ cmp(ival, Operand(1));
+    __ b(gt, &not_special);
+
+    // For 1 or -1 we need to or in the 0 exponent (biased).
+    static const uint32_t exponent_word_for_1 =
+        kBinary32ExponentBias << kBinary32ExponentShift;
+
+    __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
+    __ b(&done);
+
+    __ bind(&not_special);
+    // Count leading zeros.
+    // Gets the wrong answer for 0, but we already checked for that case above.
+    Register zeros = scratch2;
+    __ CountLeadingZeros(ival, scratch1, zeros);
+
+    // Compute exponent and or it into the exponent register.
+    __ rsb(scratch1,
+           zeros,
+           Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
+
+    __ orr(fval,
+           fval,
+           Operand(scratch1, LSL, kBinary32ExponentShift));
+
+    // Shift up the source chopping the top bit off.
+    __ add(zeros, zeros, Operand(1));
+    // This wouldn't work for 1 and -1; the shift would be 32, which means 0.
+    __ mov(ival, Operand(ival, LSL, zeros));
+    // Or in the top bits of the mantissa.
+    __ orr(fval,
+           fval,
+           Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
+
+    __ bind(&done);
+  }
+}
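Note: the non-VFP path of ConvertIntToFloat builds the binary32 by hand:
sign first, then a CLZ-derived exponent, then the truncated mantissa. A
host-side equivalent of that bit construction (on real hardware one would
simply cast; this mirrors the stub, including the +/-1 special case):

    #include <stdint.h>

    static uint32_t IntToBinary32Bits(int32_t ival) {
      uint32_t bits = static_cast<uint32_t>(ival);
      uint32_t sign = bits & 0x80000000u;
      uint32_t mag = sign ? 0u - bits : bits;    // overflow-safe abs
      if (mag == 0) return 0;                    // 0 encodes as all-zero bits
      if (mag == 1) return sign | (127u << 23);  // +/-1: bare biased exponent
      int zeros = 0;                             // CountLeadingZeros
      while (!(mag & (0x80000000u >> zeros))) zeros++;
      uint32_t exponent = static_cast<uint32_t>(31 - zeros) + 127u;
      uint32_t shifted = mag << (zeros + 1);     // chop the implicit leading 1
      return sign | (exponent << 23) | (shifted >> 9);  // 9 == 32 - 23
    }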
+
+
+static bool IsElementTypeSigned(ExternalArrayType array_type) {
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalShortArray:
+    case kExternalIntArray:
+      return true;
+
+    case kExternalUnsignedByteArray:
+    case kExternalUnsignedShortArray:
+    case kExternalUnsignedIntArray:
+      return false;
+
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
                                          ExternalArrayType array_type) {
-  // TODO(476): port specialized code.
-  GenerateGeneric(masm);
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[1]  : receiver
+  // -----------------------------------
+  Label slow, check_heap_number;
+
+  // Get the key and the object from the stack.
+  __ ldm(ia, sp, r1.bit() | r2.bit());  // r1 = key, r2 = receiver
+
+  // Check that the object isn't a smi.
+  __ BranchOnSmi(r2, &slow);
+
+  // Check that the object is a JS object. Load map into r3.
+  __ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE);
+  __ b(le, &slow);
+
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
+  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ b(ne, &slow);
+
+  // Check that the key is a smi.
+  __ BranchOnNotSmi(r1, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // r0: value
+  // r1: index (smi)
+  // r2: object
+  __ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
+  __ cmp(r3, ip);
+  __ b(ne, &slow);
+
+  // Check that the index is in range.
+  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // Untag the index.
+  __ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset));
+  __ cmp(r1, ip);
+  // Unsigned comparison catches both negative and too-large values.
+  __ b(hs, &slow);
+
+  // Handle both smis and HeapNumbers in the fast path. Go to the
+  // runtime for all other kinds of values.
+  // r0: value
+  // r1: index (integer)
+  // r2: array
+  __ BranchOnNotSmi(r0, &check_heap_number);
+  __ mov(r3, Operand(r0, ASR, kSmiTagSize));  // Untag the value.
+  __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
+
+  // r1: index (integer)
+  // r2: base pointer of external storage
+  // r3: value (integer)
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalUnsignedByteArray:
+      __ strb(r3, MemOperand(r2, r1, LSL, 0));
+      break;
+    case kExternalShortArray:
+    case kExternalUnsignedShortArray:
+      __ strh(r3, MemOperand(r2, r1, LSL, 1));
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ str(r3, MemOperand(r2, r1, LSL, 2));
+      break;
+    case kExternalFloatArray:
+      // Need to perform int-to-float conversion.
+      ConvertIntToFloat(masm, r3, r4, r5, r6);
+      __ str(r4, MemOperand(r2, r1, LSL, 2));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // r0: value
+  __ Ret();
+
+
+  // r0: value
+  // r1: index (integer)
+  // r2: external array object
+  __ bind(&check_heap_number);
+  __ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+
+  __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
+
+  // The WebGL specification leaves the behavior of storing NaN and
+  // +/-Infinity into integer arrays basically undefined. For more
+  // reproducible behavior, convert these to zero.
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+
+    // vldr requires the offset to be a multiple of 4, so we cannot
+    // fold -kHeapObjectTag into it.
+    __ sub(r3, r0, Operand(kHeapObjectTag));
+    __ vldr(d0, r3, HeapNumber::kValueOffset);
+
+    if (array_type == kExternalFloatArray) {
+      __ vcvt_f32_f64(s0, d0);
+      __ vmov(r3, s0);
+      __ str(r3, MemOperand(r2, r1, LSL, 2));
+    } else {
+      Label done;
+
+      // Need to perform float-to-int conversion.
+      // Test for NaN.
+      __ vcmp(d0, d0);
+      // Move vector status bits to normal status bits.
+      __ vmrs(v8::internal::pc);
+      __ mov(r3, Operand(0), LeaveCC, vs);  // NaN converts to 0
+      __ b(vs, &done);
+
+      // Test whether the exponent is equal to 0x7FF (infinity or NaN).
+      __ vmov(r4, r3, d0);
+      __ mov(r5, Operand(0x7FF00000));
+      __ and_(r3, r3, Operand(r5));
+      __ teq(r3, Operand(r5));
+      __ mov(r3, Operand(0), LeaveCC, eq);
+
+      // Not infinity or NaN: simply convert to int.
+      if (IsElementTypeSigned(array_type)) {
+        __ vcvt_s32_f64(s0, d0, ne);
+      } else {
+        __ vcvt_u32_f64(s0, d0, ne);
+      }
+
+      __ vmov(r3, s0, ne);
+
+      __ bind(&done);
+      switch (array_type) {
+        case kExternalByteArray:
+        case kExternalUnsignedByteArray:
+          __ strb(r3, MemOperand(r2, r1, LSL, 0));
+          break;
+        case kExternalShortArray:
+        case kExternalUnsignedShortArray:
+          __ strh(r3, MemOperand(r2, r1, LSL, 1));
+          break;
+        case kExternalIntArray:
+        case kExternalUnsignedIntArray:
+          __ str(r3, MemOperand(r2, r1, LSL, 2));
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+
+    // r0: original value
+    __ Ret();
+  } else {
+    // VFP3 is not available, do manual conversions.
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+
+    if (array_type == kExternalFloatArray) {
+      Label done, nan_or_infinity_or_zero;
+      static const int kMantissaInHiWordShift =
+          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+      static const int kMantissaInLoWordShift =
+          kBitsPerInt - kMantissaInHiWordShift;
+
+      // Test for all special exponent values: zeros, subnormal numbers, NaNs
+      // and infinities. All these should be converted to 0.
+      __ mov(r5, Operand(HeapNumber::kExponentMask));
+      __ and_(r6, r3, Operand(r5), SetCC);
+      __ b(eq, &nan_or_infinity_or_zero);
+
+      __ teq(r6, Operand(r5));
+      __ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq);
+      __ b(eq, &nan_or_infinity_or_zero);
+
+      // Rebias exponent.
+      __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
+      __ add(r6,
+             r6,
+             Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+      __ cmp(r6, Operand(kBinary32MaxExponent));
+      __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt);
+      __ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt);
+      __ b(gt, &done);
+
+      __ cmp(r6, Operand(kBinary32MinExponent));
+      __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt);
+      __ b(lt, &done);
+
+      __ and_(r7, r3, Operand(HeapNumber::kSignMask));
+      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
+      __ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift));
+      __ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift));
+      __ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift));
+
+      __ bind(&done);
+      __ str(r3, MemOperand(r2, r1, LSL, 2));
+      __ Ret();
+
+      __ bind(&nan_or_infinity_or_zero);
+      __ and_(r7, r3, Operand(HeapNumber::kSignMask));
+      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
+      __ orr(r6, r6, r7);
+      __ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift));
+      __ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift));
+      __ b(&done);
+    } else {
+      bool is_signed_type  = IsElementTypeSigned(array_type);
+      int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+      int32_t min_value    = is_signed_type ? 0x80000000 : 0x00000000;
+
+      Label done, sign;
+
+      // Test for all special exponent values: zeros, subnormal numbers, NaNs
+      // and infinities. All these should be converted to 0.
+      __ mov(r5, Operand(HeapNumber::kExponentMask));
+      __ and_(r6, r3, Operand(r5), SetCC);
+      __ mov(r3, Operand(0), LeaveCC, eq);
+      __ b(eq, &done);
+
+      __ teq(r6, Operand(r5));
+      __ mov(r3, Operand(0), LeaveCC, eq);
+      __ b(eq, &done);
+
+      // Unbias exponent.
+      __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
+      __ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC);
+      // If the exponent is negative then the result is 0.
+      __ mov(r3, Operand(0), LeaveCC, mi);
+      __ b(mi, &done);
+
+      // If the exponent is too big then the result is the minimal value.
+      __ cmp(r6, Operand(meaningfull_bits - 1));
+      __ mov(r3, Operand(min_value), LeaveCC, ge);
+      __ b(ge, &done);
+
+      __ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC);
+      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
+      __ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+      __ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+      __ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl);
+      __ b(pl, &sign);
+
+      __ rsb(r6, r6, Operand(0));
+      __ mov(r3, Operand(r3, LSL, r6));
+      __ rsb(r6, r6, Operand(meaningfull_bits));
+      __ orr(r3, r3, Operand(r4, LSR, r6));
+
+      __ bind(&sign);
+      __ teq(r5, Operand(0));
+      __ rsb(r3, r3, Operand(0), LeaveCC, ne);
+
+      __ bind(&done);
+      switch (array_type) {
+        case kExternalByteArray:
+        case kExternalUnsignedByteArray:
+          __ strb(r3, MemOperand(r2, r1, LSL, 0));
+          break;
+        case kExternalShortArray:
+        case kExternalUnsignedShortArray:
+          __ strh(r3, MemOperand(r2, r1, LSL, 1));
+          break;
+        case kExternalIntArray:
+        case kExternalUnsignedIntArray:
+          __ str(r3, MemOperand(r2, r1, LSL, 2));
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+  }
+
+  // Slow case: call runtime.
+  __ bind(&slow);
+  GenerateRuntimeSetProperty(masm);
 }
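
The float path above rewrites a HeapNumber's 11-bit biased exponent as a
binary32 exponent. As a rough C++ sketch of that rebias and clamping (names
and constants here are illustrative, following IEEE-754 rather than the V8
headers):

  // Rebias a double exponent (bias 1023) to a float32 exponent (bias 127),
  // clamping overflow to the infinity exponent and underflow to zero.
  uint32_t RebiasForBinary32(uint32_t biased_exp11) {
    int32_t e = static_cast<int32_t>(biased_exp11) - 1023 + 127;
    if (e >= 0xFF) return 0xFF;  // Too large: becomes (signed) infinity.
    if (e <= 0) return 0;        // Too small: flushed to (signed) zero.
    return static_cast<uint32_t>(e);
  }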
 
 
@@ -824,11 +1772,58 @@
   //  -- lr    : return address
   // -----------------------------------
 
-  __ push(r1);
-  __ stm(db_w, sp, r2.bit() | r0.bit());
+  __ Push(r1, r2, r0);
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : receiver
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+  //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except external and pixel arrays, which means
+  // anything with elements of FixedArray type), but is currently restricted
+  // to JSArray.
+  // Value must be a number, but only smis are accepted as the most common case.
+
+  Label miss;
+
+  Register receiver = r1;
+  Register value = r0;
+  Register scratch = r3;
+
+  // Check that the receiver isn't a smi.
+  __ BranchOnSmi(receiver, &miss);
+
+  // Check that the object is a JS array.
+  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
+  __ b(ne, &miss);
+
+  // Check that elements are FixedArray.
+  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+  __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
+  __ b(ne, &miss);
+
+  // Check that value is a smi.
+  __ BranchOnNotSmi(value, &miss);
+
+  // Prepare tail call to StoreIC_ArrayLength.
+  __ Push(receiver, value);
+
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
+  __ TailCallExternalReference(ref, 2, 1);
+
+  __ bind(&miss);
+
+  GenerateMiss(masm);
 }
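
In JS terms this fast path covers plain stores like a.length = 5 on a JSArray
whose elements are a FixedArray and whose new length is a smi. A hypothetical
C++ predicate mirroring the guards above (illustrative only; the real checks
stay in assembly):

  bool FitsArrayLengthFastPath(Object* receiver, Object* value) {
    return !receiver->IsSmi()                               // not a smi
        && receiver->IsJSArray()                            // a JS array
        && JSArray::cast(receiver)->elements()->IsFixedArray()
        && value->IsSmi();                                  // smi length only
  }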
 
 
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index 3315f83..a13de0e 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -30,6 +30,7 @@
 #include "codegen-inl.h"
 #include "jump-target-inl.h"
 #include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -172,14 +173,7 @@
 
 
 void BreakTarget::Jump(Result* arg) {
-  // On ARM we do not currently emit merge code for jumps, so we need to do
-  // it explicitly here.  The only merging necessary is to drop extra
-  // statement state from the stack.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->Drop(count);
-  cgen()->frame()->Push(arg);
-  DoJump();
+  UNIMPLEMENTED();
 }
 
 
@@ -208,27 +202,7 @@
 
 
 void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_ + 1);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even
-  // on the fall through.  This is so we can bind the return target
-  // with state on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    // On ARM we do not currently emit merge code at binding sites, so we need
-    // to do it explicitly here.  The only merging necessary is to drop extra
-    // statement state from the stack.
-    cgen()->frame()->ForgetElements(count);
-    cgen()->frame()->Push(arg);
-  }
-  DoBind();
-  *arg = cgen()->frame()->Pop();
+  UNIMPLEMENTED();
 }
 
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index b9335f8..d97f04b 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -58,11 +58,6 @@
 #endif
 
 
-// Using blx may yield better code, so use it when required or when available
-#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
-#define USE_BLX 1
-#endif
-
 // Using bx does not yield better code, so use it only when required
 #if defined(USE_THUMB_INTERWORK)
 #define USE_BX 1
@@ -117,16 +112,34 @@
 
 void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond) {
+#if USE_BLX
+  // On ARMv5 and later the recommended call sequence is:
+  //  ldr ip, [pc, #...]
+  //  blx ip
+
+  // The two instructions (ldr and blx) could be separated by a constant
+  // pool and the code would still work. The issue comes from the
+  // patching code, which expects the ldr to be just above the blx.
+  { BlockConstPoolScope block_const_pool(this);
+    // Statement positions are expected to be recorded when the target
+    // address is loaded. The mov method will automatically record
+    // positions when pc is the target register. Since that is not the case
+    // here, we have to do it explicitly.
+    WriteRecordedPositions();
+
+    mov(ip, Operand(target, rmode), LeaveCC, cond);
+    blx(ip, cond);
+  }
+
+  ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
+#else
   // Set lr for return at current pc + 8.
   mov(lr, Operand(pc), LeaveCC, cond);
   // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
   mov(pc, Operand(target, rmode), LeaveCC, cond);
-  // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
-  // 'blx ip'; however, the code would not be shorter than the above sequence
-  // and the target address of the call would be referenced by the first
-  // instruction rather than the second one, which would make it harder to patch
-  // (two instructions before the return address, instead of one).
+
   ASSERT(kCallTargetAddressOffset == kInstrSize);
+#endif
 }
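
The practical difference between the two sequences is where a patcher finds
the call target: with blx, the constant-pool load sits two instructions
before the return address rather than one. A sketch of the lookup
(illustrative):

  // Address of the ldr that materializes the call target, given the address
  // the call returns to; kCallTargetAddressOffset is 2 * kInstrSize for the
  // blx sequence and kInstrSize for the classic 'mov lr / mov pc' sequence.
  Address target_load_site = return_address - kCallTargetAddressOffset;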
 
 
@@ -168,6 +181,19 @@
 }
 
 
+void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
+  if (scratch.is(no_reg)) {
+    eor(reg1, reg1, Operand(reg2));
+    eor(reg2, reg2, Operand(reg1));
+    eor(reg1, reg1, Operand(reg2));
+  } else {
+    mov(scratch, reg1);
+    mov(reg1, reg2);
+    mov(reg2, scratch);
+  }
+}
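
The scratch-free path is the classic three-eor swap. A minimal C++ sketch
(note that if both operands aliased the same register the eor variant would
zero it, so callers must pass distinct registers when omitting scratch):

  void SwapInts(int& a, int& b) {
    a ^= b;  // a now holds a ^ b
    b ^= a;  // b now holds the original a
    a ^= b;  // a now holds the original b
  }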
+
+
 void MacroAssembler::Call(Label* target) {
   bl(target);
 }
@@ -178,6 +204,13 @@
 }
 
 
+void MacroAssembler::Move(Register dst, Register src) {
+  if (!dst.is(src)) {
+    mov(dst, src);
+  }
+}
+
+
 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
   // Empty the const pool.
   CheckConstPool(true, true);
@@ -199,30 +232,23 @@
 }
 
 
-// Will clobber 4 registers: object, offset, scratch, ip.  The
-// register 'object' contains a heap object pointer.  The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register offset,
-                                 Register scratch) {
-  // The compiled code assumes that record write doesn't change the
-  // context register, so we check that none of the clobbered
-  // registers are cp.
-  ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+void MacroAssembler::RecordWriteHelper(Register object,
+                                       Register offset,
+                                       Register scratch) {
+  if (FLAG_debug_code) {
+    // Check that the object is not in new space.
+    Label not_in_new_space;
+    InNewSpace(object, scratch, ne, &not_in_new_space);
+    Abort("new-space object passed to RecordWriteHelper");
+    bind(&not_in_new_space);
+  }
 
   // This is how much we shift the remembered set bit offset to get the
   // offset of the word in the remembered set.  We divide by kBitsPerInt (32,
   // shift right 5) and then multiply by kIntSize (4, shift left 2).
   const int kRSetWordShift = 3;
 
-  Label fast, done;
-
-  // First, test that the object is not in the new space.  We cannot set
-  // remembered set bits in the new space.
-  // object: heap object pointer (with tag)
-  // offset: offset to store location from the object
-  and_(scratch, object, Operand(ExternalReference::new_space_mask()));
-  cmp(scratch, Operand(ExternalReference::new_space_start()));
-  b(eq, &done);
+  Label fast;
 
   // Compute the bit offset in the remembered set.
   // object: heap object pointer (with tag)
@@ -274,15 +300,47 @@
   mov(ip, Operand(1));
   orr(scratch, scratch, Operand(ip, LSL, offset));
   str(scratch, MemOperand(object));
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch) {
+  ASSERT(cc == eq || cc == ne);
+  and_(scratch, object, Operand(ExternalReference::new_space_mask()));
+  cmp(scratch, Operand(ExternalReference::new_space_start()));
+  b(cc, branch);
+}
+
+
+// Will clobber 4 registers: object, offset, scratch, ip.  The
+// register 'object' contains a heap object pointer.  The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object, Register offset,
+                                 Register scratch) {
+  // The compiled code assumes that record write doesn't change the
+  // context register, so we check that none of the clobbered
+  // registers are cp.
+  ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+
+  Label done;
+
+  // First, test that the object is not in the new space.  We cannot set
+  // remembered set bits in the new space.
+  InNewSpace(object, scratch, eq, &done);
+
+  // Record the actual write.
+  RecordWriteHelper(object, offset, scratch);
 
   bind(&done);
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (FLAG_debug_code) {
-    mov(object, Operand(bit_cast<int32_t>(kZapValue)));
-    mov(offset, Operand(bit_cast<int32_t>(kZapValue)));
-    mov(scratch, Operand(bit_cast<int32_t>(kZapValue)));
+    mov(object, Operand(BitCast<int32_t>(kZapValue)));
+    mov(offset, Operand(BitCast<int32_t>(kZapValue)));
+    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
   }
 }
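
The kRSetWordShift constant folds the two shifts described in the comment
into one. A sketch of the arithmetic (the low two bits are masked off by the
word-aligned access in the surrounding code):

  // (bit_offset / kBitsPerInt) * kIntSize
  //   == (bit_offset >> 5) << 2
  //   == (bit_offset >> 3) with the low two bits cleared.
  int word_byte_offset = (bit_offset >> kRSetWordShift) & ~3;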
 
@@ -323,10 +381,19 @@
   // ip = sp + kPointerSize * #args;
   add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
 
-  // Align the stack at this point.  After this point we have 5 pushes,
-  // so in fact we have to unalign here!  See also the assert on the
-  // alignment in AlignStack.
-  AlignStack(1);
+  // Prepare the stack to be aligned when calling into C. After this point there
+  // are 5 pushes before the call into C, so the stack needs to be aligned after
+  // 5 pushes.
+  int frame_alignment = ActivationFrameAlignment();
+  int frame_alignment_mask = frame_alignment - 1;
+  if (frame_alignment != kPointerSize) {
+    // The following code needs to be more general if this assert does not hold.
+    ASSERT(frame_alignment == 2 * kPointerSize);
+    // With 5 pushes left the frame must be unaligned at this point.
+    mov(r7, Operand(Smi::FromInt(0)));
+    tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
+    push(r7, eq);  // Push if aligned to make it unaligned.
+  }
 
   // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
   stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
@@ -357,27 +424,34 @@
 }
 
 
-void MacroAssembler::AlignStack(int offset) {
+void MacroAssembler::InitializeNewString(Register string,
+                                         Register length,
+                                         Heap::RootListIndex map_index,
+                                         Register scratch1,
+                                         Register scratch2) {
+  mov(scratch1, Operand(length, LSL, kSmiTagSize));
+  LoadRoot(scratch2, map_index);
+  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
+  mov(scratch1, Operand(String::kEmptyHashField));
+  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
 #if defined(V8_HOST_ARCH_ARM)
   // Running on the real platform. Use the alignment as mandated by the local
   // environment.
   // Note: This will break if we ever start generating snapshots on one ARM
   // platform for another ARM platform with a different alignment.
-  int activation_frame_alignment = OS::ActivationFrameAlignment();
+  return OS::ActivationFrameAlignment();
 #else  // defined(V8_HOST_ARCH_ARM)
   // If we are using the simulator then we should always align to the expected
   // alignment. As the simulator is used to generate snapshots we do not know
-  // if the target platform will need alignment, so we will always align at
-  // this point here.
-  int activation_frame_alignment = 2 * kPointerSize;
+  // if the target platform will need alignment, so this is controlled from a
+  // flag.
+  return FLAG_sim_stack_alignment;
 #endif  // defined(V8_HOST_ARCH_ARM)
-  if (activation_frame_alignment != kPointerSize) {
-    // This code needs to be made more general if this assert doesn't hold.
-    ASSERT(activation_frame_alignment == 2 * kPointerSize);
-    mov(r7, Operand(Smi::FromInt(0)));
-    tst(sp, Operand(activation_frame_alignment - offset));
-    push(r7, eq);  // Conditional push instruction.
-  }
 }
 
 
@@ -687,6 +761,7 @@
 Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                    JSObject* holder, Register holder_reg,
                                    Register scratch,
+                                   int save_at_depth,
                                    Label* miss) {
   // Make sure there's no overlap between scratch and the other
   // registers.
@@ -694,7 +769,11 @@
 
   // Keep track of the current object in register reg.
   Register reg = object_reg;
-  int depth = 1;
+  int depth = 0;
+
+  if (save_at_depth == depth) {
+    str(reg, MemOperand(sp));
+  }
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
@@ -734,6 +813,10 @@
       mov(reg, Operand(Handle<JSObject>(prototype)));
     }
 
+    if (save_at_depth == depth) {
+      str(reg, MemOperand(sp));
+    }
+
     // Go to the next object in the prototype chain.
     object = prototype;
   }
@@ -744,7 +827,7 @@
   b(ne, miss);
 
   // Log the check depth.
-  LOG(IntEvent("check-maps-depth", depth));
+  LOG(IntEvent("check-maps-depth", depth + 1));
 
   // Perform security check for access to the global object and return
   // the holder register.
@@ -985,11 +1068,11 @@
                      TAG_OBJECT);
 
   // Set the map, length and hash field.
-  LoadRoot(scratch1, Heap::kStringMapRootIndex);
-  str(length, FieldMemOperand(result, String::kLengthOffset));
-  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
-  mov(scratch2, Operand(String::kEmptyHashField));
-  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+  InitializeNewString(result,
+                      length,
+                      Heap::kStringMapRootIndex,
+                      scratch1,
+                      scratch2);
 }
 
 
@@ -1019,12 +1102,11 @@
                      TAG_OBJECT);
 
   // Set the map, length and hash field.
-  LoadRoot(scratch1, Heap::kAsciiStringMapRootIndex);
-  mov(scratch1, Operand(Factory::ascii_string_map()));
-  str(length, FieldMemOperand(result, String::kLengthOffset));
-  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
-  mov(scratch2, Operand(String::kEmptyHashField));
-  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+  InitializeNewString(result,
+                      length,
+                      Heap::kAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
 }
 
 
@@ -1039,11 +1121,12 @@
                      scratch2,
                      gc_required,
                      TAG_OBJECT);
-  LoadRoot(scratch1, Heap::kConsStringMapRootIndex);
-  mov(scratch2, Operand(String::kEmptyHashField));
-  str(length, FieldMemOperand(result, String::kLengthOffset));
-  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
-  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kConsStringMapRootIndex,
+                      scratch1,
+                      scratch2);
 }
 
 
@@ -1058,19 +1141,20 @@
                      scratch2,
                      gc_required,
                      TAG_OBJECT);
-  LoadRoot(scratch1, Heap::kConsAsciiStringMapRootIndex);
-  mov(scratch2, Operand(String::kEmptyHashField));
-  str(length, FieldMemOperand(result, String::kLengthOffset));
-  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
-  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kConsAsciiStringMapRootIndex,
+                      scratch1,
+                      scratch2);
 }
 
 
-void MacroAssembler::CompareObjectType(Register function,
+void MacroAssembler::CompareObjectType(Register object,
                                        Register map,
                                        Register type_reg,
                                        InstanceType type) {
-  ldr(map, FieldMemOperand(function, HeapObject::kMapOffset));
+  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
   CompareInstanceType(map, type_reg, type);
 }
 
@@ -1180,7 +1264,7 @@
   // ARMv7 VFP3 instructions to implement integer to double conversion.
   mov(r7, Operand(inReg, ASR, kSmiTagSize));
   vmov(s15, r7);
-  vcvt(d7, s15);
+  vcvt_f64_s32(d7, s15);
   vmov(outLowReg, outHighReg, d7);
 }
 
@@ -1234,19 +1318,26 @@
 }
 
 
-void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
-                                     int num_arguments,
-                                     int result_size) {
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+                                               int num_arguments,
+                                               int result_size) {
   // TODO(1236192): Most runtime routines don't need the number of
   // arguments passed in because it is constant. At some point we
   // should remove this need and make the runtime routine entry code
   // smarter.
   mov(r0, Operand(num_arguments));
-  JumpToRuntime(ext);
+  JumpToExternalReference(ext);
 }
 
 
-void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+                                     int num_arguments,
+                                     int result_size) {
+  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
 #if defined(__thumb__)
   // Thumb mode builtin.
   ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
@@ -1270,15 +1361,29 @@
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  ASSERT(!target.is(r1));
+
+  // Load the builtins object into target register.
+  ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+
   // Load the JavaScript builtin function from the builtins object.
-  ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
-  int builtins_offset =
-      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
-  ldr(r1, FieldMemOperand(r1, builtins_offset));
-  // Load the code entry point from the function into the target register.
-  ldr(target, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  ldr(target, FieldMemOperand(target, SharedFunctionInfo::kCodeOffset));
+  ldr(r1, FieldMemOperand(target,
+                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+
+  // Load the code entry point from the builtins object.
+  ldr(target, FieldMemOperand(target,
+                              JSBuiltinsObject::OffsetOfCodeWithId(id)));
+  if (FLAG_debug_code) {
+    // Make sure the code objects in the builtins object and in the
+    // builtin function are the same.
+    push(r1);
+    ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
+    cmp(r1, target);
+    Assert(eq, "Builtin code object changed");
+    pop(r1);
+  }
   add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
 }
 
@@ -1410,15 +1515,12 @@
   ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
   ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-  int kFlatAsciiStringMask =
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-  and_(scratch1, scratch1, Operand(kFlatAsciiStringMask));
-  and_(scratch2, scratch2, Operand(kFlatAsciiStringMask));
-  cmp(scratch1, Operand(kFlatAsciiStringTag));
-  // Ignore second test if first test failed.
-  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
-  b(ne, failure);
+
+  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
+                                               scratch2,
+                                               scratch1,
+                                               scratch2,
+                                               failure);
 }
 
 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
@@ -1439,6 +1541,147 @@
 }
 
 
+// Allocates a heap number or jumps to the need_gc label if the young space
+// is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required) {
+  // Allocate an object in the heap for the heap number and tag it as a heap
+  // object.
+  AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Get heap number map and store it in the allocated object.
+  LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+void MacroAssembler::CountLeadingZeros(Register source,
+                                       Register scratch,
+                                       Register zeros) {
+#ifdef CAN_USE_ARMV5_INSTRUCTIONS
+  clz(zeros, source);  // This instruction is only supported on ARMv5 and later.
+#else
+  mov(zeros, Operand(0));
+  mov(scratch, source);
+  // Top 16.
+  tst(scratch, Operand(0xffff0000));
+  add(zeros, zeros, Operand(16), LeaveCC, eq);
+  mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
+  // Top 8.
+  tst(scratch, Operand(0xff000000));
+  add(zeros, zeros, Operand(8), LeaveCC, eq);
+  mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
+  // Top 4.
+  tst(scratch, Operand(0xf0000000));
+  add(zeros, zeros, Operand(4), LeaveCC, eq);
+  mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
+  // Top 2.
+  tst(scratch, Operand(0xc0000000));
+  add(zeros, zeros, Operand(2), LeaveCC, eq);
+  mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
+  // Top bit.
+  tst(scratch, Operand(0x80000000u));
+  add(zeros, zeros, Operand(1), LeaveCC, eq);
+#endif
+}
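
The fallback is a binary search for the highest set bit, done with
conditional instructions instead of branches. In C++ the same sequence looks
roughly like this (and, like the assembly, it returns 31 rather than 32 for
an input of 0):

  int CountLeadingZeros32(uint32_t x) {
    int n = 0;
    if ((x & 0xffff0000u) == 0) { n += 16; x <<= 16; }  // top 16 bits empty
    if ((x & 0xff000000u) == 0) { n += 8;  x <<= 8;  }  // top 8 bits empty
    if ((x & 0xf0000000u) == 0) { n += 4;  x <<= 4;  }  // top 4 bits empty
    if ((x & 0xc0000000u) == 0) { n += 2;  x <<= 2;  }  // top 2 bits empty
    if ((x & 0x80000000u) == 0) { n += 1; }             // top bit empty
    return n;
  }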
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+    Register first,
+    Register second,
+    Register scratch1,
+    Register scratch2,
+    Label* failure) {
+  int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  and_(scratch1, first, Operand(kFlatAsciiStringMask));
+  and_(scratch2, second, Operand(kFlatAsciiStringMask));
+  cmp(scratch1, Operand(kFlatAsciiStringTag));
+  // Ignore second test if first test failed.
+  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
+  b(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+                                                            Register scratch,
+                                                            Label* failure) {
+  int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  and_(scratch, type, Operand(kFlatAsciiStringMask));
+  cmp(scratch, Operand(kFlatAsciiStringTag));
+  b(ne, failure);
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+  int frame_alignment = ActivationFrameAlignment();
+  // Up to four simple arguments are passed in registers r0..r3.
+  int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+  if (frame_alignment > kPointerSize) {
+    // Make stack end at alignment and make room for num_arguments - 4 words
+    // and the original value of sp.
+    mov(scratch, sp);
+    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+    ASSERT(IsPowerOf2(frame_alignment));
+    and_(sp, sp, Operand(-frame_alignment));
+    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+  }
+}
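
In effect this reserves the stack argument slots, rounds sp down to the
required alignment, and stashes the original sp just above the argument
slots so CallCFunction can restore it. As commented arithmetic (a sketch,
assuming frame_alignment is a power of two):

  // new_sp = (old_sp - (n + 1) * kPointerSize) & ~(frame_alignment - 1);
  // mem[new_sp + n * kPointerSize] = old_sp;  // restored after the call
  // where n = (num_arguments <= 4) ? 0 : num_arguments - 4.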
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_arguments) {
+  mov(ip, Operand(function));
+  CallCFunction(ip, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+  // Make sure that the stack is aligned before calling a C function unless
+  // running in the simulator. The simulator has its own alignment check which
+  // provides more information.
+#if defined(V8_HOST_ARCH_ARM)
+  if (FLAG_debug_code) {
+    int frame_alignment = OS::ActivationFrameAlignment();
+    int frame_alignment_mask = frame_alignment - 1;
+    if (frame_alignment > kPointerSize) {
+      ASSERT(IsPowerOf2(frame_alignment));
+      Label alignment_as_expected;
+      tst(sp, Operand(frame_alignment_mask));
+      b(eq, &alignment_as_expected);
+      // Don't use Check here, as it will call Runtime_Abort possibly
+      // re-entering here.
+      stop("Unexpected alignment");
+      bind(&alignment_as_expected);
+    }
+  }
+#endif
+
+  // Just call directly. The function called cannot cause a GC, or
+  // allow preemption, so the return address in the link register
+  // stays correct.
+  Call(function);
+  int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+  if (OS::ActivationFrameAlignment() > kPointerSize) {
+    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
+  }
+}
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 98cea16..2ec7a39 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -70,8 +70,15 @@
   // from the stack, clobbering only the sp register.
   void Drop(int count, Condition cond = al);
 
+
+  // Swap two registers.  If the scratch register is omitted then a slightly
+  // less efficient form using eor (exclusive or) instead of mov is emitted.
+  void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+
   void Call(Label* target);
   void Move(Register dst, Handle<Object> value);
+  // May do nothing if the registers are identical.
+  void Move(Register dst, Register src);
   // Jumps to the label at the index given by the Smi in "index".
   void SmiJumpTable(Register index, Vector<Label*> targets);
   // Load an object from the root table.
@@ -79,6 +86,20 @@
                 Heap::RootListIndex index,
                 Condition cond = al);
 
+
+  // Check if object is in new space.
+  // scratch can be object itself, but it will be clobbered.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,  // eq for new space, ne otherwise
+                  Label* branch);
+
+
+  // Set the remembered set bit for an offset into an
+  // object. RecordWriteHelper only works if the object is not in new
+  // space.
+  void RecordWriteHelper(Register object, Register offset, Register scratch);
+
   // Sets the remembered set bit for [address+offset], where address is the
   // address of the heap object 'object'.  The address must be in the first 8K
   // of an allocated page. The 'scratch' register is used in the
@@ -86,6 +107,65 @@
   // well as the ip register.
   void RecordWrite(Register object, Register offset, Register scratch);
 
+  // Push two registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    if (src1.code() > src2.code()) {
+      stm(db_w, sp, src1.bit() | src2.bit(), cond);
+    } else {
+      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
+      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
+    }
+  }
+
+  // Push three registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    ASSERT(!src2.is(src3));
+    ASSERT(!src1.is(src3));
+    if (src1.code() > src2.code()) {
+      if (src2.code() > src3.code()) {
+        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+      } else {
+        stm(db_w, sp, src1.bit() | src2.bit(), cond);
+        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
+      }
+    } else {
+      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
+      Push(src2, src3, cond);
+    }
+  }
+
+  // Push four registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2,
+            Register src3, Register src4, Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    ASSERT(!src2.is(src3));
+    ASSERT(!src1.is(src3));
+    ASSERT(!src1.is(src4));
+    ASSERT(!src2.is(src4));
+    ASSERT(!src3.is(src4));
+    if (src1.code() > src2.code()) {
+      if (src2.code() > src3.code()) {
+        if (src3.code() > src4.code()) {
+          stm(db_w,
+              sp,
+              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
+              cond);
+        } else {
+          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
+        }
+      } else {
+        stm(db_w, sp, src1.bit() | src2.bit(), cond);
+        Push(src3, src4, cond);
+      }
+    } else {
+      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
+      Push(src2, src3, src4, cond);
+    }
+  }
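
stm can only store a register list in ascending register order (the lowest
register ends up at the lowest address), so a single stm is used only when
the argument order already matches the register codes; otherwise the helpers
fall back to str with pre-decrement, or a smaller stm plus a recursive Push.
For example (illustrative):

  __ Push(r3, r1);  // Codes descend left to right: one stm db_w {r1, r3}.
  __ Push(r1, r3);  // Codes ascend: two str instructions with writeback.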
+
   // ---------------------------------------------------------------------------
   // Stack limit support
 
@@ -109,8 +189,8 @@
   // Leave the current exit frame. Expects the return value in r0.
   void LeaveExitFrame(ExitFrame::Mode mode);
 
-  // Align the stack by optionally pushing a Smi zero.
-  void AlignStack(int offset);
+  // Get the actual activation frame alignment for target environment.
+  static int ActivationFrameAlignment();
 
   void LoadContext(Register dst, int context_chain_length);
 
@@ -177,9 +257,14 @@
   // clobbered if it the same as the holder register. The function
   // returns a register containing the holder - either object_reg or
   // holder_reg.
+  // The function can optionally (when save_at_depth !=
+  // kInvalidProtoDepth) save the object at the given depth by moving
+  // it to [sp].
   Register CheckMaps(JSObject* object, Register object_reg,
                      JSObject* holder, Register holder_reg,
-                     Register scratch, Label* miss);
+                     Register scratch,
+                     int save_at_depth,
+                     Label* miss);
 
   // Generate code for checking access rights - used for security checks
   // on access to global objects across environments. The holder register
@@ -239,6 +324,12 @@
                                Register scratch2,
                                Label* gc_required);
 
+  // Allocates a heap number or jumps to the need_gc label if the young space
+  // is full and a scavenge is needed.
+  void AllocateHeapNumber(Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required);
 
   // ---------------------------------------------------------------------------
   // Support functions.
@@ -319,6 +410,12 @@
                                          Register outHighReg,
                                          Register outLowReg);
 
+  // Count leading zeros in a 32 bit word.  On ARMv5 and later it uses the clz
+  // instruction.  On pre-ARMv5 hardware this routine gives the wrong answer
+  // for 0 (31 instead of 32).
+  void CountLeadingZeros(Register source,
+                         Register scratch,
+                         Register zeros);
 
   // ---------------------------------------------------------------------------
   // Runtime calls
@@ -333,7 +430,6 @@
   void StubReturn(int argc);
 
   // Call a runtime routine.
-  // Eventually this should be used for all C calls.
   void CallRuntime(Runtime::Function* f, int num_arguments);
 
   // Convenience function: Same as above, but takes the fid instead.
@@ -344,14 +440,37 @@
                              int num_arguments);
 
   // Tail call of a runtime routine (jump).
-  // Like JumpToRuntime, but also takes care of passing the number
+  // Like JumpToExternalReference, but also takes care of passing the number
   // of parameters.
-  void TailCallRuntime(const ExternalReference& ext,
+  void TailCallExternalReference(const ExternalReference& ext,
+                                 int num_arguments,
+                                 int result_size);
+
+  // Convenience function: tail call a runtime routine (jump).
+  void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
+  // Before calling a C-function from generated code, align arguments on stack.
+  // After aligning the frame, non-register arguments must be stored in
+  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+  // are word sized.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  void PrepareCallCFunction(int num_arguments, Register scratch);
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by PrepareCallCFunction. The called function is not allowed to trigger a
+  // garbage collection, since that might move the code and invalidate the
+  // return address (unless this is somehow accounted for by the called
+  // function).
+  void CallCFunction(ExternalReference function, int num_arguments);
+  void CallCFunction(Register function, int num_arguments);
+
   // Jump to a runtime routine.
-  void JumpToRuntime(const ExternalReference& builtin);
+  void JumpToExternalReference(const ExternalReference& builtin);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
@@ -411,7 +530,7 @@
                                                   Register object2,
                                                   Register scratch1,
                                                   Register scratch2,
-                                                  Label *failure);
+                                                  Label* failure);
 
   // Checks if both objects are sequential ASCII strings and jumps to label
   // if either is not.
@@ -421,6 +540,22 @@
                                            Register scratch2,
                                            Label* not_flat_ascii_strings);
 
+  // Checks if both instance types are sequential ASCII strings and jumps to
+  // label if either is not.
+  void JumpIfBothInstanceTypesAreNotSequentialAscii(
+      Register first_object_instance_type,
+      Register second_object_instance_type,
+      Register scratch1,
+      Register scratch2,
+      Label* failure);
+
+  // Check if instance type is sequential ASCII string and jump to label if
+  // it is not.
+  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+                                              Register scratch,
+                                              Label* failure);
+
+
  private:
   void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
   void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
@@ -437,6 +572,12 @@
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
 
+  void InitializeNewString(Register string,
+                           Register length,
+                           Heap::RootListIndex map_index,
+                           Register scratch1,
+                           Register scratch2);
+
   bool generating_stub_;
   bool allow_stub_calls_;
   // This handle will be patched with the code object on installation.
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 9dd3b93..2fdba14 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -39,7 +39,7 @@
 namespace v8 {
 namespace internal {
 
-#ifdef V8_NATIVE_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
 /*
  * This assembler uses the following register assignment convention
  * - r5 : Pointer to current code object (Code*) including heap object tag.
@@ -163,7 +163,7 @@
   CheckPreemption();
   // Pop Code* offset from backtrack stack, add Code* and jump to location.
   Pop(r0);
-  __ add(pc, r0, Operand(r5));
+  __ add(pc, r0, Operand(code_pointer()));
 }
 
 
@@ -338,7 +338,7 @@
   } else {
     ASSERT(mode_ == UC16);
     int argument_count = 3;
-    FrameAlign(argument_count, r2);
+    __ PrepareCallCFunction(argument_count, r2);
 
     // r0 - offset of start of capture
     // r1 - length of capture
@@ -360,7 +360,7 @@
 
     ExternalReference function =
         ExternalReference::re_case_insensitive_compare_uc16();
-    CallCFunction(function, argument_count);
+    __ CallCFunction(function, argument_count);
 
     // Check if function returned non-zero for success or zero for failure.
     __ cmp(r0, Operand(0));
@@ -611,7 +611,6 @@
   __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
   __ push(r0);  // Make room for "position - 1" constant (value is irrelevant).
   __ push(r0);  // Make room for "at start" constant (value is irrelevant).
-
   // Check if we have space on the stack for registers.
   Label stack_limit_hit;
   Label stack_ok;
@@ -648,16 +647,17 @@
   __ ldr(r0, MemOperand(frame_pointer(), kInputStart));
   // Find negative length (offset of start relative to end).
   __ sub(current_input_offset(), r0, end_of_input_address());
-  // Set r0 to address of char before start of input
+  // Set r0 to address of char before start of the input string
   // (effectively string position -1).
+  __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
   __ sub(r0, current_input_offset(), Operand(char_size()));
+  __ sub(r0, r0, Operand(r1, LSL, (mode_ == UC16) ? 1 : 0));
   // Store this value in a local variable, for use when clearing
   // position registers.
   __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
 
   // Determine whether the start index is zero, that is at the start of the
   // string, and store that value in a local variable.
-  __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
   __ tst(r1, Operand(r1));
   __ mov(r1, Operand(1), LeaveCC, eq);
   __ mov(r1, Operand(0), LeaveCC, ne);
@@ -700,12 +700,15 @@
       // copy captures to output
       __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
       __ ldr(r0, MemOperand(frame_pointer(), kRegisterOutput));
+      __ ldr(r2, MemOperand(frame_pointer(), kStartIndex));
       __ sub(r1, end_of_input_address(), r1);
       // r1 is length of input in bytes.
       if (mode_ == UC16) {
         __ mov(r1, Operand(r1, LSR, 1));
       }
       // r1 is length of input in characters.
+      __ add(r1, r1, Operand(r2));
+      // r1 is length of string in characters.
 
       ASSERT_EQ(0, num_saved_registers_ % 2);
       // Always an even number of capture registers. This allows us to
@@ -765,13 +768,13 @@
     Label grow_failed;
 
     // Call GrowStack(backtrack_stackpointer())
-    int num_arguments = 2;
-    FrameAlign(num_arguments, r0);
+    static const int num_arguments = 2;
+    __ PrepareCallCFunction(num_arguments, r0);
     __ mov(r0, backtrack_stackpointer());
     __ add(r1, frame_pointer(), Operand(kStackHighEnd));
     ExternalReference grow_stack =
       ExternalReference::re_grow_stack();
-    CallCFunction(grow_stack, num_arguments);
+    __ CallCFunction(grow_stack, num_arguments);
     // If return NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.
     __ cmp(r0, Operand(0));
@@ -796,7 +799,7 @@
                                        NULL,
                                        Code::ComputeFlags(Code::REGEXP),
                                        masm_->CodeObject());
-  LOG(RegExpCodeCreateEvent(*code, *source));
+  PROFILE(RegExpCodeCreateEvent(*code, *source));
   return Handle<Object>::cast(code);
 }
 
@@ -966,8 +969,8 @@
 // Private methods:
 
 void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
-  int num_arguments = 3;
-  FrameAlign(num_arguments, scratch);
+  static const int num_arguments = 3;
+  __ PrepareCallCFunction(num_arguments, scratch);
   // RegExp code frame pointer.
   __ mov(r2, frame_pointer());
   // Code* of self.
@@ -997,6 +1000,12 @@
   // If not real stack overflow the stack guard was used to interrupt
   // execution for another purpose.
 
+  // If this is a direct call from JavaScript, retry the RegExp, forcing the
+  // call through the runtime system. Currently the direct call cannot handle
+  // a GC.
+  if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+    return RETRY;
+  }
+
   // Prepare for possible GC.
   HandleScope handles;
   Handle<Code> code_handle(re_code);
@@ -1179,47 +1188,12 @@
 }
 
 
-void RegExpMacroAssemblerARM::FrameAlign(int num_arguments, Register scratch) {
-  int frameAlignment = OS::ActivationFrameAlignment();
-  // Up to four simple arguments are passed in registers r0..r3.
-  int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
-  if (frameAlignment != 0) {
-    // Make stack end at alignment and make room for num_arguments - 4 words
-    // and the original value of sp.
-    __ mov(scratch, sp);
-    __ sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
-    ASSERT(IsPowerOf2(frameAlignment));
-    __ and_(sp, sp, Operand(-frameAlignment));
-    __ str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
-  } else {
-    __ sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
-  }
-}
-
-
-void RegExpMacroAssemblerARM::CallCFunction(ExternalReference function,
-                                            int num_arguments) {
-  __ mov(r5, Operand(function));
-  // Just call directly. The function called cannot cause a GC, or
-  // allow preemption, so the return address in the link register
-  // stays correct.
-  __ Call(r5);
-  int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
-  if (OS::ActivationFrameAlignment() > kIntSize) {
-    __ ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
-  } else {
-    __ add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
-  }
-  __ mov(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-
 void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
     ExternalReference function,
     int num_arguments) {
   // Must pass all arguments in registers. The stub pushes on the stack.
   ASSERT(num_arguments <= 4);
-  __ mov(r5, Operand(function));
+  __ mov(code_pointer(), Operand(function));
   RegExpCEntryStub stub;
   __ CallStub(&stub);
   if (OS::ActivationFrameAlignment() != 0) {
@@ -1261,6 +1235,6 @@
 
 #undef __
 
-#endif  // V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 }}  // namespace v8::internal
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 7de5f93..2c0a8d8 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -32,14 +32,14 @@
 namespace internal {
 
 
-#ifndef V8_NATIVE_REGEXP
+#ifdef V8_INTERPRETED_REGEXP
 class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
  public:
   RegExpMacroAssemblerARM();
   virtual ~RegExpMacroAssemblerARM();
 };
 
-#else
+#else  // V8_INTERPRETED_REGEXP
 class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
  public:
   RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
@@ -206,22 +206,6 @@
   // and increments it by a word size.
   inline void Pop(Register target);
 
-  // Before calling a C-function from generated code, align arguments on stack.
-  // After aligning the frame, non-register arguments must be stored in
-  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
-  // are word sized.
-  // Some compilers/platforms require the stack to be aligned when calling
-  // C++ code.
-  // Needs a scratch register to do some arithmetic. This register will be
-  // trashed.
-  inline void FrameAlign(int num_arguments, Register scratch);
-
-  // Calls a C function and cleans up the space for arguments allocated
-  // by FrameAlign. The called function is not allowed to trigger a garbage
-  // collection.
-  inline void CallCFunction(ExternalReference function,
-                            int num_arguments);
-
   // Calls a C function and cleans up the frame alignment done by
   // by FrameAlign. The called function *is* allowed to trigger a garbage
   // collection, but may not take more than four arguments (no arguments
@@ -274,7 +258,7 @@
   const char* GetName() { return "RegExpCEntryStub"; }
 };
 
-#endif  // V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 
 }}  // namespace v8::internal
diff --git a/src/arm/register-allocator-arm-inl.h b/src/arm/register-allocator-arm-inl.h
index 4691f29..945cdeb 100644
--- a/src/arm/register-allocator-arm-inl.h
+++ b/src/arm/register-allocator-arm-inl.h
@@ -92,9 +92,6 @@
 
 void RegisterAllocator::Initialize() {
   Reset();
-  // The non-reserved r1 and lr registers are live on JS function entry.
-  Use(r1);  // JS function.
-  Use(lr);  // Return address.
 }
 
 
diff --git a/src/arm/register-allocator-arm.h b/src/arm/register-allocator-arm.h
index f953ed9..fdbc88f 100644
--- a/src/arm/register-allocator-arm.h
+++ b/src/arm/register-allocator-arm.h
@@ -33,7 +33,8 @@
 
 class RegisterAllocatorConstants : public AllStatic {
  public:
-  static const int kNumRegisters = 12;
+  // No registers are currently managed by the register allocator on ARM.
+  static const int kNumRegisters = 0;
   static const int kInvalidRegister = -1;
 };
 
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index cee5aea..5fe7d5f 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -72,6 +72,8 @@
 
   int32_t GetRegisterValue(int regnum);
   bool GetValue(const char* desc, int32_t* value);
+  bool GetVFPSingleValue(const char* desc, float* value);
+  bool GetVFPDoubleValue(const char* desc, double* value);
 
   // Set or delete a breakpoint. Returns true if successful.
   bool SetBreakpoint(Instr* breakpc);
@@ -148,7 +150,33 @@
     *value = GetRegisterValue(regnum);
     return true;
   } else {
-    return SScanF(desc, "%i", value) == 1;
+    if (strncmp(desc, "0x", 2) == 0) {
+      return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
+    } else {
+      return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
+    }
+  }
+  return false;
+}
+
+
+bool Debugger::GetVFPSingleValue(const char* desc, float* value) {
+  bool is_double;
+  int regnum = VFPRegisters::Number(desc, &is_double);
+  if (regnum != kNoRegister && !is_double) {
+    *value = sim_->get_float_from_s_register(regnum);
+    return true;
+  }
+  return false;
+}
+
+
+bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
+  bool is_double;
+  int regnum = VFPRegisters::Number(desc, &is_double);
+  if (regnum != kNoRegister && is_double) {
+    *value = sim_->get_double_from_d_register(regnum);
+    return true;
   }
   return false;
 }
@@ -207,6 +235,7 @@
   char cmd[COMMAND_SIZE + 1];
   char arg1[ARG_SIZE + 1];
   char arg2[ARG_SIZE + 1];
+  char* argv[3] = { cmd, arg1, arg2 };
 
   // make sure to have a proper terminating character if reaching the limit
   cmd[COMMAND_SIZE] = 0;
@@ -234,7 +263,7 @@
     } else {
       // Use sscanf to parse the individual parts of the command line. At the
       // moment no command expects more than two parameters.
-      int args = SScanF(line,
+      int argc = SScanF(line,
                         "%" XSTR(COMMAND_SIZE) "s "
                         "%" XSTR(ARG_SIZE) "s "
                         "%" XSTR(ARG_SIZE) "s",
@@ -247,8 +276,10 @@
         // Leave the debugger shell.
         done = true;
       } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
-        if (args == 2) {
+        if (argc == 2) {
           int32_t value;
+          float svalue;
+          double dvalue;
           if (strcmp(arg1, "all") == 0) {
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
@@ -257,6 +288,10 @@
           } else {
             if (GetValue(arg1, &value)) {
               PrintF("%s: 0x%08x %d \n", arg1, value, value);
+            } else if (GetVFPSingleValue(arg1, &svalue)) {
+              PrintF("%s: %f \n", arg1, svalue);
+            } else if (GetVFPDoubleValue(arg1, &dvalue)) {
+              PrintF("%s: %lf \n", arg1, dvalue);
             } else {
               PrintF("%s unrecognized\n", arg1);
             }
@@ -266,7 +301,7 @@
         }
       } else if ((strcmp(cmd, "po") == 0)
                  || (strcmp(cmd, "printobject") == 0)) {
-        if (args == 2) {
+        if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
             Object* obj = reinterpret_cast<Object*>(value);
@@ -283,6 +318,37 @@
         } else {
           PrintF("printobject <value>\n");
         }
+      } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+        int32_t* cur = NULL;
+        int32_t* end = NULL;
+        int next_arg = 1;
+
+        if (strcmp(cmd, "stack") == 0) {
+          cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
+        } else {  // "mem"
+          int32_t value;
+          if (!GetValue(arg1, &value)) {
+            PrintF("%s unrecognized\n", arg1);
+            continue;
+          }
+          cur = reinterpret_cast<int32_t*>(value);
+          next_arg++;
+        }
+
+        int32_t words = 10;  // Default to dumping 10 words.
+        if (argc == next_arg + 1) {
+          if (!GetValue(argv[next_arg], &words)) {
+            words = 10;
+          }
+        }
+        end = cur + words;
+
+        while (cur < end) {
+          PrintF("  0x%08x:  0x%08x %10d\n", cur, *cur, *cur);
+          cur++;
+        }
       } else if (strcmp(cmd, "disasm") == 0) {
         disasm::NameConverter converter;
         disasm::Disassembler dasm(converter);
@@ -292,10 +358,10 @@
         byte* cur = NULL;
         byte* end = NULL;
 
-        if (args == 1) {
+        if (argc == 1) {
           cur = reinterpret_cast<byte*>(sim_->get_pc());
           end = cur + (10 * Instr::kInstrSize);
-        } else if (args == 2) {
+        } else if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
             cur = reinterpret_cast<byte*>(value);
@@ -321,7 +387,7 @@
         v8::internal::OS::DebugBreak();
         PrintF("regaining control from gdb\n");
       } else if (strcmp(cmd, "break") == 0) {
-        if (args == 2) {
+        if (argc == 2) {
           int32_t value;
           if (GetValue(arg1, &value)) {
             if (!SetBreakpoint(reinterpret_cast<Instr*>(value))) {
@@ -371,6 +437,10 @@
         PrintF("  print an object from a register (alias 'po')\n");
         PrintF("flags\n");
         PrintF("  print flags\n");
+        PrintF("stack [<words>]\n");
+        PrintF("  dump stack content, default dump 10 words)\n");
+        PrintF("mem <address> [<words>]\n");
+        PrintF("  dump memory content, default dump 10 words)\n");
         PrintF("disasm [<instructions>]\n");
         PrintF("disasm [[<address>] <instructions>]\n");
         PrintF("  disassemble code, default is 10 instructions from pc\n");
@@ -384,7 +454,7 @@
         PrintF("  ignore the stop instruction at the current location");
         PrintF("  from now on\n");
         PrintF("trace (alias 't')\n");
-        PrintF("  toogle the tracing of all executed statements");
+        PrintF("  toogle the tracing of all executed statements\n");
       } else {
         PrintF("Unknown command: %s\n", cmd);
       }
@@ -404,6 +474,94 @@
 }
 
 
+static bool ICacheMatch(void* one, void* two) {
+  ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+  ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+  return one == two;
+}
+
+
+static uint32_t ICacheHash(void* key) {
+  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+  intptr_t start_page = (start & ~CachePage::kPageMask);
+  intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+  return start_page == end_page;
+}
+
+
+void Simulator::FlushICache(void* start_addr, size_t size) {
+  intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+  int intra_line = (start & CachePage::kLineMask);
+  start -= intra_line;
+  size += intra_line;
+  size = ((size - 1) | CachePage::kLineMask) + 1;
+  int offset = (start & CachePage::kPageMask);
+  while (!AllOnOnePage(start, size - 1)) {
+    int bytes_to_flush = CachePage::kPageSize - offset;
+    FlushOnePage(start, bytes_to_flush);
+    start += bytes_to_flush;
+    size -= bytes_to_flush;
+    ASSERT_EQ(0, start & CachePage::kPageMask);
+    offset = 0;
+  }
+  if (size != 0) {
+    FlushOnePage(start, size);
+  }
+}
+
+
+CachePage* Simulator::GetCachePage(void* page) {
+  v8::internal::HashMap::Entry* entry = i_cache_->Lookup(page,
+                                                         ICacheHash(page),
+                                                         true);
+  if (entry->value == NULL) {
+    CachePage* new_page = new CachePage();
+    entry->value = new_page;
+  }
+  return reinterpret_cast<CachePage*>(entry->value);
+}
+
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(intptr_t start, int size) {
+  ASSERT(size <= CachePage::kPageSize);
+  ASSERT(AllOnOnePage(start, size - 1));
+  ASSERT((start & CachePage::kLineMask) == 0);
+  ASSERT((size & CachePage::kLineMask) == 0);
+  void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+  int offset = (start & CachePage::kPageMask);
+  CachePage* cache_page = GetCachePage(page);
+  char* valid_bytemap = cache_page->ValidityByte(offset);
+  memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+
+void Simulator::CheckICache(Instr* instr) {
+  intptr_t address = reinterpret_cast<intptr_t>(instr);
+  void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+  void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+  int offset = (address & CachePage::kPageMask);
+  CachePage* cache_page = GetCachePage(page);
+  char* cache_valid_byte = cache_page->ValidityByte(offset);
+  bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+  char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+  if (cache_hit) {
+    // Check that the data in memory matches the contents of the I-cache.
+    CHECK(memcmp(reinterpret_cast<void*>(instr),
+                 cache_page->CachedData(offset),
+                 Instr::kInstrSize) == 0);
+  } else {
+    // Cache miss.  Load memory into the cache.
+    memcpy(cached_line, line, CachePage::kLineLength);
+    *cache_valid_byte = CachePage::LINE_VALID;
+  }
+}
+
+
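The flush logic above first widens the requested range to whole cache lines
(rounding the start down and the size up) and then invalidates it one page at
a time. A minimal standalone sketch of just that alignment arithmetic, reusing
the CachePage constants (4 KB pages, 4-byte lines); the example address and
size are made up:

#include <cstdint>
#include <cstdio>

static const intptr_t kLineMask = 4 - 1;     // CachePage::kLineMask.
static const intptr_t kPageMask = 4096 - 1;  // CachePage::kPageMask.

int main() {
  intptr_t start = 0x1002;  // Deliberately not line-aligned.
  size_t size = 5;
  intptr_t intra_line = start & kLineMask;  // 2 bytes into a line.
  start -= intra_line;                      // Round start down: 0x1000.
  size += intra_line;                       // Still covers the same bytes: 7.
  size = ((size - 1) | kLineMask) + 1;      // Round up to whole lines: 8.
  std::printf("invalidate [%#lx, %#lx), page offset %#lx\n",
              static_cast<long>(start),
              static_cast<long>(start + size),
              static_cast<long>(start & kPageMask));
  return 0;
}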
 // Create one simulator per thread and keep it in thread local storage.
 static v8::internal::Thread::LocalStorageKey simulator_key;
 
@@ -419,7 +577,13 @@
 }
 
 
+v8::internal::HashMap* Simulator::i_cache_ = NULL;
+
+
 Simulator::Simulator() {
+  if (i_cache_ == NULL) {
+    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+  }
   Initialize();
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
@@ -484,6 +648,9 @@
         swi_instruction_((AL << 28) | (0xf << 24) | call_rt_redirected),
         fp_return_(fp_return),
         next_(list_) {
+    Simulator::current()->
+        FlushICache(reinterpret_cast<void*>(&swi_instruction_),
+                    Instr::kInstrSize);
     list_ = this;
   }
 
@@ -1179,6 +1346,11 @@
   int swi = instr->SwiField();
   switch (swi) {
     case call_rt_redirected: {
+      // Check whether the stack is aligned. If not, the error is reported
+      // below so that it can include information on the function called.
+      bool stack_aligned =
+          (get_register(sp)
+           & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
       Redirection* redirection = Redirection::FromSwiInstruction(instr);
       int32_t arg0 = get_register(r0);
       int32_t arg1 = get_register(r1);
@@ -1192,12 +1364,17 @@
             reinterpret_cast<intptr_t>(redirection->external_function());
         SimulatorRuntimeFPCall target =
             reinterpret_cast<SimulatorRuntimeFPCall>(external);
-        if (::v8::internal::FLAG_trace_sim) {
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
           double x, y;
           GetFpArgs(&x, &y);
-          PrintF("Call to host function at %p with args %f, %f\n",
+          PrintF("Call to host function at %p with args %f, %f",
                  FUNCTION_ADDR(target), x, y);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08x\n", get_register(sp));
+          }
+          PrintF("\n");
         }
+        CHECK(stack_aligned);
         double result = target(arg0, arg1, arg2, arg3);
         SetFpResult(result);
       } else {
@@ -1205,15 +1382,20 @@
             reinterpret_cast<int32_t>(redirection->external_function());
         SimulatorRuntimeCall target =
             reinterpret_cast<SimulatorRuntimeCall>(external);
-        if (::v8::internal::FLAG_trace_sim) {
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
           PrintF(
-              "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+              "Call to host function at %p with args %08x, %08x, %08x, %08x",
               FUNCTION_ADDR(target),
               arg0,
               arg1,
               arg2,
               arg3);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08x\n", get_register(sp));
+          }
+          PrintF("\n");
         }
+        CHECK(stack_aligned);
         int64_t result = target(arg0, arg1, arg2, arg3);
         int32_t lo_res = static_cast<int32_t>(result);
         int32_t hi_res = static_cast<int32_t>(result >> 32);
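The stack check added above relies on FLAG_sim_stack_alignment being a power
of two: for such values, addr & (n - 1) == 0 exactly when addr is a multiple
of n. A small sketch of that test; the alignment value 8 is just an example,
since the flag is configurable at runtime:

#include <cassert>
#include <cstdint>

static bool IsAligned(uint32_t sp, uint32_t alignment) {
  // Only valid for power-of-two alignments, where (alignment - 1) is a
  // mask of the low bits that must all be zero.
  return (sp & (alignment - 1)) == 0;
}

int main() {
  assert(IsAligned(0x7fff0010, 8));
  assert(!IsAligned(0x7fff0014, 8));
  return 0;
}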
@@ -1435,6 +1617,50 @@
       }
       return;
     }
+  } else if ((type == 0) && instr->IsMiscType0()) {
+    if (instr->Bits(22, 21) == 1) {
+      int rm = instr->RmField();
+      switch (instr->Bits(7, 4)) {
+        case BX:
+          set_pc(get_register(rm));
+          break;
+        case BLX: {
+          uint32_t old_pc = get_pc();
+          set_pc(get_register(rm));
+          set_register(lr, old_pc + Instr::kInstrSize);
+          break;
+        }
+        case BKPT:
+          v8::internal::OS::DebugBreak();
+          break;
+        default:
+          UNIMPLEMENTED();
+      }
+    } else if (instr->Bits(22, 21) == 3) {
+      int rm = instr->RmField();
+      int rd = instr->RdField();
+      switch (instr->Bits(7, 4)) {
+        case CLZ: {
+          uint32_t bits = get_register(rm);
+          int leading_zeros = 0;
+          if (bits == 0) {
+            leading_zeros = 32;
+          } else {
+            while ((bits & 0x80000000u) == 0) {
+              bits <<= 1;
+              leading_zeros++;
+            }
+          }
+          set_register(rd, leading_zeros);
+          break;
+        }
+        default:
+          UNIMPLEMENTED();
+      }
+    } else {
+      PrintF("%08x\n", instr->InstructionBits());
+      UNIMPLEMENTED();
+    }
   } else {
     int rd = instr->RdField();
     int rn = instr->RnField();
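The CLZ case above counts leading zeros by shifting until the top bit is set,
with zero handled specially. A standalone sketch of the same semantics; the
__builtin_clz cross-check assumes a GCC-compatible host compiler (the builtin
is undefined for 0, hence the special case is not cross-checked):

#include <cassert>
#include <cstdint>

static int CountLeadingZeros(uint32_t bits) {
  if (bits == 0) return 32;  // The loop below would not terminate on 0.
  int leading_zeros = 0;
  while ((bits & 0x80000000u) == 0) {
    bits <<= 1;
    leading_zeros++;
  }
  return leading_zeros;
}

int main() {
  assert(CountLeadingZeros(0) == 32);
  assert(CountLeadingZeros(1) == 31);
  assert(CountLeadingZeros(0x00800000u) == 8);
  assert(CountLeadingZeros(0x00800000u) == __builtin_clz(0x00800000u));
  return 0;
}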
@@ -1552,21 +1778,9 @@
           SetNZFlags(alu_out);
           SetCFlag(shifter_carry_out);
         } else {
-          ASSERT(type == 0);
-          int rm = instr->RmField();
-          switch (instr->Bits(7, 4)) {
-            case BX:
-              set_pc(get_register(rm));
-              break;
-            case BLX: {
-              uint32_t old_pc = get_pc();
-              set_pc(get_register(rm));
-              set_register(lr, old_pc + Instr::kInstrSize);
-              break;
-            }
-            default:
-              UNIMPLEMENTED();
-          }
+          // Other instructions matching this pattern are handled in the
+          // miscellaneous instructions part above.
+          UNREACHABLE();
         }
         break;
       }
@@ -1594,27 +1808,9 @@
           SetCFlag(!CarryFrom(rn_val, shifter_operand));
           SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
         } else {
-          ASSERT(type == 0);
-          int rm = instr->RmField();
-          int rd = instr->RdField();
-          switch (instr->Bits(7, 4)) {
-            case CLZ: {
-              uint32_t bits = get_register(rm);
-              int leading_zeros = 0;
-              if (bits == 0) {
-                leading_zeros = 32;
-              } else {
-                while ((bits & 0x80000000u) == 0) {
-                  bits <<= 1;
-                  leading_zeros++;
-                }
-              }
-              set_register(rd, leading_zeros);
-              break;
-            }
-            default:
-              UNIMPLEMENTED();
-          }
+          // Other instructions matching this pattern are handled in the
+          // miscellaneous instructions part above.
+          UNREACHABLE();
         }
         break;
       }
@@ -1768,6 +1964,7 @@
       break;
     }
     case 3: {
+      // UBFX.
       if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
         uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
         uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
@@ -1919,6 +2116,13 @@
 }
 
 
+// Depending on the value of the last_bit flag, glue a register code together
+// from the vm and m values (where m is expected to be a single bit).
+static int GlueRegCode(bool last_bit, int vm, int m) {
+  return last_bit ? ((vm << 1) | m) : ((m << 4) | vm);
+}
+
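Worked examples for GlueRegCode: with last_bit set, the extra bit becomes the
low bit of the register number (the single-precision encoding, so s11 is
vm=5, m=1); otherwise m is glued in above the four vm bits. A standalone
sketch:

#include <cassert>

static int GlueRegCode(bool last_bit, int vm, int m) {
  return last_bit ? ((vm << 1) | m) : ((m << 4) | vm);
}

int main() {
  assert(GlueRegCode(true, 5, 1) == 11);   // s11 = (5 << 1) | 1.
  assert(GlueRegCode(false, 5, 1) == 21);  // (1 << 4) | 5.
  return 0;
}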
+
 // void Simulator::DecodeTypeVFP(Instr* instr)
 // The Following ARMv7 VFPv instructions are currently supported.
 // vmov :Sn = Rt
@@ -1933,114 +2137,212 @@
 // VMRS
 void Simulator::DecodeTypeVFP(Instr* instr) {
   ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+  ASSERT(instr->Bits(11, 9) == 0x5);
 
-  int rt = instr->RtField();
   int vm = instr->VmField();
-  int vn = instr->VnField();
   int vd = instr->VdField();
+  int vn = instr->VnField();
 
-  if (instr->Bit(23) == 1) {
-    if ((instr->Bits(21, 19) == 0x7) &&
-        (instr->Bits(18, 16) == 0x5) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 1) &&
-        (instr->Bit(6) == 1) &&
-        (instr->Bit(4) == 0)) {
-      double dm_val = get_double_from_d_register(vm);
-      int32_t int_value = static_cast<int32_t>(dm_val);
-      set_s_register_from_sinteger(((vd<<1) | instr->DField()), int_value);
-    } else if ((instr->Bits(21, 19) == 0x7) &&
-               (instr->Bits(18, 16) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(7) == 1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      int32_t int_value = get_sinteger_from_s_register(((vm<<1) |
-                                                       instr->MField()));
-      double dbl_value = static_cast<double>(int_value);
-      set_d_register_from_double(vd, dbl_value);
-    } else if ((instr->Bit(21) == 0x0) &&
-               (instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
+  if (instr->Bit(4) == 0) {
+    if (instr->Opc1Field() == 0x7) {
+      // Other data processing instructions.
+      if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+        DecodeVCVTBetweenDoubleAndSingle(instr);
+      } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() >> 1) == 0x6) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCMP(instr);
+      } else {
+        UNREACHABLE();  // Not used by V8.
+      }
+    } else if (instr->Opc1Field() == 0x3) {
+      if (instr->SzField() != 0x1) {
+        UNREACHABLE();  // Not used by V8.
+      }
+
+      if (instr->Opc3Field() & 0x1) {
+        // vsub
+        double dn_value = get_double_from_d_register(vn);
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = dn_value - dm_value;
+        set_d_register_from_double(vd, dd_value);
+      } else {
+        // vadd
+        double dn_value = get_double_from_d_register(vn);
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = dn_value + dm_value;
+        set_d_register_from_double(vd, dd_value);
+      }
+    } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
+      // vmul
+      if (instr->SzField() != 0x1) {
+        UNREACHABLE();  // Not used by V8.
+      }
+
+      double dn_value = get_double_from_d_register(vn);
+      double dm_value = get_double_from_d_register(vm);
+      double dd_value = dn_value * dm_value;
+      set_d_register_from_double(vd, dd_value);
+    } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
+      // vdiv
+      if (instr->SzField() != 0x1) {
+        UNREACHABLE();  // Not used by V8.
+      }
+
       double dn_value = get_double_from_d_register(vn);
       double dm_value = get_double_from_d_register(vm);
       double dd_value = dn_value / dm_value;
       set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Bits(21, 20) == 0x3) &&
-               (instr->Bits(19, 16) == 0x4) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0x1) &&
-               (instr->Bit(4) == 0x0)) {
-      double dd_value = get_double_from_d_register(vd);
-      double dm_value = get_double_from_d_register(vm);
-      Compute_FPSCR_Flags(dd_value, dm_value);
-    } else if ((instr->Bits(23, 20) == 0xF) &&
-               (instr->Bits(19, 16) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(7, 5) == 0x0) &&
-               (instr->Bit(4) == 0x1)    &&
-               (instr->Bits(3, 0) == 0x0)) {
-      if (instr->Bits(15, 12) == 0xF)
+    } else {
+      UNIMPLEMENTED();  // Not used by V8.
+    }
+  } else {
+    if ((instr->VCField() == 0x0) &&
+        (instr->VAField() == 0x0)) {
+      DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+    } else if ((instr->VLField() == 0x1) &&
+               (instr->VCField() == 0x0) &&
+               (instr->VAField() == 0x7) &&
+               (instr->Bits(19, 16) == 0x1)) {
+      // vmrs
+      if (instr->RtField() == 0xF)
         Copy_FPSCR_to_APSR();
       else
         UNIMPLEMENTED();  // Not used by V8.
     } else {
       UNIMPLEMENTED();  // Not used by V8.
     }
-  } else if (instr->Bit(21) == 1) {
-    if ((instr->Bit(20) == 0x1) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 0x1) &&
-        (instr->Bit(6) == 0) &&
-        (instr->Bit(4) == 0)) {
-      double dn_value = get_double_from_d_register(vn);
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = dn_value + dm_value;
-      set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      double dn_value = get_double_from_d_register(vn);
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = dn_value - dm_value;
-      set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
-      double dn_value = get_double_from_d_register(vn);
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = dn_value * dm_value;
-      set_d_register_from_double(vd, dd_value);
-    } else {
+  }
+}
+
+
+void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
+  ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
+         (instr->VAField() == 0x0));
+
+  int t = instr->RtField();
+  int n = GlueRegCode(true, instr->VnField(), instr->NField());
+  bool to_arm_register = (instr->VLField() == 0x1);
+
+  if (to_arm_register) {
+    int32_t int_value = get_sinteger_from_s_register(n);
+    set_register(t, int_value);
+  } else {
+    int32_t rs_val = get_register(t);
+    set_s_register_from_sinteger(n, rs_val);
+  }
+}
+
+
+void Simulator::DecodeVCMP(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+         (instr->Opc3Field() & 0x1));
+
+  // Comparison.
+  bool dp_operation = (instr->SzField() == 1);
+
+  if (instr->Bit(7) != 0) {
+    // Raising exceptions for quiet NaNs is not supported.
+    UNIMPLEMENTED();  // Not used by V8.
+  }
+
+  int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField());
+  int m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
+
+  if (dp_operation) {
+    double dd_value = get_double_from_d_register(d);
+    double dm_value = get_double_from_d_register(m);
+
+    Compute_FPSCR_Flags(dd_value, dm_value);
+  } else {
+    UNIMPLEMENTED();  // Not used by V8.
+  }
+}
+
+
+void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+
+  bool double_to_single = (instr->SzField() == 1);
+  int dst = GlueRegCode(double_to_single, instr->VdField(), instr->DField());
+  int src = GlueRegCode(!double_to_single, instr->VmField(), instr->MField());
+
+  if (double_to_single) {
+    double val = get_double_from_d_register(src);
+    set_s_register_from_float(dst, static_cast<float>(val));
+  } else {
+    float val = get_float_from_s_register(src);
+    set_d_register_from_double(dst, static_cast<double>(val));
+  }
+}
+
+
+void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
+         (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+
+  // Conversion between floating-point and integer.
+  int vd = instr->VdField();
+  int d = instr->DField();
+  int vm = instr->VmField();
+  int m = instr->MField();
+
+  bool to_integer = (instr->Bit(18) == 1);
+  bool dp_operation = (instr->SzField() == 1);
+  if (to_integer) {
+    bool unsigned_integer = (instr->Bit(16) == 0);
+    if (instr->Bit(7) != 1) {
+      // Only rounding towards zero is supported.
       UNIMPLEMENTED();  // Not used by V8.
     }
-  } else {
-    if ((instr->Bit(20) == 0x0) &&
-        (instr->Bits(11, 8) == 0xA) &&
-        (instr->Bits(6, 5) == 0x0) &&
-        (instr->Bit(4) == 1) &&
-        (instr->Bits(3, 0) == 0x0)) {
-      int32_t rs_val = get_register(rt);
-      set_s_register_from_sinteger(((vn<<1) | instr->NField()), rs_val);
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(6, 5) == 0x0) &&
-               (instr->Bit(4) == 1) &&
-               (instr->Bits(3, 0) == 0x0)) {
-      int32_t int_value = get_sinteger_from_s_register(((vn<<1) |
-                                                       instr->NField()));
-      set_register(rt, int_value);
+
+    int dst = GlueRegCode(true, vd, d);
+    int src = GlueRegCode(!dp_operation, vm, m);
+
+    if (dp_operation) {
+      double val = get_double_from_d_register(src);
+
+      int sint = unsigned_integer ? static_cast<uint32_t>(val) :
+                                    static_cast<int32_t>(val);
+
+      set_s_register_from_sinteger(dst, sint);
     } else {
-      UNIMPLEMENTED();  // Not used by V8.
+      float val = get_float_from_s_register(src);
+
+      int sint = unsigned_integer ? static_cast<uint32_t>(val) :
+                                    static_cast<int32_t>(val);
+
+      set_s_register_from_sinteger(dst, sint);
+    }
+  } else {
+    bool unsigned_integer = (instr->Bit(7) == 0);
+
+    int dst = GlueRegCode(!dp_operation, vd, d);
+    int src = GlueRegCode(true, vm, m);
+
+    int val = get_sinteger_from_s_register(src);
+
+    if (dp_operation) {
+      if (unsigned_integer) {
+        set_d_register_from_double(
+            dst, static_cast<double>(static_cast<uint32_t>(val)));
+      } else {
+        set_d_register_from_double(dst, static_cast<double>(val));
+      }
+    } else {
+      if (unsigned_integer) {
+        set_s_register_from_float(
+            dst, static_cast<float>(static_cast<uint32_t>(val)));
+      } else {
+        set_s_register_from_float(dst, static_cast<float>(val));
+      }
     }
   }
 }
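The conversions above lean on the fact that C++ float-to-integer casts
truncate toward zero, which is why only the RZ rounding mode (Bit(7) == 1) is
accepted for vcvt-to-integer. A small sketch of the semantics being relied on:

#include <cassert>
#include <cstdint>

int main() {
  assert(static_cast<int32_t>(-2.7) == -2);  // Truncation toward zero.
  assert(static_cast<uint32_t>(2.7) == 2u);
  // The int-to-float direction reinterprets the S-register bits as
  // unsigned first when the instruction asks for an unsigned source.
  int32_t raw = -1;
  assert(static_cast<double>(static_cast<uint32_t>(raw)) == 4294967295.0);
  return 0;
}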
@@ -2055,9 +2357,32 @@
 void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
-  if (instr->CoprocessorField() != 0xB) {
-    UNIMPLEMENTED();  // Not used by V8.
-  } else {
+  if (instr->CoprocessorField() == 0xA) {
+    switch (instr->OpcodeField()) {
+      case 0x8:
+      case 0xC: {  // Load and store float to memory.
+        int rn = instr->RnField();
+        int vd = instr->VdField();
+        int offset = instr->Immed8Field();
+        if (!instr->HasU()) {
+          offset = -offset;
+        }
+
+        int32_t address = get_register(rn) + 4 * offset;
+        if (instr->HasL()) {
+          // Load a single-precision value from memory: vldr.
+          set_s_register_from_sinteger(vd, ReadW(address, instr));
+        } else {
+          // Store a single-precision value to memory: vstr.
+          WriteW(address, get_sinteger_from_s_register(vd), instr);
+        }
+        break;
+      }
+      default:
+        UNIMPLEMENTED();  // Not used by V8.
+        break;
+    }
+  } else if (instr->CoprocessorField() == 0xB) {
     switch (instr->OpcodeField()) {
       case 0x2:
         // Load and store double to two GP registers
@@ -2106,12 +2431,17 @@
         UNIMPLEMENTED();  // Not used by V8.
         break;
     }
+  } else {
+    UNIMPLEMENTED();  // Not used by V8.
   }
 }
 
 
 // Executes the current instruction.
 void Simulator::InstructionDecode(Instr* instr) {
+  if (v8::internal::FLAG_check_icache) {
+    CheckICache(instr);
+  }
   pc_modified_ = false;
   if (::v8::internal::FLAG_trace_sim) {
     disasm::NameConverter converter;
@@ -2306,7 +2636,6 @@
   return address;
 }
 
-
 } }  // namespace assembler::arm
 
-#endif  // !defined(__arm__)
+#endif  // __arm__
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 1973730..91614ea 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -89,11 +89,43 @@
 
 
 #include "constants-arm.h"
+#include "hashmap.h"
 
 
 namespace assembler {
 namespace arm {
 
+class CachePage {
+ public:
+  static const int LINE_VALID = 0;
+  static const int LINE_INVALID = 1;
+
+  static const int kPageShift = 12;
+  static const int kPageSize = 1 << kPageShift;
+  static const int kPageMask = kPageSize - 1;
+  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
+  static const int kLineLength = 1 << kLineShift;
+  static const int kLineMask = kLineLength - 1;
+
+  CachePage() {
+    memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+  }
+
+  char* ValidityByte(int offset) {
+    return &validity_map_[offset >> kLineShift];
+  }
+
+  char* CachedData(int offset) {
+    return &data_[offset];
+  }
+
+ private:
+  char data_[kPageSize];   // The cached data.
+  static const int kValidityMapSize = kPageSize >> kLineShift;
+  char validity_map_[kValidityMapSize];  // One byte per line.
+};
+
+
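The constants above pin down the simulated cache geometry: 4 KB of cached
data validated at 4-byte line granularity, so the validity map is 1 KB. A
compile-time sanity check (static_assert is used here purely for
illustration; it postdates the compilers this tree targets):

static const int kPageSize = 1 << 12;    // CachePage::kPageSize.
static const int kLineLength = 1 << 2;   // CachePage::kLineLength.
static_assert(kPageSize / kLineLength == 1024,
              "one validity byte per 4-byte cache line");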
 class Simulator {
  public:
   friend class Debugger;
@@ -162,6 +194,9 @@
   // Pop an address from the JS stack.
   uintptr_t PopAddress();
 
+  // ICache checking.
+  static void FlushICache(void* start, size_t size);
+
  private:
   enum special_values {
     // Known bad pc value to ensure that the simulator does not execute
@@ -231,9 +266,19 @@
   void DecodeTypeVFP(Instr* instr);
   void DecodeType6CoprocessorIns(Instr* instr);
 
+  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
+  void DecodeVCMP(Instr* instr);
+  void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
+  void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
+
   // Executes one instruction.
   void InstructionDecode(Instr* instr);
 
+  // ICache.
+  static void CheckICache(Instr* instr);
+  static void FlushOnePage(intptr_t start, int size);
+  static CachePage* GetCachePage(void* page);
+
   // Runtime call support.
   static void* RedirectExternalReference(void* external_function,
                                          bool fp_return);
@@ -271,6 +316,9 @@
   int icount_;
   static bool initialized_;
 
+  // ICache simulation.
+  static v8::internal::HashMap* i_cache_;
+
   // Registered breakpoints.
   Instr* break_pc_;
   instr_t break_instr_;
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index da73942..095631d 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -53,7 +53,7 @@
   // Check that the key in the entry matches the name.
   __ mov(ip, Operand(key_offset));
   __ ldr(ip, MemOperand(ip, offset, LSL, 1));
-  __ cmp(name, Operand(ip));
+  __ cmp(name, ip);
   __ b(ne, &miss);
 
   // Get the code entry from the cache.
@@ -229,7 +229,6 @@
 
   // Load length directly from the string.
   __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
-  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
   __ Ret();
 
   // Check if the object is a JSValue wrapper.
@@ -241,7 +240,6 @@
   __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
   GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
   __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
-  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
   __ Ret();
 }
 
@@ -296,8 +294,8 @@
     // We jump to a runtime call that extends the properties array.
     __ push(receiver_reg);
     __ mov(r2, Operand(Handle<Map>(transition)));
-    __ stm(db_w, sp, r2.bit() | r0.bit());
-    __ TailCallRuntime(
+    __ Push(r2, r0);
+    __ TailCallExternalReference(
            ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)),
            3, 1);
     return;
@@ -396,15 +394,14 @@
                                      Register holder,
                                      Register name,
                                      JSObject* holder_obj) {
-  __ push(receiver);
-  __ push(holder);
   __ push(name);
   InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
   ASSERT(!Heap::InNewSpace(interceptor));
-
-  Register scratch = receiver;
+  Register scratch = name;
   __ mov(scratch, Operand(Handle<Object>(interceptor)));
   __ push(scratch);
+  __ push(receiver);
+  __ push(holder);
   __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
   __ push(scratch);
 }
@@ -465,8 +462,7 @@
     __ EnterInternalFrame();
 
     __ push(receiver);
-    __ push(holder);
-    __ push(name_);
+    __ Push(holder, name_);
 
     CompileCallLoadPropertyWithInterceptor(masm,
                                            receiver,
@@ -511,8 +507,7 @@
 
       Label cleanup;
       __ pop(scratch2);
-      __ push(receiver);
-      __ push(scratch2);
+      __ Push(receiver, scratch2);
 
       holder = stub_compiler->CheckPrototypes(holder_obj, holder,
                                               lookup->holder(), scratch1,
@@ -524,12 +519,11 @@
       __ Move(holder, Handle<AccessorInfo>(callback));
       __ push(holder);
       __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
-      __ push(scratch1);
-      __ push(name_);
+      __ Push(scratch1, name_);
 
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-      __ TailCallRuntime(ref, 5, 1);
+      __ TailCallExternalReference(ref, 5, 1);
 
       __ bind(&cleanup);
       __ pop(scratch1);
@@ -549,7 +543,7 @@
 
     ExternalReference ref = ExternalReference(
         IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
-    __ TailCallRuntime(ref, 5, 1);
+    __ TailCallExternalReference(ref, 5, 1);
   }
 
  private:
@@ -601,6 +595,280 @@
 }
 
 
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
+                                       Register scratch) {
+  __ mov(scratch, Operand(Smi::FromInt(0)));
+  __ push(scratch);
+  __ push(scratch);
+  __ push(scratch);
+  __ push(scratch);
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
+  __ Drop(4);
+}
+
+
+// Generates a call to the FastHandleApiCall builtin.
+static void GenerateFastApiCall(MacroAssembler* masm,
+                                const CallOptimization& optimization,
+                                int argc) {
+  // Get the function and set up the context.
+  JSFunction* function = optimization.constant_function();
+  __ mov(r7, Operand(Handle<JSFunction>(function)));
+  __ ldr(cp, FieldMemOperand(r7, JSFunction::kContextOffset));
+
+  // Pass the additional arguments FastHandleApiCall expects.
+  bool info_loaded = false;
+  Object* callback = optimization.api_call_info()->callback();
+  if (Heap::InNewSpace(callback)) {
+    info_loaded = true;
+    __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
+    __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kCallbackOffset));
+  } else {
+    __ Move(r6, Handle<Object>(callback));
+  }
+  Object* call_data = optimization.api_call_info()->data();
+  if (Heap::InNewSpace(call_data)) {
+    if (!info_loaded) {
+      __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
+    }
+    __ ldr(r5, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
+  } else {
+    __ Move(r5, Handle<Object>(call_data));
+  }
+
+  __ add(sp, sp, Operand(1 * kPointerSize));
+  __ stm(ia, sp, r5.bit() | r6.bit() | r7.bit());
+  __ sub(sp, sp, Operand(1 * kPointerSize));
+
+  // Set the number of arguments.
+  __ mov(r0, Operand(argc + 4));
+
+  // Jump to the fast api call builtin (tail call).
+  Handle<Code> code = Handle<Code>(
+      Builtins::builtin(Builtins::FastHandleApiCall));
+  ParameterCount expected(0);
+  __ InvokeCode(code, expected, expected,
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+}
+
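The add/stm/sub sequence in GenerateFastApiCall fills three of the four slots
reserved by ReserveSpaceForFastApiCall while leaving sp itself unchanged:
STMIA without writeback stores ascending from its base register, so after the
temporary bump the registers land just above the original stack pointer. A
sketch modelling the slot layout (an interpretation of the generated code,
not authoritative; register values are placeholders):

#include <cassert>
#include <cstdint>

int main() {
  // Index 0 plays the role of the slot at sp.
  uint32_t slots[4] = {0, 0, 0, 0};  // ReserveSpaceForFastApiCall.
  uint32_t r5 = 0x55, r6 = 0x66, r7 = 0x77;
  // add sp, sp, #4 ; stm ia, sp, {r5, r6, r7} ; sub sp, sp, #4
  uint32_t* sp = slots + 1;          // Temporary bump by one word.
  sp[0] = r5;  // Call data at sp+4.
  sp[1] = r6;  // Callback at sp+8.
  sp[2] = r7;  // Function at sp+12.
  assert(slots[0] == 0);             // The slot at sp stays reserved.
  assert(slots[1] == 0x55 && slots[3] == 0x77);
  return 0;
}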
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+  CallInterceptorCompiler(StubCompiler* stub_compiler,
+                          const ParameterCount& arguments,
+                          Register name)
+      : stub_compiler_(stub_compiler),
+        arguments_(arguments),
+        name_(name) {}
+
+  void Compile(MacroAssembler* masm,
+               JSObject* object,
+               JSObject* holder,
+               String* name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Label* miss) {
+    ASSERT(holder->HasNamedInterceptor());
+    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+    // Check that the receiver isn't a smi.
+    __ BranchOnSmi(receiver, miss);
+
+    CallOptimization optimization(lookup);
+
+    if (optimization.is_constant_call()) {
+      CompileCacheable(masm,
+                       object,
+                       receiver,
+                       scratch1,
+                       scratch2,
+                       holder,
+                       lookup,
+                       name,
+                       optimization,
+                       miss);
+    } else {
+      CompileRegular(masm,
+                     object,
+                     receiver,
+                     scratch1,
+                     scratch2,
+                     name,
+                     holder,
+                     miss);
+    }
+  }
+
+ private:
+  void CompileCacheable(MacroAssembler* masm,
+                        JSObject* object,
+                        Register receiver,
+                        Register scratch1,
+                        Register scratch2,
+                        JSObject* holder_obj,
+                        LookupResult* lookup,
+                        String* name,
+                        const CallOptimization& optimization,
+                        Label* miss_label) {
+    ASSERT(optimization.is_constant_call());
+    ASSERT(!lookup->holder()->IsGlobalObject());
+
+    int depth1 = kInvalidProtoDepth;
+    int depth2 = kInvalidProtoDepth;
+    bool can_do_fast_api_call = false;
+    if (optimization.is_simple_api_call() &&
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(object,
+                                                            holder_obj);
+      if (depth1 == kInvalidProtoDepth) {
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(
+            holder_obj, lookup->holder());
+      }
+      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+                             (depth2 != kInvalidProtoDepth);
+    }
+
+    __ IncrementCounter(&Counters::call_const_interceptor, 1,
+                        scratch1, scratch2);
+
+    if (can_do_fast_api_call) {
+      __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1,
+                          scratch1, scratch2);
+      ReserveSpaceForFastApiCall(masm, scratch1);
+    }
+
+    Label miss_cleanup;
+    Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+    Register holder =
+        stub_compiler_->CheckPrototypes(object, receiver, holder_obj, scratch1,
+                                        scratch2, name, depth1, miss);
+
+    Label regular_invoke;
+    LoadWithInterceptor(masm, receiver, holder, holder_obj, scratch2,
+                        &regular_invoke);
+
+    // Generate code for the failed interceptor case.
+
+    // Check that the lookup is still valid.
+    stub_compiler_->CheckPrototypes(holder_obj, receiver,
+                                    lookup->holder(), scratch1,
+                                    scratch2, name, depth2, miss);
+
+    if (can_do_fast_api_call) {
+      GenerateFastApiCall(masm, optimization, arguments_.immediate());
+    } else {
+      __ InvokeFunction(optimization.constant_function(), arguments_,
+                        JUMP_FUNCTION);
+    }
+
+    if (can_do_fast_api_call) {
+      __ bind(&miss_cleanup);
+      FreeSpaceForFastApiCall(masm);
+      __ b(miss_label);
+    }
+
+    __ bind(&regular_invoke);
+    if (can_do_fast_api_call) {
+      FreeSpaceForFastApiCall(masm);
+    }
+  }
+
+  void CompileRegular(MacroAssembler* masm,
+                      JSObject* object,
+                      Register receiver,
+                      Register scratch1,
+                      Register scratch2,
+                      String* name,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    Register holder =
+        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+                                        scratch1, scratch2, name,
+                                        miss_label);
+
+    // Call a runtime function to load the interceptor property.
+    __ EnterInternalFrame();
+    // Save the name_ register across the call.
+    __ push(name_);
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             name_,
+                             holder_obj);
+
+    __ CallExternalReference(
+          ExternalReference(
+              IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+          5);
+
+    // Restore the name_ register.
+    __ pop(name_);
+    __ LeaveInternalFrame();
+  }
+
+  void LoadWithInterceptor(MacroAssembler* masm,
+                           Register receiver,
+                           Register holder,
+                           JSObject* holder_obj,
+                           Register scratch,
+                           Label* interceptor_succeeded) {
+    __ EnterInternalFrame();
+    __ Push(holder, name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder (into the receiver register).
+    __ LeaveInternalFrame();
+
+    // If interceptor returns no-result sentinel, call the constant function.
+    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ cmp(r0, scratch);
+    __ b(ne, interceptor_succeeded);
+  }
+
+  StubCompiler* stub_compiler_;
+  const ParameterCount& arguments_;
+  Register name_;
+};
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
+                                         GlobalObject* global,
+                                         String* name,
+                                         Register scratch,
+                                         Label* miss) {
+  Object* probe = global->EnsurePropertyCell(name);
+  if (probe->IsFailure()) return probe;
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+  ASSERT(cell->value()->IsTheHole());
+  __ mov(scratch, Operand(Handle<Object>(cell)));
+  __ ldr(scratch,
+         FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(scratch, ip);
+  __ b(ne, miss);
+  return cell;
+}
+
+
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -613,31 +881,25 @@
                                        String* name,
                                        int save_at_depth,
                                        Label* miss) {
-  // TODO(602): support object saving.
-  ASSERT(save_at_depth == kInvalidProtoDepth);
-
   // Check that the maps haven't changed.
   Register result =
-      masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+      masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
+                        save_at_depth, miss);
 
   // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty.
   while (object != holder) {
     if (object->IsGlobalObject()) {
-      GlobalObject* global = GlobalObject::cast(object);
-      Object* probe = global->EnsurePropertyCell(name);
-      if (probe->IsFailure()) {
-        set_failure(Failure::cast(probe));
+      Object* cell = GenerateCheckPropertyCell(masm(),
+                                               GlobalObject::cast(object),
+                                               name,
+                                               scratch,
+                                               miss);
+      if (cell->IsFailure()) {
+        set_failure(Failure::cast(cell));
         return result;
       }
-      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
-      ASSERT(cell->value()->IsTheHole());
-      __ mov(scratch, Operand(Handle<Object>(cell)));
-      __ ldr(scratch,
-             FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-      __ cmp(scratch, ip);
-      __ b(ne, miss);
     }
     object = JSObject::cast(object->GetPrototype());
   }
@@ -708,18 +970,16 @@
       CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
 
   // Push the arguments on the JS stack of the caller.
-  __ push(receiver);  // receiver
-  __ push(reg);  // holder
+  __ push(receiver);  // Receiver.
+  __ push(reg);  // Holder.
   __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback data
-  __ push(ip);
   __ ldr(reg, FieldMemOperand(ip, AccessorInfo::kDataOffset));
-  __ push(reg);
-  __ push(name_reg);  // name
+  __ Push(ip, reg, name_reg);
 
   // Do tail-call to the runtime system.
   ExternalReference load_callback_property =
       ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-  __ TailCallRuntime(load_callback_property, 5, 1);
+  __ TailCallExternalReference(load_callback_property, 5, 1);
 
   return true;
 }
@@ -815,6 +1075,114 @@
 }
 
 
+Object* CallStubCompiler::CompileArrayPushCall(Object* object,
+                                               JSObject* holder,
+                                               JSFunction* function,
+                                               String* name,
+                                               CheckType check) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray()) {
+    return Heap::undefined_value();
+  }
+
+  // TODO(639): faster implementation.
+  ASSERT(check == RECEIVER_MAP_CHECK);
+
+  Label miss;
+
+  // Get the receiver from the stack
+  const int argc = arguments().immediate();
+  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
+
+  if (object->IsGlobalObject()) {
+    __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+    __ str(r3, MemOperand(sp, argc * kPointerSize));
+  }
+
+  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
+                               argc + 1,
+                               1);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
+Object* CallStubCompiler::CompileArrayPopCall(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              CheckType check) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray()) {
+    return Heap::undefined_value();
+  }
+
+  // TODO(642): faster implementation.
+  ASSERT(check == RECEIVER_MAP_CHECK);
+
+  Label miss;
+
+  // Get the receiver from the stack
+  const int argc = arguments().immediate();
+  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
+
+  if (object->IsGlobalObject()) {
+    __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+    __ str(r3, MemOperand(sp, argc * kPointerSize));
+  }
+
+  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
+                               argc + 1,
+                               1);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
@@ -824,7 +1192,18 @@
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
-  Label miss;
+  SharedFunctionInfo* function_info = function->shared();
+  if (function_info->HasCustomCallGenerator()) {
+    CustomCallGenerator generator =
+        ToCData<CustomCallGenerator>(function_info->function_data());
+    Object* result = generator(this, object, holder, function, name, check);
+    // Undefined means bail out to the regular compiler.
+    if (!result->IsUndefined()) {
+      return result;
+    }
+  }
+
+  Label miss_in_smi_check;
 
   // Get the receiver from the stack
   const int argc = arguments().immediate();
@@ -833,21 +1212,39 @@
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
     __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &miss);
+    __ b(eq, &miss_in_smi_check);
   }
 
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
 
+  CallOptimization optimization(function);
+  int depth = kInvalidProtoDepth;
+  Label miss;
+
   switch (check) {
     case RECEIVER_MAP_CHECK:
+      __ IncrementCounter(&Counters::call_const, 1, r0, r3);
+
+      if (optimization.is_simple_api_call() && !object->IsGlobalObject()) {
+        depth = optimization.GetPrototypeDepthOfExpectedType(
+            JSObject::cast(object), holder);
+      }
+
+      if (depth != kInvalidProtoDepth) {
+        __ IncrementCounter(&Counters::call_const_fast_api, 1, r0, r3);
+        ReserveSpaceForFastApiCall(masm(), r0);
+      }
+
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
+      CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, name,
+                      depth, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
       if (object->IsGlobalObject()) {
+        ASSERT(depth == kInvalidProtoDepth);
         __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
         __ str(r3, MemOperand(sp, argc * kPointerSize));
       }
@@ -916,26 +1313,23 @@
       break;
     }
 
-    case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
-      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
-      // Make sure object->HasFastElements().
-      // Get the elements array of the object.
-      __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
-      // Check that the object is in fast mode (not dictionary).
-      __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
-      __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-      __ cmp(r0, ip);
-      __ b(ne, &miss);
-      break;
-
     default:
       UNREACHABLE();
   }
 
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  if (depth != kInvalidProtoDepth) {
+    GenerateFastApiCall(masm(), optimization, argc);
+  } else {
+    __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
+  if (depth != kInvalidProtoDepth) {
+    FreeSpaceForFastApiCall(masm());
+  }
+
+  __ bind(&miss_in_smi_check);
   Handle<Code> ic = ComputeCallMiss(arguments().immediate());
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
@@ -955,14 +1349,8 @@
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
-  ASSERT(holder->HasNamedInterceptor());
-  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-  Label miss;
 
-  const Register receiver = r0;
-  const Register holder_reg = r1;
-  const Register name_reg = r2;
-  const Register scratch = r3;
+  Label miss;
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
@@ -970,81 +1358,24 @@
   LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
 
-  // Get the receiver from the stack into r0.
-  __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+  // Get the receiver from the stack.
+  __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
-  // Check that the receiver isn't a smi.
-  __ BranchOnSmi(receiver, &miss);
-
-  // Check that the maps haven't changed.
-  Register reg = CheckPrototypes(object, receiver, holder, holder_reg,
-                                 scratch, name, &miss);
-  if (!reg.is(holder_reg)) {
-    __ mov(holder_reg, reg);
-  }
-
-  // If we call a constant function when the interceptor returns
-  // the no-result sentinel, generate code that optimizes this case.
-  if (lookup.IsProperty() &&
-      lookup.IsCacheable() &&
-      lookup.type() == CONSTANT_FUNCTION &&
-      lookup.GetConstantFunction()->is_compiled() &&
-      !holder->IsJSArray()) {
-    // Constant functions cannot sit on global object.
-    ASSERT(!lookup.holder()->IsGlobalObject());
-
-    // Call the interceptor.
-    __ EnterInternalFrame();
-    __ push(holder_reg);
-    __ push(name_reg);
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           holder);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    __ LeaveInternalFrame();
-    // r0 no longer contains the receiver.
-
-    // If interceptor returns no-result sentinal, call the constant function.
-    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ cmp(r0, scratch);
-    Label invoke;
-    __ b(ne, &invoke);
-    // Check the prototypes between the interceptor's holder and the
-    // constant function's holder.
-    CheckPrototypes(holder, holder_reg,
-                    lookup.holder(), r0,
-                    scratch,
-                    name,
-                    &miss);
-
-    __ InvokeFunction(lookup.GetConstantFunction(),
-                      arguments(),
-                      JUMP_FUNCTION);
-
-    __ bind(&invoke);
-
-  } else {
-    // Call a runtime function to load the interceptor property.
-    __ EnterInternalFrame();
-    __ push(name_reg);
-
-    PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, holder);
-
-    __ CallExternalReference(
-        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
-        5);
-
-    __ pop(name_reg);
-    __ LeaveInternalFrame();
-  }
+  CallInterceptorCompiler compiler(this, arguments(), r2);
+  compiler.Compile(masm(),
+                   object,
+                   holder,
+                   name,
+                   &lookup,
+                   r1,
+                   r3,
+                   r4,
+                   &miss);
 
   // Move returned value, the function to call, to r1.
   __ mov(r1, r0);
   // Restore receiver.
-  __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+  __ ldr(r0, MemOperand(sp, argc * kPointerSize));
 
   GenerateCallFunction(masm(), object, arguments(), &miss);
 
@@ -1123,7 +1454,7 @@
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
-  __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
+  __ IncrementCounter(&Counters::call_global_inline, 1, r3, r4);
   ASSERT(function->is_compiled());
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1199,12 +1530,12 @@
 
   __ push(r1);  // receiver
   __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback info
-  __ stm(db_w, sp, ip.bit() | r2.bit() | r0.bit());
+  __ Push(ip, r2, r0);
 
   // Do tail-call to the runtime system.
   ExternalReference store_callback_property =
       ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
-  __ TailCallRuntime(store_callback_property, 4, 1);
+  __ TailCallExternalReference(store_callback_property, 4, 1);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -1244,14 +1575,12 @@
   // checks.
   ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
 
-  __ push(r1);  // receiver.
-  __ push(r2);  // name.
-  __ push(r0);  // value.
+  __ Push(r1, r2, r0);  // Receiver, name, value.
 
   // Do tail-call to the runtime system.
   ExternalReference store_ic_property =
       ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
-  __ TailCallRuntime(store_ic_property, 3, 1);
+  __ TailCallExternalReference(store_ic_property, 3, 1);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -1297,6 +1626,50 @@
 }
 
 
+Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
+                                                 JSObject* object,
+                                                 JSObject* last) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  //  -- [sp]  : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Load receiver.
+  __ ldr(r0, MemOperand(sp, 0));
+
+  // Check that receiver is not a smi.
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check the maps of the full prototype chain.
+  CheckPrototypes(object, r0, last, r3, r1, name, &miss);
+
+  // If the last object in the prototype chain is a global object,
+  // check that the global property cell is empty.
+  if (last->IsGlobalObject()) {
+    Object* cell = GenerateCheckPropertyCell(masm(),
+                                             GlobalObject::cast(last),
+                                             name,
+                                             r1,
+                                             &miss);
+    if (cell->IsFailure()) return cell;
+  }
+
+  // Return undefined if maps of the full prototype chain are still the
+  // same and no global property with this name contains a value.
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NONEXISTENT, Heap::empty_string());
+}
+
+
 Object* LoadStubCompiler::CompileLoadField(JSObject* object,
                                            JSObject* holder,
                                            int index,
@@ -1405,35 +1778,34 @@
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss;
 
-  // Get the receiver from the stack.
-  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
-
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
   if (object != holder) {
-    __ tst(r1, Operand(kSmiTagMask));
+    __ tst(r0, Operand(kSmiTagMask));
     __ b(eq, &miss);
   }
 
   // Check that the map of the global has not changed.
-  CheckPrototypes(object, r1, holder, r3, r0, name, &miss);
+  CheckPrototypes(object, r0, holder, r3, r4, name, &miss);
 
   // Get the value from the cell.
   __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
-  __ ldr(r0, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+  __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
   if (!is_dont_delete) {
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-    __ cmp(r0, ip);
+    __ cmp(r4, ip);
     __ b(eq, &miss);
   }
 
+  __ mov(r0, r4);
   __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
   __ Ret();
 
@@ -1452,18 +1824,18 @@
                                                 int index) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
+  //  -- r0    : key
   //  -- sp[0] : key
   //  -- sp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r2, MemOperand(sp, 0));
-  __ ldr(r0, MemOperand(sp, kPointerSize));
-
-  __ cmp(r2, Operand(Handle<String>(name)));
+  // Check the key is the cached one.
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadField(receiver, holder, r0, r3, r1, index, name, &miss);
+  __ ldr(r1, MemOperand(sp, kPointerSize));  // Receiver.
+  GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -1477,19 +1849,19 @@
                                                    AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
+  //  -- r0    : key
   //  -- sp[0] : key
   //  -- sp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r2, MemOperand(sp, 0));
-  __ ldr(r0, MemOperand(sp, kPointerSize));
-
-  __ cmp(r2, Operand(Handle<String>(name)));
+  // Check the key is the cached one.
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
   Failure* failure = Failure::InternalError();
-  bool success = GenerateLoadCallback(receiver, holder, r0, r2, r3, r1,
+  __ ldr(r1, MemOperand(sp, kPointerSize));  // Receiver.
+  bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
                                       callback, name, &miss, &failure);
   if (!success) return failure;
 
@@ -1506,19 +1878,18 @@
                                                    Object* value) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
+  //  -- r0    : key
   //  -- sp[0] : key
   //  -- sp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  // Check the key is the cached one
-  __ ldr(r2, MemOperand(sp, 0));
-  __ ldr(r0, MemOperand(sp, kPointerSize));
-
-  __ cmp(r2, Operand(Handle<String>(name)));
+  // Check the key is the cached one.
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadConstant(receiver, holder, r0, r3, r1, value, name, &miss);
+  __ ldr(r1, MemOperand(sp, kPointerSize));  // Receiver.
+  GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -1532,27 +1903,26 @@
                                                       String* name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
+  //  -- r0    : key
   //  -- sp[0] : key
   //  -- sp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  // Check the key is the cached one
-  __ ldr(r2, MemOperand(sp, 0));
-  __ ldr(r0, MemOperand(sp, kPointerSize));
-
-  __ cmp(r2, Operand(Handle<String>(name)));
+  // Check the key is the cached one.
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
   LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
+  __ ldr(r1, MemOperand(sp, kPointerSize));  // Receiver.
   GenerateLoadInterceptor(receiver,
                           holder,
                           &lookup,
+                          r1,
                           r0,
                           r2,
                           r3,
-                          r1,
                           name,
                           &miss);
   __ bind(&miss);
@@ -1565,19 +1935,18 @@
 Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
+  //  -- r0    : key
   //  -- sp[0] : key
   //  -- sp[4] : receiver
   // -----------------------------------
   Label miss;
 
-  // Check the key is the cached one
-  __ ldr(r2, MemOperand(sp, 0));
-  __ ldr(r0, MemOperand(sp, kPointerSize));
-
-  __ cmp(r2, Operand(Handle<String>(name)));
+  // Check the key is the cached one.
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadArrayLength(masm(), r0, r3, &miss);
+  __ ldr(r1, MemOperand(sp, kPointerSize));  // Receiver.
+  GenerateLoadArrayLength(masm(), r1, r2, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -1588,19 +1957,19 @@
 Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
+  //  -- r0    : key
   //  -- sp[0] : key
   //  -- sp[4] : receiver
   // -----------------------------------
   Label miss;
   __ IncrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
 
-  __ ldr(r2, MemOperand(sp));
-  __ ldr(r0, MemOperand(sp, kPointerSize));  // receiver
-
-  __ cmp(r2, Operand(Handle<String>(name)));
+  // Check the key is the cached one.
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadStringLength(masm(), r0, r1, r3, &miss);
+  __ ldr(r1, MemOperand(sp, kPointerSize));  // Receiver.
+  GenerateLoadStringLength(masm(), r1, r2, r3, &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
 
@@ -1614,6 +1983,7 @@
 Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
+  //  -- r0    : key
   //  -- sp[0] : key
   //  -- sp[4] : receiver
   // -----------------------------------
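All the KeyedLoadStubCompiler hunks above implement one calling-convention change: the key now arrives in r0 instead of being reloaded from sp[0], so each stub checks the key against the cached name first and only fetches the receiver from the stack once the check passes. A condensed sketch of the shared shape (assembled from the hunks above, not a new stub):

    Label miss;
    // Check the key is the cached one (already in r0 on entry).
    __ cmp(r0, Operand(Handle<String>(name)));
    __ b(ne, &miss);
    __ ldr(r1, MemOperand(sp, kPointerSize));  // Receiver.
    // ... type-specific load of the result into r0 ...
    __ bind(&miss);
    GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);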
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 0f7c597..bf5cff2 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -30,69 +30,131 @@
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "scopes.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
 
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
 #define __ ACCESS_MASM(masm())
 
-
-// On entry to a function, the virtual frame already contains the
-// receiver and the parameters.  All initial frame elements are in
-// memory.
-VirtualFrame::VirtualFrame()
-    : elements_(parameter_count() + local_count() + kPreallocatedElements),
-      stack_pointer_(parameter_count()) {  // 0-based index of TOS.
-  for (int i = 0; i <= stack_pointer_; i++) {
-    elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
-  }
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    register_locations_[i] = kIllegalIndex;
-  }
+void VirtualFrame::PopToR1R0() {
+  VirtualFrame where_to_go = *this;
+  // Shuffle things around so the top of stack is in r0 and r1.
+  where_to_go.top_of_stack_state_ = R0_R1_TOS;
+  MergeTo(&where_to_go);
+  // Pop the two registers off the stack so they are detached from the frame.
+  element_count_ -= 2;
+  top_of_stack_state_ = NO_TOS_REGISTERS;
 }
 
 
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
-  UNREACHABLE();
+void VirtualFrame::PopToR1() {
+  VirtualFrame where_to_go = *this;
+  // Shuffle things around so the top of stack is only in r1.
+  where_to_go.top_of_stack_state_ = R1_TOS;
+  MergeTo(&where_to_go);
+  // Pop the register off the stack so it is detached from the frame.
+  element_count_ -= 1;
+  top_of_stack_state_ = NO_TOS_REGISTERS;
 }
 
 
-void VirtualFrame::SyncElementByPushing(int index) {
-  UNREACHABLE();
-}
-
-
-void VirtualFrame::SyncRange(int begin, int end) {
-  // All elements are in memory on ARM (ie, synced).
-#ifdef DEBUG
-  for (int i = begin; i <= end; i++) {
-    ASSERT(elements_[i].is_synced());
-  }
-#endif
+void VirtualFrame::PopToR0() {
+  VirtualFrame where_to_go = *this;
+  // Shuffle things around so the top of stack is only in r0.
+  where_to_go.top_of_stack_state_ = R0_TOS;
+  MergeTo(&where_to_go);
+  // Pop the register off the stack so it is detached from the frame.
+  element_count_ -= 1;
+  top_of_stack_state_ = NO_TOS_REGISTERS;
 }
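PopToR1R0, PopToR1 and PopToR0 above are one maneuver with three targets: clone the frame, set the clone's top-of-stack state to the wanted register assignment, let MergeTo emit the moves, then shrink the element count so the registers are detached from the frame. A hypothetical common helper shows the pattern (sketch only; the source keeps the three functions separate):

    // Hypothetical factoring of the PopTo* family (not in the source).
    void VirtualFrame::PopToState(TopOfStack target, int register_count) {
      VirtualFrame where_to_go = *this;
      where_to_go.top_of_stack_state_ = target;  // R0_TOS, R1_TOS or R0_R1_TOS.
      MergeTo(&where_to_go);                     // Emits the needed pops/moves.
      element_count_ -= register_count;          // Detach register(s) from frame.
      top_of_stack_state_ = NO_TOS_REGISTERS;
    }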
 
 
 void VirtualFrame::MergeTo(VirtualFrame* expected) {
-  // ARM frames are currently always in memory.
-  ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
-  UNREACHABLE();
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
-  UNREACHABLE();
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
-  UNREACHABLE();
+  if (Equals(expected)) return;
+#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
+  switch (CASE_NUMBER(top_of_stack_state_, expected->top_of_stack_state_)) {
+    case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
+      break;
+    case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
+      __ pop(r0);
+      break;
+    case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
+      __ pop(r1);
+      break;
+    case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
+      __ pop(r0);
+      __ pop(r1);
+      break;
+    case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
+      __ pop(r1);
+      __ pop(r0);
+      break;
+    case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
+      __ push(r0);
+      break;
+    case CASE_NUMBER(R0_TOS, R0_TOS):
+      break;
+    case CASE_NUMBER(R0_TOS, R1_TOS):
+      __ mov(r1, r0);
+      break;
+    case CASE_NUMBER(R0_TOS, R0_R1_TOS):
+      __ pop(r1);
+      break;
+    case CASE_NUMBER(R0_TOS, R1_R0_TOS):
+      __ mov(r1, r0);
+      __ pop(r0);
+      break;
+    case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
+      __ push(r1);
+      break;
+    case CASE_NUMBER(R1_TOS, R0_TOS):
+      __ mov(r0, r1);
+      break;
+    case CASE_NUMBER(R1_TOS, R1_TOS):
+      break;
+    case CASE_NUMBER(R1_TOS, R0_R1_TOS):
+      __ mov(r0, r1);
+      __ pop(r1);
+      break;
+    case CASE_NUMBER(R1_TOS, R1_R0_TOS):
+      __ pop(r0);
+      break;
+    case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
+      __ Push(r1, r0);
+      break;
+    case CASE_NUMBER(R0_R1_TOS, R0_TOS):
+      __ push(r1);
+      break;
+    case CASE_NUMBER(R0_R1_TOS, R1_TOS):
+      __ push(r1);
+      __ mov(r1, r0);
+      break;
+    case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
+      break;
+    case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
+      __ Swap(r0, r1, ip);
+      break;
+    case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
+      __ Push(r0, r1);
+      break;
+    case CASE_NUMBER(R1_R0_TOS, R0_TOS):
+      __ push(r0);
+      __ mov(r0, r1);
+      break;
+    case CASE_NUMBER(R1_R0_TOS, R1_TOS):
+      __ push(r0);
+      break;
+    case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
+      __ Swap(r0, r1, ip);
+      break;
+    case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
+      break;
+    default:
+      UNREACHABLE();
+#undef CASE_NUMBER
+  }
+  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
 }
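MergeTo covers the full 5 x 5 matrix of (current, expected) top-of-stack states with a single switch by packing each pair into one small integer. The packing is collision-free because the five TopOfStack values are consecutive:

    // CASE_NUMBER(a, b) == a * TOS_STATES + b with TOS_STATES == 5, so e.g.
    // (R0_TOS, R1_TOS) maps to 1 * 5 + 2 == 7 and every pair is distinct.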
 
 
@@ -120,8 +182,6 @@
   __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
   // Adjust FP to point to saved FP.
   __ add(fp, sp, Operand(2 * kPointerSize));
-  cgen()->allocator()->Unuse(r1);
-  cgen()->allocator()->Unuse(lr);
 }
 
 
@@ -180,37 +240,11 @@
 
 
 
-void VirtualFrame::SaveContextRegister() {
-  UNIMPLEMENTED();
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
-  UNIMPLEMENTED();
-}
-
-
 void VirtualFrame::PushReceiverSlotAddress() {
   UNIMPLEMENTED();
 }
 
 
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
-  UNIMPLEMENTED();
-  return kIllegalIndex;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
-  UNIMPLEMENTED();
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
-  UNIMPLEMENTED();
-}
-
-
 void VirtualFrame::PushTryHandler(HandlerType type) {
   // Grow the expression stack by handler size less one (the return
   // address in lr is already counted by a call instruction).
@@ -219,7 +253,22 @@
 }
 
 
+void VirtualFrame::CallJSFunction(int arg_count) {
+  // InvokeFunction requires function in r1.
+  EmitPop(r1);
+
+  // +1 for receiver.
+  Forget(arg_count + 1);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(r1, count, CALL_FUNCTION);
+  // Restore the context.
+  __ ldr(cp, Context());
+}
+
+
 void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+  ASSERT(SpilledScope::is_spilled());
   Forget(arg_count);
   ASSERT(cgen()->HasValidEntryRegisters());
   __ CallRuntime(f, arg_count);
@@ -249,6 +298,43 @@
 }
 
 
+void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  SpillAllButCopyTOSToR0();
+  __ mov(r2, Operand(name));
+  CallCodeObject(ic, mode, 0);
+}
+
+
+void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  PopToR0();
+  if (is_contextual) {
+    SpillAll();
+    __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  } else {
+    EmitPop(r1);
+    SpillAll();
+  }
+  __ mov(r2, Operand(name));
+  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+}
+
+
+void VirtualFrame::CallKeyedLoadIC() {
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  SpillAllButCopyTOSToR0();
+  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+}
+
+
+void VirtualFrame::CallKeyedStoreIC() {
+  ASSERT(SpilledScope::is_spilled());
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+}
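Between them, the four helpers above fix the register protocol each IC expects on entry (recapping the code above and the comments in the header):

    // LoadIC:       r0 = receiver (copied from TOS), r2 = name.
    // StoreIC:      r0 = value, r1 = receiver (the global object when
    //               is_contextual), r2 = name.
    // KeyedLoadIC:  r0 = key (copied from TOS), receiver below it on the stack.
    // KeyedStoreIC: r0 = value, key and receiver on the stack (frame spilled).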
+
+
 void VirtualFrame::CallCodeObject(Handle<Code> code,
                                   RelocInfo::Mode rmode,
                                   int dropped_args) {
@@ -275,57 +361,308 @@
 }
 
 
+//    NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS.
+const bool VirtualFrame::kR0InUse[TOS_STATES] =
+    { false,            true,   false,  true,      true };
+const bool VirtualFrame::kR1InUse[TOS_STATES] =
+    { false,            false,  true,   true,      true };
+const int VirtualFrame::kVirtualElements[TOS_STATES] =
+    { 0,                1,      1,      2,         2 };
+const Register VirtualFrame::kTopRegister[TOS_STATES] =
+    { r0,               r0,     r1,     r1,        r0 };
+const Register VirtualFrame::kBottomRegister[TOS_STATES] =
+    { r0,               r0,     r1,     r0,        r1 };
+const Register VirtualFrame::kAllocatedRegisters[
+    VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 };
+// Popping is done by the transition implied by kStateAfterPop.  Of course if
+// there were no stack slots allocated to registers then the physical SP must
+// be adjusted.
+const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
+    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
+// Pushing is done by the transition implied by kStateAfterPush.  Of course if
+// the maximum number of registers was already allocated to the top of stack
+// slots then one register must be physically pushed onto the stack.
+const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
+    { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
+
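The tables describe a pushdown cache at most two registers deep. Tracing the common transitions from a cold start, derived directly from the arrays above:

    // Start:        NO_TOS_REGISTERS                     everything in memory
    // EmitPush(x):  kStateAfterPush -> R0_TOS            x cached in r0
    // EmitPush(y):  kStateAfterPush -> R1_R0_TOS         y in r1 (top), x in r0
    // Pop():        kStateAfterPop  -> R0_TOS            y discarded, x still in r0
    // Pop():        kStateAfterPop  -> NO_TOS_REGISTERS  memory-only again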
+
+bool VirtualFrame::SpilledScope::is_spilled_ = false;
+
+
 void VirtualFrame::Drop(int count) {
   ASSERT(count >= 0);
   ASSERT(height() >= count);
-  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
-  // Emit code to lower the stack pointer if necessary.
-  if (num_virtual_elements < count) {
-    int num_dropped = count - num_virtual_elements;
-    stack_pointer_ -= num_dropped;
-    __ add(sp, sp, Operand(num_dropped * kPointerSize));
-  }
-
   // Discard elements from the virtual frame and free any registers.
-  for (int i = 0; i < count; i++) {
-    FrameElement dropped = elements_.RemoveLast();
-    if (dropped.is_register()) {
-      Unuse(dropped.reg());
-    }
+  int num_virtual_elements = kVirtualElements[top_of_stack_state_];
+  while (num_virtual_elements > 0) {
+    Pop();
+    num_virtual_elements--;
+    count--;
+    if (count == 0) return;
   }
+  if (count == 0) return;
+  __ add(sp, sp, Operand(count * kPointerSize));
+  element_count_ -= count;
 }
 
 
-Result VirtualFrame::Pop() {
-  UNIMPLEMENTED();
-  return Result();
+void VirtualFrame::Pop() {
+  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
+    __ add(sp, sp, Operand(kPointerSize));
+  } else {
+    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
+  }
+  element_count_--;
 }
 
 
 void VirtualFrame::EmitPop(Register reg) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(reg);
+  ASSERT(!is_used(reg));
+  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
+    __ pop(reg);
+  } else {
+    __ mov(reg, kTopRegister[top_of_stack_state_]);
+    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
+  }
+  element_count_--;
+}
+
+
+void VirtualFrame::SpillAllButCopyTOSToR0() {
+  switch (top_of_stack_state_) {
+    case NO_TOS_REGISTERS:
+      __ ldr(r0, MemOperand(sp, 0));
+      break;
+    case R0_TOS:
+      __ push(r0);
+      break;
+    case R1_TOS:
+      __ push(r1);
+      __ mov(r0, r1);
+      break;
+    case R0_R1_TOS:
+      __ Push(r1, r0);
+      break;
+    case R1_R0_TOS:
+      __ Push(r0, r1);
+      __ mov(r0, r1);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  top_of_stack_state_ = NO_TOS_REGISTERS;
+}
+
+
+void VirtualFrame::SpillAllButCopyTOSToR1R0() {
+  switch (top_of_stack_state_) {
+    case NO_TOS_REGISTERS:
+      __ ldr(r1, MemOperand(sp, 0));
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R0_TOS:
+      __ push(r0);
+      __ mov(r1, r0);
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R1_TOS:
+      __ push(r1);
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R0_R1_TOS:
+      __ Push(r1, r0);
+      __ Swap(r0, r1, ip);
+      break;
+    case R1_R0_TOS:
+      __ Push(r0, r1);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  top_of_stack_state_ = NO_TOS_REGISTERS;
+}
+
+
+Register VirtualFrame::Peek() {
+  AssertIsNotSpilled();
+  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
+    top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
+    Register answer = kTopRegister[top_of_stack_state_];
+    __ pop(answer);
+    return answer;
+  } else {
+    return kTopRegister[top_of_stack_state_];
+  }
+}
+
+
+void VirtualFrame::Dup() {
+  AssertIsNotSpilled();
+  switch (top_of_stack_state_) {
+    case NO_TOS_REGISTERS:
+      __ ldr(r0, MemOperand(sp, 0));
+      top_of_stack_state_ = R0_TOS;
+      break;
+    case R0_TOS:
+      __ mov(r1, r0);
+      top_of_stack_state_ = R0_R1_TOS;
+      break;
+    case R1_TOS:
+      __ mov(r0, r1);
+      top_of_stack_state_ = R0_R1_TOS;
+      break;
+    case R0_R1_TOS:
+      __ push(r1);
+      __ mov(r1, r0);
+      // No need to change state as r0 and r1 now contain the same value.
+      break;
+    case R1_R0_TOS:
+      __ push(r0);
+      __ mov(r0, r1);
+      // No need to change state as r0 and r1 now contain the same value.
+      break;
+    default:
+      UNREACHABLE();
+  }
+  element_count_++;
+}
+
+
+Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
+  ASSERT(but_not_to_this_one.is(r0) ||
+         but_not_to_this_one.is(r1) ||
+         but_not_to_this_one.is(no_reg));
+  AssertIsNotSpilled();
+  element_count_--;
+  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
+    if (but_not_to_this_one.is(r0)) {
+      __ pop(r1);
+      return r1;
+    } else {
+      __ pop(r0);
+      return r0;
+    }
+  } else {
+    Register answer = kTopRegister[top_of_stack_state_];
+    ASSERT(!answer.is(but_not_to_this_one));
+    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
+    return answer;
+  }
+}
+
+
+void VirtualFrame::EnsureOneFreeTOSRegister() {
+  if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
+    __ push(kBottomRegister[top_of_stack_state_]);
+    top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
+    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
+  }
+  ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters);
 }
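EnsureOneFreeTOSRegister gets away without a dedicated spill table by composing the push and pop transitions. Worked through for the fully occupied states:

    // From R1_R0_TOS (r1 = top, r0 = bottom):
    //   __ push(r0);                             bottom register goes to memory
    //   kStateAfterPush[R1_R0_TOS] == R0_R1_TOS
    //   kStateAfterPop[R0_R1_TOS]  == R1_TOS     r0 free, r1 still caches the top
    // From R0_R1_TOS the same two steps give R0_TOS, freeing r1.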
 
 
 void VirtualFrame::EmitPush(Register reg) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
-  stack_pointer_++;
-  __ push(reg);
+  element_count_++;
+  if (SpilledScope::is_spilled()) {
+    __ push(reg);
+    return;
+  }
+  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
+    if (reg.is(r0)) {
+      top_of_stack_state_ = R0_TOS;
+      return;
+    }
+    if (reg.is(r1)) {
+      top_of_stack_state_ = R1_TOS;
+      return;
+    }
+  }
+  EnsureOneFreeTOSRegister();
+  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
+  Register dest = kTopRegister[top_of_stack_state_];
+  __ Move(dest, reg);
+}
+
+
+Register VirtualFrame::GetTOSRegister() {
+  if (SpilledScope::is_spilled()) return r0;
+
+  EnsureOneFreeTOSRegister();
+  return kTopRegister[kStateAfterPush[top_of_stack_state_]];
+}
+
+
+void VirtualFrame::EmitPush(Operand operand) {
+  element_count_++;
+  if (SpilledScope::is_spilled()) {
+    __ mov(r0, operand);
+    __ push(r0);
+    return;
+  }
+  EnsureOneFreeTOSRegister();
+  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
+  __ mov(kTopRegister[top_of_stack_state_], operand);
+}
+
+
+void VirtualFrame::EmitPush(MemOperand operand) {
+  element_count_++;
+  if (SpilledScope::is_spilled()) {
+    __ ldr(r0, operand);
+    __ push(r0);
+    return;
+  }
+  EnsureOneFreeTOSRegister();
+  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
+  __ ldr(kTopRegister[top_of_stack_state_], operand);
+}
+
+
+void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
+  element_count_++;
+  if (SpilledScope::is_spilled()) {
+    __ LoadRoot(r0, index);
+    __ push(r0);
+    return;
+  }
+  EnsureOneFreeTOSRegister();
+  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
+  __ LoadRoot(kTopRegister[top_of_stack_state_], index);
 }
 
 
 void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
-  ASSERT(stack_pointer_ == element_count() - 1);
+  ASSERT(SpilledScope::is_spilled());
   Adjust(count);
   __ stm(db_w, sp, src_regs);
 }
 
 
+void VirtualFrame::SpillAll() {
+  switch (top_of_stack_state_) {
+    case R1_R0_TOS:
+      masm()->push(r0);
+      // Fall through.
+    case R1_TOS:
+      masm()->push(r1);
+      top_of_stack_state_ = NO_TOS_REGISTERS;
+      break;
+    case R0_R1_TOS:
+      masm()->push(r1);
+      // Fall through.
+    case R0_TOS:
+      masm()->push(r0);
+      top_of_stack_state_ = NO_TOS_REGISTERS;
+      // Fall through.
+    case NO_TOS_REGISTERS:
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  ASSERT(register_allocation_map_ == 0);  // Not yet implemented.
+}
+
 #undef __
 
 } }  // namespace v8::internal
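Net effect of this rewrite: the ARM virtual frame moves from an always-in-memory model (where most helpers were UNREACHABLE or asserted a trivial state) to a genuine top-of-stack register cache. Typical use of the new surface looks like this (a sketch using only methods defined above, not a quote from the code generator):

    frame_->EmitPush(r0);                    // Virtual push: r0 becomes cached TOS.
    Register top = frame_->Peek();           // Aliased; copy before modifying.
    Register val = frame_->PopToRegister();  // Comes back in r0 or r1, no memory op.
    frame_->SpillAllButCopyTOSToR0();        // Flush caches, keep a TOS copy in r0.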
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index a45cfc6..77bc70e 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -45,61 +45,123 @@
 
 class VirtualFrame : public ZoneObject {
  public:
+  class RegisterAllocationScope;
   // A utility class to introduce a scope where the virtual frame is
   // expected to remain spilled.  The constructor spills the code
-  // generator's current frame, but no attempt is made to require it
-  // to stay spilled.  It is intended as documentation while the code
-  // generator is being transformed.
+  // generator's current frame, and keeps it spilled.
   class SpilledScope BASE_EMBEDDED {
    public:
-    SpilledScope() {}
+    explicit SpilledScope(VirtualFrame* frame)
+      : old_is_spilled_(is_spilled_) {
+      if (frame != NULL) {
+        if (!is_spilled_) {
+          frame->SpillAll();
+        } else {
+          frame->AssertIsSpilled();
+        }
+      }
+      is_spilled_ = true;
+    }
+    ~SpilledScope() {
+      is_spilled_ = old_is_spilled_;
+    }
+    static bool is_spilled() { return is_spilled_; }
+
+   private:
+    static bool is_spilled_;
+    int old_is_spilled_;
+
+    SpilledScope() { }
+
+    friend class RegisterAllocationScope;
+  };
+
+  class RegisterAllocationScope BASE_EMBEDDED {
+   public:
+    // A utility class to introduce a scope where the virtual frame
+    // is not spilled, ie. where register allocation occurs.  Eventually
+    // when RegisterAllocationScope is ubiquitous it can be removed
+    // along with the (by then unused) SpilledScope class.
+    explicit RegisterAllocationScope(CodeGenerator* cgen)
+      : cgen_(cgen),
+        old_is_spilled_(SpilledScope::is_spilled_) {
+      SpilledScope::is_spilled_ = false;
+      if (old_is_spilled_) {
+        VirtualFrame* frame = cgen->frame();
+        if (frame != NULL) {
+          frame->AssertIsSpilled();
+        }
+      }
+    }
+    ~RegisterAllocationScope() {
+      SpilledScope::is_spilled_ = old_is_spilled_;
+      if (old_is_spilled_) {
+        VirtualFrame* frame = cgen_->frame();
+        if (frame != NULL) {
+          frame->SpillAll();
+        }
+      }
+    }
+
+   private:
+    CodeGenerator* cgen_;
+    bool old_is_spilled_;
+
+    RegisterAllocationScope() { }
   };
 
   // An illegal index into the virtual frame.
   static const int kIllegalIndex = -1;
 
   // Construct an initial virtual frame on entry to a JS function.
-  VirtualFrame();
+  inline VirtualFrame();
 
   // Construct a virtual frame as a clone of an existing one.
-  explicit VirtualFrame(VirtualFrame* original);
+  explicit inline VirtualFrame(VirtualFrame* original);
 
   CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
   MacroAssembler* masm() { return cgen()->masm(); }
 
-  // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index,
-                             NumberInfo::Type info = NumberInfo::kUnknown);
-
   // The number of elements on the virtual frame.
-  int element_count() { return elements_.length(); }
+  int element_count() { return element_count_; }
 
   // The height of the virtual expression stack.
   int height() {
     return element_count() - expression_base_index();
   }
 
-  int register_location(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num];
-  }
-
-  int register_location(Register reg) {
-    return register_locations_[RegisterAllocator::ToNumber(reg)];
-  }
-
-  void set_register_location(Register reg, int index) {
-    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
-  }
-
   bool is_used(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num] != kIllegalIndex;
+    switch (num) {
+      case 0: {  // r0.
+        return kR0InUse[top_of_stack_state_];
+      }
+      case 1: {  // r1.
+        return kR1InUse[top_of_stack_state_];
+      }
+      case 2:
+      case 3:
+      case 4:
+      case 5:
+      case 6: {  // r2 to r6.
+        ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
+        ASSERT(num >= kFirstAllocatedRegister);
+        if ((register_allocation_map_ &
+             (1 << (num - kFirstAllocatedRegister))) == 0) {
+          return false;
+        } else {
+          return true;
+        }
+      }
+      default: {
+        ASSERT(num < kFirstAllocatedRegister ||
+               num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
+        return false;
+      }
+    }
   }
 
   bool is_used(Register reg) {
-    return register_locations_[RegisterAllocator::ToNumber(reg)]
-        != kIllegalIndex;
+    return is_used(RegisterAllocator::ToNumber(reg));
   }
 
   // Add extra in-memory elements to the top of the frame to match an actual
@@ -108,39 +170,35 @@
   void Adjust(int count);
 
   // Forget elements from the top of the frame to match an actual frame (eg,
-  // the frame after a runtime call).  No code is emitted.
+  // the frame after a runtime call).  No code is emitted except to bring the
+  // frame to a spilled state.
   void Forget(int count) {
-    ASSERT(count >= 0);
-    ASSERT(stack_pointer_ == element_count() - 1);
-    stack_pointer_ -= count;
-    // On ARM, all elements are in memory, so there is no extra bookkeeping
-    // (registers, copies, etc.) beyond dropping the elements.
-    elements_.Rewind(stack_pointer_ + 1);
+    SpillAll();
+    element_count_ -= count;
   }
 
-  // Forget count elements from the top of the frame and adjust the stack
-  // pointer downward.  This is used, for example, before merging frames at
-  // break, continue, and return targets.
-  void ForgetElements(int count);
-
   // Spill all values from the frame to memory.
   void SpillAll();
 
+  void AssertIsSpilled() {
+    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
+    ASSERT(register_allocation_map_ == 0);
+  }
+
+  void AssertIsNotSpilled() {
+    ASSERT(!SpilledScope::is_spilled());
+  }
+
   // Spill all occurrences of a specific register from the frame.
   void Spill(Register reg) {
-    if (is_used(reg)) SpillElementAt(register_location(reg));
+    UNIMPLEMENTED();
   }
 
   // Spill all occurrences of an arbitrary register if possible.  Return the
   // register spilled or no_reg if it was not possible to free any register
-  // (ie, they all have frame-external references).
+  // (ie, they all have frame-external references).  Unimplemented.
   Register SpillAnyRegister();
 
-  // Prepare this virtual frame for merging to an expected frame by
-  // performing some state changes that do not require generating
-  // code.  It is guaranteed that no code will be generated.
-  void PrepareMergeTo(VirtualFrame* expected);
-
   // Make this virtual frame have a state identical to an expected virtual
   // frame.  As a side effect, code may be emitted to make this frame match
   // the expected one.
@@ -151,10 +209,7 @@
   // registers.  Used when the code generator's frame is switched from this
   // one to NULL by an unconditional jump.
   void DetachFromCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Unuse(i);
-    }
+    AssertIsSpilled();
   }
 
   // (Re)attach a frame to its code generator.  This informs the register
@@ -162,10 +217,7 @@
   // Used when a code generator's frame is switched from NULL to this one by
   // binding a label.
   void AttachToCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Unuse(i);
-    }
+    AssertIsSpilled();
   }
 
   // Emit code for the physical JS entry and exit frame sequences.  After
@@ -179,7 +231,7 @@
   // dropping all non-locals elements in the virtual frame.  This
   // avoids generating unnecessary merge code when jumping to the
   // shared return site.  Emits code for spills.
-  void PrepareForReturn();
+  inline void PrepareForReturn();
 
   // Number of local variables after which we use a loop for allocating.
   static const int kLocalVarBound = 5;
@@ -188,27 +240,17 @@
   void AllocateStackSlots();
 
   // The current top of the expression stack as an assembly operand.
-  MemOperand Top() { return MemOperand(sp, 0); }
+  MemOperand Top() {
+    AssertIsSpilled();
+    return MemOperand(sp, 0);
+  }
 
   // An element of the expression stack as an assembly operand.
   MemOperand ElementAt(int index) {
+    AssertIsSpilled();
     return MemOperand(sp, index * kPointerSize);
   }
 
-  // Random-access store to a frame-top relative frame element.  The result
-  // becomes owned by the frame and is invalidated.
-  void SetElementAt(int index, Result* value);
-
-  // Set a frame element to a constant.  The index is frame-top relative.
-  void SetElementAt(int index, Handle<Object> value) {
-    Result temp(value);
-    SetElementAt(index, &temp);
-  }
-
-  void PushElementAt(int index) {
-    PushFrameSlotAt(element_count() - index - 1);
-  }
-
   // A frame-allocated local as an assembly operand.
   MemOperand LocalAt(int index) {
     ASSERT(0 <= index);
@@ -216,43 +258,15 @@
     return MemOperand(fp, kLocal0Offset - index * kPointerSize);
   }
 
-  // Push a copy of the value of a local frame slot on top of the frame.
-  void PushLocalAt(int index) {
-    PushFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the value of a local frame slot on top of the frame and invalidate
-  // the local slot.  The slot should be written to before trying to read
-  // from it again.
-  void TakeLocalAt(int index) {
-    TakeFrameSlotAt(local0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a local frame slot.  The
-  // value is left in place on top of the frame.
-  void StoreToLocalAt(int index) {
-    StoreToFrameSlotAt(local0_index() + index);
-  }
-
   // Push the address of the receiver slot on the frame.
   void PushReceiverSlotAddress();
 
   // The function frame slot.
   MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
 
-  // Push the function on top of the frame.
-  void PushFunction() { PushFrameSlotAt(function_index()); }
-
   // The context frame slot.
   MemOperand Context() { return MemOperand(fp, kContextOffset); }
 
-  // Save the value of the esi register to the context frame slot.
-  void SaveContextRegister();
-
-  // Restore the esi register from the value of the context frame
-  // slot.
-  void RestoreContextRegister();
-
   // A parameter as an assembly operand.
   MemOperand ParameterAt(int index) {
     // Index -1 corresponds to the receiver.
@@ -261,24 +275,6 @@
     return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
   }
 
-  // Push a copy of the value of a parameter frame slot on top of the frame.
-  void PushParameterAt(int index) {
-    PushFrameSlotAt(param0_index() + index);
-  }
-
-  // Push the value of a paramter frame slot on top of the frame and
-  // invalidate the parameter slot.  The slot should be written to before
-  // trying to read from it again.
-  void TakeParameterAt(int index) {
-    TakeFrameSlotAt(param0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a parameter frame slot.
-  // The value is left in place on top of the frame.
-  void StoreToParameterAt(int index) {
-    StoreToFrameSlotAt(param0_index() + index);
-  }
-
   // The receiver frame slot.
   MemOperand Receiver() { return ParameterAt(-1); }
 
@@ -288,11 +284,15 @@
   // Call stub given the number of arguments it expects on (and
   // removes from) the stack.
   void CallStub(CodeStub* stub, int arg_count) {
-    Forget(arg_count);
+    if (arg_count != 0) Forget(arg_count);
     ASSERT(cgen()->HasValidEntryRegisters());
     masm()->CallStub(stub);
   }
 
+  // Call JS function from top of the stack with arguments
+  // taken from the stack.
+  void CallJSFunction(int arg_count);
+
   // Call runtime given the number of arguments expected on (and
   // removed from) the stack.
   void CallRuntime(Runtime::Function* f, int arg_count);
@@ -308,6 +308,22 @@
                      InvokeJSFlags flag,
                      int arg_count);
 
+  // Call load IC. Receiver is on the stack. Result is returned in r0.
+  void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
+
+  // Call store IC. If the store is contextual, the value is found on top of the
+  // frame. If not, value and receiver are on the frame. Both are consumed.
+  // Result is returned in r0.
+  void CallStoreIC(Handle<String> name, bool is_contextual);
+
+  // Call keyed load IC. Key and receiver are on the stack. Result is returned
+  // in r0.
+  void CallKeyedLoadIC();
+
+  // Call keyed store IC. Key and receiver are on the stack and the value is in
+  // r0. Result is returned in r0.
+  void CallKeyedStoreIC();
+
   // Call into an IC stub given the number of arguments it removes
   // from the stack.  Register arguments to the IC stub are implicit,
   // and depend on the type of IC stub.
@@ -323,46 +339,61 @@
   // Drop one element.
   void Drop() { Drop(1); }
 
-  // Duplicate the top element of the frame.
-  void Dup() { PushFrameSlotAt(element_count() - 1); }
+  // Pop an element from the top of the expression stack.  Discards
+  // the result.
+  void Pop();
 
-  // Pop an element from the top of the expression stack.  Returns a
-  // Result, which may be a constant or a register.
-  Result Pop();
+  // Pop an element from the top of the expression stack.  The register
+  // will be one normally used for the top of stack register allocation
+  // so you can't hold on to it if you push on the stack.
+  Register PopToRegister(Register but_not_to_this_one = no_reg);
+
+  // Look at the top of the stack.  The register returned is aliased and
+  // must be copied to a scratch register before modification.
+  Register Peek();
+
+  // Duplicate the top of stack.
+  void Dup();
+
+  // Flushes all registers, but leaves a copy of the top of stack in r0.
+  void SpillAllButCopyTOSToR0();
+
+  // Flushes all registers, but leaves a copy of the top of stack in r1
+  // and a copy of the next value on the stack in r0.
+  void SpillAllButCopyTOSToR1R0();
 
   // Pop and save an element from the top of the expression stack and
   // emit a corresponding pop instruction.
   void EmitPop(Register reg);
 
+  // Takes the top two elements and puts them in r0 (top element) and r1
+  // (second element).
+  void PopToR1R0();
+
+  // Takes the top element and puts it in r1.
+  void PopToR1();
+
+  // Takes the top element and puts it in r0.
+  void PopToR0();
+
   // Push an element on top of the expression stack and emit a
   // corresponding push instruction.
   void EmitPush(Register reg);
+  void EmitPush(Operand operand);
+  void EmitPush(MemOperand operand);
+  void EmitPushRoot(Heap::RootListIndex index);
+
+  // Get a register which is free and which must be immediately used to
+  // push on the top of the stack.
+  Register GetTOSRegister();
 
   // Push multiple registers on the stack and the virtual frame.
   // Registers are selected by setting bits in src_regs and
   // are pushed in decreasing order: r15 .. r0.
   void EmitPushMultiple(int count, int src_regs);
 
-  // Push an element on the virtual frame.
-  void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
-  void Push(Handle<Object> value);
-  void Push(Smi* value) { Push(Handle<Object>(value)); }
-
-  // Pushing a result invalidates it (its contents become owned by the frame).
-  void Push(Result* result) {
-    if (result->is_register()) {
-      Push(result->reg());
-    } else {
-      ASSERT(result->is_constant());
-      Push(result->handle());
-    }
-    result->Unuse();
-  }
-
-  // Nip removes zero or more elements from immediately below the top
-  // of the frame, leaving the previous top-of-frame value on top of
-  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
-  void Nip(int num_dropped);
+  static Register scratch0() { return r7; }
+  static Register scratch1() { return r9; }
 
  private:
   static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
@@ -372,15 +403,47 @@
   static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
   static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
 
-  ZoneList<FrameElement> elements_;
+  // 5 states for the top of stack, which can be in memory or in r0 and r1.
+  enum TopOfStack {
+    NO_TOS_REGISTERS,
+    R0_TOS,
+    R1_TOS,
+    R1_R0_TOS,
+    R0_R1_TOS,
+    TOS_STATES
+  };
+
+  static const int kMaxTOSRegisters = 2;
+
+  static const bool kR0InUse[TOS_STATES];
+  static const bool kR1InUse[TOS_STATES];
+  static const int kVirtualElements[TOS_STATES];
+  static const TopOfStack kStateAfterPop[TOS_STATES];
+  static const TopOfStack kStateAfterPush[TOS_STATES];
+  static const Register kTopRegister[TOS_STATES];
+  static const Register kBottomRegister[TOS_STATES];
+
+  // We allocate up to 5 locals in registers.
+  static const int kNumberOfAllocatedRegisters = 5;
+  // r2 to r6 are allocated to locals.
+  static const int kFirstAllocatedRegister = 2;
+
+  static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
+
+  static Register AllocatedRegister(int r) {
+    ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
+    return kAllocatedRegisters[r];
+  }
+
+  // The number of elements on the stack frame.
+  int element_count_;
+  TopOfStack top_of_stack_state_:3;
+  int register_allocation_map_:kNumberOfAllocatedRegisters;
 
   // The index of the element that is at the processor's stack pointer
-  // (the sp register).
-  int stack_pointer_;
-
-  // The index of the register frame element using each register, or
-  // kIllegalIndex if a register is not on the frame.
-  int register_locations_[RegisterAllocator::kNumRegisters];
+  // (the sp register).  For now since everything is in memory it is given
+  // by the number of elements on the not-very-virtual stack frame.
+  int stack_pointer() { return element_count_ - 1; }
 
   // The number of frame-allocated locals and parameters respectively.
   int parameter_count() { return cgen()->scope()->num_parameters(); }
@@ -418,93 +481,20 @@
     return (frame_pointer() - index) * kPointerSize;
   }
 
-  // Record an occurrence of a register in the virtual frame.  This has the
-  // effect of incrementing the register's external reference count and
-  // of updating the index of the register's location in the frame.
-  void Use(Register reg, int index) {
-    ASSERT(!is_used(reg));
-    set_register_location(reg, index);
-    cgen()->allocator()->Use(reg);
-  }
-
-  // Record that a register reference has been dropped from the frame.  This
-  // decrements the register's external reference count and invalidates the
-  // index of the register's location in the frame.
-  void Unuse(Register reg) {
-    ASSERT(is_used(reg));
-    set_register_location(reg, kIllegalIndex);
-    cgen()->allocator()->Unuse(reg);
-  }
-
-  // Spill the element at a particular index---write it to memory if
-  // necessary, free any associated register, and forget its value if
-  // constant.
-  void SpillElementAt(int index);
-
-  // Sync the element at a particular index.  If it is a register or
-  // constant that disagrees with the value on the stack, write it to memory.
-  // Keep the element type as register or constant, and clear the dirty bit.
-  void SyncElementAt(int index);
-
-  // Sync the range of elements in [begin, end] with memory.
-  void SyncRange(int begin, int end);
-
-  // Sync a single unsynced element that lies beneath or at the stack pointer.
-  void SyncElementBelowStackPointer(int index);
-
-  // Sync a single unsynced element that lies just above the stack pointer.
-  void SyncElementByPushing(int index);
-
-  // Push a copy of a frame slot (typically a local or parameter) on top of
-  // the frame.
-  void PushFrameSlotAt(int index);
-
-  // Push a the value of a frame slot (typically a local or parameter) on
-  // top of the frame and invalidate the slot.
-  void TakeFrameSlotAt(int index);
-
-  // Store the value on top of the frame to a frame slot (typically a local
-  // or parameter).
-  void StoreToFrameSlotAt(int index);
-
   // Spill all elements in registers. Spill the top spilled_args elements
   // on the frame.  Sync all other frame elements.
   // Then drop dropped_args elements from the virtual frame, to match
   // the effect of an upcoming call that will drop them from the stack.
   void PrepareForCall(int spilled_args, int dropped_args);
 
-  // Move frame elements currently in registers or constants, that
-  // should be in memory in the expected frame, to memory.
-  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+  // If all top-of-stack registers are in use then the lowest one is pushed
+  // onto the physical stack and made free.
+  void EnsureOneFreeTOSRegister();
 
-  // Make the register-to-register moves necessary to
-  // merge this frame with the expected frame.
-  // Register to memory moves must already have been made,
-  // and memory to register moves must follow this call.
-  // This is because some new memory-to-register moves are
-  // created in order to break cycles of register moves.
-  // Used in the implementation of MergeTo().
-  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+  inline bool Equals(VirtualFrame* other);
 
-  // Make the memory-to-register and constant-to-register moves
-  // needed to make this frame equal the expected frame.
-  // Called after all register-to-memory and register-to-register
-  // moves have been made.  After this function returns, the frames
-  // should be equal.
-  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
-  // Invalidates a frame slot (puts an invalid frame element in it).
-  // Copies on the frame are correctly handled, and if this slot was
-  // the backing store of copies, the index of the new backing store
-  // is returned.  Otherwise, returns kIllegalIndex.
-  // Register counts are correctly updated.
-  int InvalidateFrameSlotAt(int index);
-
-  bool Equals(VirtualFrame* other);
-
-  // Classes that need raw access to the elements_ array.
-  friend class DeferredCode;
   friend class JumpTarget;
+  friend class DeferredCode;
 };
 
 
diff --git a/src/array.js b/src/array.js
index c28a662..216c03b 100644
--- a/src/array.js
+++ b/src/array.js
@@ -644,55 +644,33 @@
   // In-place QuickSort algorithm.
   // For short (length <= 22) arrays, insertion sort is used for efficiency.
 
-  var custom_compare = IS_FUNCTION(comparefn);
-
-  function Compare(x,y) {
-    // Assume the comparefn, if any, is a consistent comparison function.
-    // If it isn't, we are allowed arbitrary behavior by ECMA 15.4.4.11.
-    if (x === y) return 0;
-    if (custom_compare) {
-      // Don't call directly to avoid exposing the builtin's global object.
-      return comparefn.call(null, x, y);
-    }
-    if (%_IsSmi(x) && %_IsSmi(y)) {
-      return %SmiLexicographicCompare(x, y);
-    }
-    x = ToString(x);
-    y = ToString(y);
-    if (x == y) return 0;
-    else return x < y ? -1 : 1;
-  };
+  if (!IS_FUNCTION(comparefn)) {
+    comparefn = function (x, y) {
+      if (x === y) return 0;
+      if (%_IsSmi(x) && %_IsSmi(y)) {
+        return %SmiLexicographicCompare(x, y);
+      }
+      x = ToString(x);
+      y = ToString(y);
+      if (x == y) return 0;
+      else return x < y ? -1 : 1;
+    };
+  }
+  var global_receiver = %GetGlobalReceiver();
 
   function InsertionSort(a, from, to) {
     for (var i = from + 1; i < to; i++) {
       var element = a[i];
-      // Pre-convert the element to a string for comparison if we know
-      // it will happen on each compare anyway.
-      var key =
-          (custom_compare || %_IsSmi(element)) ? element : ToString(element);
-      // place element in a[from..i[
-      // binary search
-      var min = from;
-      var max = i;
-      // The search interval is a[min..max[
-      while (min < max) {
-        var mid = min + ((max - min) >> 1);
-        var order = Compare(a[mid], key);
-        if (order == 0) {
-          min = max = mid;
+      for (var j = i - 1; j >= from; j--) {
+        var tmp = a[j];
+        var order = %_CallFunction(global_receiver, tmp, element, comparefn);
+        if (order > 0) {
+          a[j + 1] = tmp;
+        } else {
           break;
         }
-        if (order < 0) {
-          min = mid + 1;
-        } else {
-          max = mid;
-        }
       }
-      // place element at position min==max.
-      for (var j = i; j > min; j--) {
-        a[j] = a[j - 1];
-      }
-      a[min] = element;
+      a[j + 1] = element;
     }
   }
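The binary-search insertion variant is gone: with the comparator now always called through %_CallFunction (against the global receiver, so the builtin's global object is never leaked), there is no per-element string pre-conversion left to amortize, and a plain linear insertion sort is simpler and cheap enough for the short runs it handles. The same loop in standalone C++ form (a sketch of the algorithm only, not V8 code):

    #include <vector>

    // Shift elements greater than `element` one slot right, then drop it in.
    template <typename T, typename Cmp>
    void InsertionSort(std::vector<T>* a, int from, int to, Cmp cmp) {
      for (int i = from + 1; i < to; i++) {
        T element = (*a)[i];
        int j = i - 1;
        for (; j >= from; j--) {
          if (cmp((*a)[j], element) > 0) {
            (*a)[j + 1] = (*a)[j];
          } else {
            break;
          }
        }
        (*a)[j + 1] = element;
      }
    }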
 
@@ -704,30 +682,23 @@
     }
     var pivot_index = $floor($random() * (to - from)) + from;
     var pivot = a[pivot_index];
-    // Pre-convert the element to a string for comparison if we know
-    // it will happen on each compare anyway.
-    var pivot_key =
-      (custom_compare || %_IsSmi(pivot)) ? pivot : ToString(pivot);
     // Issue 95: Keep the pivot element out of the comparisons to avoid
     // infinite recursion if comparefn(pivot, pivot) != 0.
-    a[pivot_index] = a[from];
-    a[from] = pivot;
+    %_SwapElements(a, from, pivot_index);
     var low_end = from;   // Upper bound of the elements lower than pivot.
     var high_start = to;  // Lower bound of the elements greater than pivot.
     // From low_end to i are elements equal to pivot.
     // From i to high_start are elements that haven't been compared yet.
     for (var i = from + 1; i < high_start; ) {
       var element = a[i];
-      var order = Compare(element, pivot_key);
+      var order = %_CallFunction(global_receiver, element, pivot, comparefn);
       if (order < 0) {
-        a[i] = a[low_end];
-        a[low_end] = element;
+        %_SwapElements(a, i, low_end);
         i++;
         low_end++;
       } else if (order > 0) {
         high_start--;
-        a[i] = a[high_start];
-        a[high_start] = element;
+        %_SwapElements(a, i, high_start);
       } else {  // order == 0
         i++;
       }
@@ -736,8 +707,6 @@
     QuickSort(a, high_start, to);
   }
 
-  var length;
-
   // Copies elements in the range 0..length from obj's prototype chain
   // to obj itself, if obj has holes. Returns one more than the maximal index
   // of a prototype property.
@@ -855,7 +824,7 @@
     return first_undefined;
   }
 
-  length = TO_UINT32(this.length);
+  var length = TO_UINT32(this.length);
   if (length < 2) return this;
 
   var is_array = IS_ARRAY(this);
@@ -994,11 +963,16 @@
     // If index is still negative, search the entire array.
     if (index < 0) index = 0;
   }
+  if (!IS_UNDEFINED(element)) {
+    for (var i = index; i < length; i++) {
+      if (this[i] === element) return i;
+    }
+    return -1;
+  }
   // Lookup through the array.
   for (var i = index; i < length; i++) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      if (current === element) return i;
+    if (IS_UNDEFINED(this[i]) && i in this) {
+      return i;
     }
   }
   return -1;
@@ -1018,10 +992,15 @@
     else if (index >= length) index = length - 1;
   }
   // Lookup through the array.
+  if (!IS_UNDEFINED(element)) {
+    for (var i = index; i >= 0; i--) {
+      if (this[i] === element) return i;
+    }
+    return -1;
+  }
   for (var i = index; i >= 0; i--) {
-    var current = this[i];
-    if (!IS_UNDEFINED(current) || i in this) {
-      if (current === element) return i;
+    if (IS_UNDEFINED(this[i]) && i in this) {
+      return i;
     }
   }
   return -1;
@@ -1088,15 +1067,6 @@
   return IS_ARRAY(obj);
 }
 
-// -------------------------------------------------------------------
-
-
-function UpdateFunctionLengths(lengths) {
-  for (var key in lengths) {
-    %FunctionSetLength(this[key], lengths[key]);
-  }
-}
-
 
 // -------------------------------------------------------------------
 function SetupArray() {
@@ -1109,46 +1079,48 @@
     "isArray", ArrayIsArray
   ));
 
+  var specialFunctions = %SpecialArrayFunctions({});
+
+  function getFunction(name, jsBuiltin, len) {
+    var f = jsBuiltin;
+    if (specialFunctions.hasOwnProperty(name)) {
+      f = specialFunctions[name];
+    }
+    if (!IS_UNDEFINED(len)) {
+      %FunctionSetLength(f, len);
+    }
+    return f;
+  }
+
   // Setup non-enumerable functions of the Array.prototype object and
   // set their names.
-  InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
-    "toString", ArrayToString,
-    "toLocaleString", ArrayToLocaleString,
-    "join", ArrayJoin,
-    "pop", ArrayPop,
-    "push", ArrayPush,
-    "concat", ArrayConcat,
-    "reverse", ArrayReverse,
-    "shift", ArrayShift,
-    "unshift", ArrayUnshift,
-    "slice", ArraySlice,
-    "splice", ArraySplice,
-    "sort", ArraySort,
-    "filter", ArrayFilter,
-    "forEach", ArrayForEach,
-    "some", ArraySome,
-    "every", ArrayEvery,
-    "map", ArrayMap,
-    "indexOf", ArrayIndexOf,
-    "lastIndexOf", ArrayLastIndexOf,
-    "reduce", ArrayReduce,
-    "reduceRight", ArrayReduceRight
-  ));
-    
   // Manipulate the length of some of the functions to meet
   // expectations set by ECMA-262 or Mozilla.
-  UpdateFunctionLengths({
-    ArrayFilter: 1,
-    ArrayForEach: 1,
-    ArraySome: 1,
-    ArrayEvery: 1,
-    ArrayMap: 1,
-    ArrayIndexOf: 1,
-    ArrayLastIndexOf: 1,
-    ArrayPush: 1,
-    ArrayReduce: 1,
-    ArrayReduceRight: 1
-  });
+  InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
+    "toString", getFunction("toString", ArrayToString),
+    "toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
+    "join", getFunction("join", ArrayJoin),
+    "pop", getFunction("pop", ArrayPop),
+    "push", getFunction("push", ArrayPush, 1),
+    "concat", getFunction("concat", ArrayConcat, 1),
+    "reverse", getFunction("reverse", ArrayReverse),
+    "shift", getFunction("shift", ArrayShift),
+    "unshift", getFunction("unshift", ArrayUnshift, 1),
+    "slice", getFunction("slice", ArraySlice, 2),
+    "splice", getFunction("splice", ArraySplice, 2),
+    "sort", getFunction("sort", ArraySort),
+    "filter", getFunction("filter", ArrayFilter, 1),
+    "forEach", getFunction("forEach", ArrayForEach, 1),
+    "some", getFunction("some", ArraySome, 1),
+    "every", getFunction("every", ArrayEvery, 1),
+    "map", getFunction("map", ArrayMap, 1),
+    "indexOf", getFunction("indexOf", ArrayIndexOf, 1),
+    "lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
+    "reduce", getFunction("reduce", ArrayReduce, 1),
+    "reduceRight", getFunction("reduceRight", ArrayReduceRight, 1)
+  ));
+
+  %FinishArrayPrototypeSetup($Array.prototype);
 }
 
 
diff --git a/src/assembler.cc b/src/assembler.cc
index aaf10ef..ac03c20 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -46,7 +46,7 @@
 #include "regexp-macro-assembler.h"
 #include "platform.h"
 // Include native regexp-macro-assembler.
-#ifdef V8_NATIVE_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
 #include "ia32/regexp-macro-assembler-ia32.h"
 #elif V8_TARGET_ARCH_X64
@@ -56,7 +56,7 @@
 #else  // Unknown architecture.
 #error "Unknown architecture."
 #endif  // Target architecture.
-#endif  // V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 namespace v8 {
 namespace internal {
@@ -574,8 +574,14 @@
 }
 
 
-ExternalReference ExternalReference::random_positive_smi_function() {
-  return ExternalReference(Redirect(FUNCTION_ADDR(V8::RandomPositiveSmi)));
+ExternalReference ExternalReference::fill_heap_number_with_random_function() {
+  return
+      ExternalReference(Redirect(FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
+}
+
+
+ExternalReference ExternalReference::random_uint32_function() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(V8::Random)));
 }
 
 
@@ -664,7 +670,17 @@
 }
 
 
-#ifdef V8_NATIVE_REGEXP
+ExternalReference ExternalReference::compile_array_pop_call() {
+  return ExternalReference(FUNCTION_ADDR(CompileArrayPopCall));
+}
+
+
+ExternalReference ExternalReference::compile_array_push_call() {
+  return ExternalReference(FUNCTION_ADDR(CompileArrayPushCall));
+}
+
+
+#ifndef V8_INTERPRETED_REGEXP
 
 ExternalReference ExternalReference::re_check_stack_guard_state() {
   Address function;
@@ -707,7 +723,7 @@
   return ExternalReference(RegExpStack::memory_size_address());
 }
 
-#endif
+#endif  // V8_INTERPRETED_REGEXP
 
 
 static double add_two_doubles(double x, double y) {
diff --git a/src/assembler.h b/src/assembler.h
index 004ede3..03a2f8e 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -37,7 +37,6 @@
 
 #include "runtime.h"
 #include "top.h"
-#include "zone-inl.h"
 #include "token.h"
 
 namespace v8 {
@@ -399,7 +398,8 @@
   // ExternalReferenceTable in serialize.cc manually.
 
   static ExternalReference perform_gc_function();
-  static ExternalReference random_positive_smi_function();
+  static ExternalReference fill_heap_number_with_random_function();
+  static ExternalReference random_uint32_function();
   static ExternalReference transcendental_cache_array_address();
 
   // Static data in the keyed lookup cache.
@@ -444,6 +444,9 @@
 
   static ExternalReference scheduled_exception_address();
 
+  static ExternalReference compile_array_pop_call();
+  static ExternalReference compile_array_push_call();
+
   Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -454,7 +457,7 @@
   static ExternalReference debug_step_in_fp_address();
 #endif
 
-#ifdef V8_NATIVE_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
   // C functions called from RegExp generated code.
 
   // Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
diff --git a/src/ast.cc b/src/ast.cc
index 7cb5578..75b2945 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "data-flow.h"
 #include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"
@@ -46,11 +47,8 @@
 // ----------------------------------------------------------------------------
 // All the Accept member functions for each syntax tree node type.
 
-#define DECL_ACCEPT(type)                \
-  void type::Accept(AstVisitor* v) {        \
-    if (v->CheckStackOverflow()) return; \
-    v->Visit##type(this);                \
-  }
+#define DECL_ACCEPT(type)                                       \
+  void type::Accept(AstVisitor* v) { v->Visit##type(this); }
 AST_NODE_LIST(DECL_ACCEPT)
 #undef DECL_ACCEPT
 
@@ -58,22 +56,38 @@
 // ----------------------------------------------------------------------------
 // Implementation of other node functionality.
 
+Assignment* ExpressionStatement::StatementAsSimpleAssignment() {
+  return (expression()->AsAssignment() != NULL &&
+          !expression()->AsAssignment()->is_compound())
+      ? expression()->AsAssignment()
+      : NULL;
+}
+
+
+CountOperation* ExpressionStatement::StatementAsCountOperation() {
+  return expression()->AsCountOperation();
+}
+
+
 VariableProxy::VariableProxy(Handle<String> name,
                              bool is_this,
                              bool inside_with)
   : name_(name),
     var_(NULL),
     is_this_(is_this),
-    inside_with_(inside_with) {
+    inside_with_(inside_with),
+    is_trivial_(false),
+    reaching_definitions_(NULL),
+    is_primitive_(false) {
   // names must be canonicalized for fast equality checks
   ASSERT(name->IsSymbol());
-  // at least one access, otherwise no need for a VariableProxy
-  var_uses_.RecordRead(1);
 }
 
 
 VariableProxy::VariableProxy(bool is_this)
-  : is_this_(is_this) {
+  : is_this_(is_this),
+    reaching_definitions_(NULL),
+    is_primitive_(false) {
 }
 
 
@@ -87,8 +101,7 @@
   // eval() etc.  Const-ness and variable declarations are a complete mess
   // in JS. Sigh...
   var_ = var;
-  var->var_uses()->RecordUses(&var_uses_);
-  var->obj_uses()->RecordUses(&obj_uses_);
+  var->set_is_used(true);
 }
 
 
@@ -156,9 +169,82 @@
 }
 
 
+bool Expression::GuaranteedSmiResult() {
+  BinaryOperation* node = AsBinaryOperation();
+  if (node == NULL) return false;
+  Token::Value op = node->op();
+  switch (op) {
+    case Token::COMMA:
+    case Token::OR:
+    case Token::AND:
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+    case Token::BIT_XOR:
+    case Token::SHL:
+      return false;
+      break;
+    case Token::BIT_OR:
+    case Token::BIT_AND: {
+      Literal* left = node->left()->AsLiteral();
+      Literal* right = node->right()->AsLiteral();
+      if (left != NULL && left->handle()->IsSmi()) {
+        int value = Smi::cast(*left->handle())->value();
+        if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
+          // Result of bitwise or is always a negative Smi.
+          return true;
+        }
+        if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
+          // Result of bitwise and is always a positive Smi.
+          return true;
+        }
+      }
+      if (right != NULL && right->handle()->IsSmi()) {
+        int value = Smi::cast(*right->handle())->value();
+        if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
+          // Result of bitwise or is always a negative Smi.
+          return true;
+        }
+        if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
+          // Result of bitwise and is always a positive Smi.
+          return true;
+        }
+      }
+      return false;
+      break;
+    }
+    case Token::SAR:
+    case Token::SHR: {
+      Literal* right = node->right()->AsLiteral();
+      if (right != NULL && right->handle()->IsSmi()) {
+        int value = Smi::cast(*right->handle())->value();
+        if ((value & 0x1F) > 1 ||
+            (op == Token::SAR && (value & 0x1F) == 1)) {
+          return true;
+        }
+      }
+      return false;
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return false;
+}
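+
+
+// Illustrative JavaScript sketch (not part of this change): with 31-bit
+// Smis, a result is a guaranteed Smi whenever its top two bits are pinned
+// by the operation, e.g.:
+//
+//   x | 0xc0000000   // top two bits forced set   -> always a negative Smi
+//   x & 0x3fffffff   // top two bits forced clear -> always a positive Smi
+//   x >> 1           // SAR by >= 1 stays within Smi range
+//   x >>> 2          // SHR by >= 2 clears the top two bits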
+
 // ----------------------------------------------------------------------------
 // Implementation of AstVisitor
 
+bool AstVisitor::CheckStackOverflow() {
+  if (stack_overflow_) return true;
+  StackLimitCheck check;
+  if (!check.HasOverflowed()) return false;
+  return (stack_overflow_ = true);
+}
+
 
 void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
   for (int i = 0; i < declarations->length(); i++) {
@@ -487,5 +573,566 @@
   }
 }
 
+// IsPrimitive implementation.  IsPrimitive is true if the value of an
+// expression is known at compile-time to be any JS type other than Object
+// (e.g., it is Undefined, Null, Boolean, String, or Number).
+
+// The following expression types are never primitive because they express
+// Object values.
+bool FunctionLiteral::IsPrimitive() { return false; }
+bool SharedFunctionInfoLiteral::IsPrimitive() { return false; }
+bool RegExpLiteral::IsPrimitive() { return false; }
+bool ObjectLiteral::IsPrimitive() { return false; }
+bool ArrayLiteral::IsPrimitive() { return false; }
+bool CatchExtensionObject::IsPrimitive() { return false; }
+bool CallNew::IsPrimitive() { return false; }
+bool ThisFunction::IsPrimitive() { return false; }
+
+
+// The following expression types are not always primitive because we do not
+// have enough information to conclude that they are.
+bool Property::IsPrimitive() { return false; }
+bool Call::IsPrimitive() { return false; }
+bool CallRuntime::IsPrimitive() { return false; }
+
+
+// A variable use is not primitive unless the primitive-type analysis
+// determines otherwise.
+bool VariableProxy::IsPrimitive() {
+  ASSERT(!is_primitive_ || (var() != NULL && var()->IsStackAllocated()));
+  return is_primitive_;
+}
+
+
+// The value of a conditional is the value of one of the alternatives.  It's
+// always primitive if both alternatives are always primitive.
+bool Conditional::IsPrimitive() {
+  return then_expression()->IsPrimitive() && else_expression()->IsPrimitive();
+}
+
+
+// A literal is primitive when it is not a JSObject.
+bool Literal::IsPrimitive() { return !handle()->IsJSObject(); }
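+
+
+// For instance (sketch, assumed inputs): Literal(42) and Literal("abc")
+// are primitive, an ObjectLiteral such as {a: 1} is not, and a Property
+// load like o.p cannot be proven primitive at compile time.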
+
+
+// The value of an assignment is the value of its right-hand side.
+bool Assignment::IsPrimitive() {
+  switch (op()) {
+    case Token::INIT_VAR:
+    case Token::INIT_CONST:
+    case Token::ASSIGN:
+      return value()->IsPrimitive();
+
+    default:
+      // {|=, ^=, &=, <<=, >>=, >>>=, +=, -=, *=, /=, %=}
+      // Arithmetic operations are always primitive.  They express Numbers
+      // with the exception of +, which expresses a Number or a String.
+      return true;
+  }
+}
+
+
+// Throw does not express a value, so it's trivially always primitive.
+bool Throw::IsPrimitive() { return true; }
+
+
+// Unary operations always express primitive values.  delete and ! express
+// Booleans, void expresses Undefined, typeof expresses String, and +, -,
+// and ~ express Numbers.
+bool UnaryOperation::IsPrimitive() { return true; }
+
+
+// Count operations (pre- and post-fix increment and decrement) always
+// express primitive values (Numbers).  See ECMA-262-3, 11.3.1, 11.3.2,
+// 11.4.4, and 11.4.5.
+bool CountOperation::IsPrimitive() { return true; }
+
+
+// Binary operations depend on the operator.
+bool BinaryOperation::IsPrimitive() {
+  switch (op()) {
+    case Token::COMMA:
+      // Value is the value of the right subexpression.
+      return right()->IsPrimitive();
+
+    case Token::OR:
+    case Token::AND:
+      // Value is the value of one of the subexpressions.
+      return left()->IsPrimitive() && right()->IsPrimitive();
+
+    default:
+      // {|, ^, &, <<, >>, >>>, +, -, *, /, %}
+      // Arithmetic operations are always primitive.  They express Numbers
+      // with the exception of +, which expresses a Number or a String.
+      return true;
+  }
+}
+
+
+// Compare operations always express Boolean values.
+bool CompareOperation::IsPrimitive() { return true; }
+
+
+// Overridden IsCritical member functions.  IsCritical is true for AST nodes
+// whose evaluation is absolutely required (they are never dead) because
+// they are externally visible.
+
+// References to global variables or lookup slots are critical because they
+// may have getters.  All others, including parameters rewritten to explicit
+// property references, are not critical.
+bool VariableProxy::IsCritical() {
+  Variable* var = AsVariable();
+  return var != NULL &&
+      (var->slot() == NULL || var->slot()->type() == Slot::LOOKUP);
+}
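+
+
+// Example (sketch): in "var x = o.p + 1" the load of o.p is critical (a
+// getter may run), while a read of a stack-allocated local is not critical
+// and, if its value is unused, may be treated as dead.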
+
+
+// Literals are never critical.
+bool Literal::IsCritical() { return false; }
+
+
+// Property assignments and throwing of reference errors are always
+// critical.  Assignments to escaping variables are also critical.  In
+// addition the operation of compound assignments is critical if either of
+// its operands is non-primitive (the arithmetic operations all use one of
+// ToPrimitive, ToNumber, ToInt32, or ToUint32 on each of their operands).
+// In this case, we mark the entire AST node as critical because there is
+// no binary operation node to mark.
+bool Assignment::IsCritical() {
+  Variable* var = AssignedVariable();
+  return var == NULL ||
+      !var->IsStackAllocated() ||
+      (is_compound() && (!target()->IsPrimitive() || !value()->IsPrimitive()));
+}
+
+
+// Property references are always critical, because they may have getters.
+bool Property::IsCritical() { return true; }
+
+
+// Calls are always critical.
+bool Call::IsCritical() { return true; }
+
+
+// Unary + and - use ToNumber on the value of their operand.
+bool UnaryOperation::IsCritical() {
+  ASSERT(op() == Token::ADD || op() == Token::SUB);
+  return !expression()->IsPrimitive();
+}
+
+
+// Count operations targeting properties and reference errors are always
+// critical.  Count operations on escaping variables are critical.  Count
+// operations targeting non-primitives are also critical because they use
+// ToNumber.
+bool CountOperation::IsCritical() {
+  Variable* var = AssignedVariable();
+  return var == NULL ||
+      !var->IsStackAllocated() ||
+      !expression()->IsPrimitive();
+}
+
+
+// Arithmetic operations all use one of ToPrimitive, ToNumber, ToInt32, or
+// ToUint32 on each of their operands.
+bool BinaryOperation::IsCritical() {
+  ASSERT(op() != Token::COMMA);
+  ASSERT(op() != Token::OR);
+  ASSERT(op() != Token::AND);
+  return !left()->IsPrimitive() || !right()->IsPrimitive();
+}
+
+
+// <, >, <=, and >= all use ToPrimitive on both their operands.
+bool CompareOperation::IsCritical() {
+  ASSERT(op() != Token::EQ);
+  ASSERT(op() != Token::NE);
+  ASSERT(op() != Token::EQ_STRICT);
+  ASSERT(op() != Token::NE_STRICT);
+  ASSERT(op() != Token::INSTANCEOF);
+  ASSERT(op() != Token::IN);
+  return !left()->IsPrimitive() || !right()->IsPrimitive();
+}
+
+
+// Implementation of a copy visitor.  The visitor creates a deep copy
+// of AST nodes.  Nodes that do not require a deep copy are copied
+// with the default copy constructor.
+
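+// Usage sketch (hypothetical call site, not part of this change):
+//
+//   CopyAstVisitor copier;
+//   Statement* copy = copier.DeepCopyStmt(loop->body());
+//   if (copier.HasStackOverflow()) {
+//     // A node kind without a copy constructor was hit; the copy is
+//     // unusable and the caller should bail out.
+//   }
+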
+AstNode::AstNode(AstNode* other) : num_(kNoNumber) {
+  // AST node number should be unique. Assert that we only copy AstNodes
+  // before node numbers are assigned.
+  ASSERT(other->num_ == kNoNumber);
+}
+
+
+Statement::Statement(Statement* other)
+    : AstNode(other), statement_pos_(other->statement_pos_) {}
+
+
+Expression::Expression(Expression* other)
+    : AstNode(other),
+      bitfields_(other->bitfields_),
+      type_(other->type_) {}
+
+
+BreakableStatement::BreakableStatement(BreakableStatement* other)
+    : Statement(other), labels_(other->labels_), type_(other->type_) {}
+
+
+Block::Block(Block* other, ZoneList<Statement*>* statements)
+    : BreakableStatement(other),
+      statements_(statements->length()),
+      is_initializer_block_(other->is_initializer_block_) {
+  statements_.AddAll(*statements);
+}
+
+
+ExpressionStatement::ExpressionStatement(ExpressionStatement* other,
+                                         Expression* expression)
+    : Statement(other), expression_(expression) {}
+
+
+IfStatement::IfStatement(IfStatement* other,
+                         Expression* condition,
+                         Statement* then_statement,
+                         Statement* else_statement)
+    : Statement(other),
+      condition_(condition),
+      then_statement_(then_statement),
+      else_statement_(else_statement) {}
+
+
+EmptyStatement::EmptyStatement(EmptyStatement* other) : Statement(other) {}
+
+
+IterationStatement::IterationStatement(IterationStatement* other,
+                                       Statement* body)
+    : BreakableStatement(other), body_(body) {}
+
+
+ForStatement::ForStatement(ForStatement* other,
+                           Statement* init,
+                           Expression* cond,
+                           Statement* next,
+                           Statement* body)
+    : IterationStatement(other, body),
+      init_(init),
+      cond_(cond),
+      next_(next),
+      may_have_function_literal_(other->may_have_function_literal_),
+      loop_variable_(other->loop_variable_),
+      peel_this_loop_(other->peel_this_loop_) {}
+
+
+Assignment::Assignment(Assignment* other,
+                       Expression* target,
+                       Expression* value)
+    : Expression(other),
+      op_(other->op_),
+      target_(target),
+      value_(value),
+      pos_(other->pos_),
+      block_start_(other->block_start_),
+      block_end_(other->block_end_) {}
+
+
+Property::Property(Property* other, Expression* obj, Expression* key)
+    : Expression(other),
+      obj_(obj),
+      key_(key),
+      pos_(other->pos_),
+      type_(other->type_) {}
+
+
+Call::Call(Call* other,
+           Expression* expression,
+           ZoneList<Expression*>* arguments)
+    : Expression(other),
+      expression_(expression),
+      arguments_(arguments),
+      pos_(other->pos_) {}
+
+
+UnaryOperation::UnaryOperation(UnaryOperation* other, Expression* expression)
+    : Expression(other), op_(other->op_), expression_(expression) {}
+
+
+BinaryOperation::BinaryOperation(Expression* other,
+                                 Token::Value op,
+                                 Expression* left,
+                                 Expression* right)
+    : Expression(other), op_(op), left_(left), right_(right) {}
+
+
+CountOperation::CountOperation(CountOperation* other, Expression* expression)
+    : Expression(other),
+      is_prefix_(other->is_prefix_),
+      op_(other->op_),
+      expression_(expression) {}
+
+
+CompareOperation::CompareOperation(CompareOperation* other,
+                                   Expression* left,
+                                   Expression* right)
+    : Expression(other),
+      op_(other->op_),
+      left_(left),
+      right_(right) {}
+
+
+Expression* CopyAstVisitor::DeepCopyExpr(Expression* expr) {
+  expr_ = NULL;
+  if (expr != NULL) Visit(expr);
+  return expr_;
+}
+
+
+Statement* CopyAstVisitor::DeepCopyStmt(Statement* stmt) {
+  stmt_ = NULL;
+  if (stmt != NULL) Visit(stmt);
+  return stmt_;
+}
+
+
+ZoneList<Expression*>* CopyAstVisitor::DeepCopyExprList(
+    ZoneList<Expression*>* expressions) {
+  ZoneList<Expression*>* copy =
+      new ZoneList<Expression*>(expressions->length());
+  for (int i = 0; i < expressions->length(); i++) {
+    copy->Add(DeepCopyExpr(expressions->at(i)));
+  }
+  return copy;
+}
+
+
+ZoneList<Statement*>* CopyAstVisitor::DeepCopyStmtList(
+    ZoneList<Statement*>* statements) {
+  ZoneList<Statement*>* copy = new ZoneList<Statement*>(statements->length());
+  for (int i = 0; i < statements->length(); i++) {
+    copy->Add(DeepCopyStmt(statements->at(i)));
+  }
+  return copy;
+}
+
+
+void CopyAstVisitor::VisitBlock(Block* stmt) {
+  stmt_ = new Block(stmt,
+                    DeepCopyStmtList(stmt->statements()));
+}
+
+
+void CopyAstVisitor::VisitExpressionStatement(
+    ExpressionStatement* stmt) {
+  stmt_ = new ExpressionStatement(stmt, DeepCopyExpr(stmt->expression()));
+}
+
+
+void CopyAstVisitor::VisitEmptyStatement(EmptyStatement* stmt) {
+  stmt_ = new EmptyStatement(stmt);
+}
+
+
+void CopyAstVisitor::VisitIfStatement(IfStatement* stmt) {
+  stmt_ = new IfStatement(stmt,
+                          DeepCopyExpr(stmt->condition()),
+                          DeepCopyStmt(stmt->then_statement()),
+                          DeepCopyStmt(stmt->else_statement()));
+}
+
+
+void CopyAstVisitor::VisitContinueStatement(ContinueStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitBreakStatement(BreakStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitReturnStatement(ReturnStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitWithEnterStatement(
+    WithEnterStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitWithExitStatement(WithExitStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitWhileStatement(WhileStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitForStatement(ForStatement* stmt) {
+  stmt_ = new ForStatement(stmt,
+                           DeepCopyStmt(stmt->init()),
+                           DeepCopyExpr(stmt->cond()),
+                           DeepCopyStmt(stmt->next()),
+                           DeepCopyStmt(stmt->body()));
+}
+
+
+void CopyAstVisitor::VisitForInStatement(ForInStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitTryFinallyStatement(
+    TryFinallyStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitDebuggerStatement(
+    DebuggerStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitConditional(Conditional* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitSlot(Slot* expr) {
+  UNREACHABLE();
+}
+
+
+void CopyAstVisitor::VisitVariableProxy(VariableProxy* expr) {
+  expr_ = new VariableProxy(*expr);
+}
+
+
+void CopyAstVisitor::VisitLiteral(Literal* expr) {
+  expr_ = new Literal(*expr);
+}
+
+
+void CopyAstVisitor::VisitRegExpLiteral(RegExpLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitArrayLiteral(ArrayLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitCatchExtensionObject(
+    CatchExtensionObject* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitAssignment(Assignment* expr) {
+  expr_ = new Assignment(expr,
+                         DeepCopyExpr(expr->target()),
+                         DeepCopyExpr(expr->value()));
+}
+
+
+void CopyAstVisitor::VisitThrow(Throw* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitProperty(Property* expr) {
+  expr_ = new Property(expr,
+                       DeepCopyExpr(expr->obj()),
+                       DeepCopyExpr(expr->key()));
+}
+
+
+void CopyAstVisitor::VisitCall(Call* expr) {
+  expr_ = new Call(expr,
+                   DeepCopyExpr(expr->expression()),
+                   DeepCopyExprList(expr->arguments()));
+}
+
+
+void CopyAstVisitor::VisitCallNew(CallNew* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitCallRuntime(CallRuntime* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitUnaryOperation(UnaryOperation* expr) {
+  expr_ = new UnaryOperation(expr, DeepCopyExpr(expr->expression()));
+}
+
+
+void CopyAstVisitor::VisitCountOperation(CountOperation* expr) {
+  expr_ = new CountOperation(expr,
+                             DeepCopyExpr(expr->expression()));
+}
+
+
+void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) {
+  expr_ = new BinaryOperation(expr,
+                              expr->op(),
+                              DeepCopyExpr(expr->left()),
+                              DeepCopyExpr(expr->right()));
+}
+
+
+void CopyAstVisitor::VisitCompareOperation(CompareOperation* expr) {
+  expr_ = new CompareOperation(expr,
+                               DeepCopyExpr(expr->left()),
+                               DeepCopyExpr(expr->right()));
+}
+
+
+void CopyAstVisitor::VisitThisFunction(ThisFunction* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitDeclaration(Declaration* decl) {
+  UNREACHABLE();
+}
+
 
 } }  // namespace v8::internal
diff --git a/src/ast.h b/src/ast.h
index 927a9f5..dfc08ee 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -73,7 +73,7 @@
 
 #define EXPRESSION_NODE_LIST(V)                 \
   V(FunctionLiteral)                            \
-  V(FunctionBoilerplateLiteral)                 \
+  V(SharedFunctionInfoLiteral)                  \
   V(Conditional)                                \
   V(Slot)                                       \
   V(VariableProxy)                              \
@@ -103,6 +103,7 @@
 class TargetCollector;
 class MaterializedLiteral;
 class DefinitionInfo;
+class BitVector;
 
 #define DEF_FORWARD_DECLARATION(type) class type;
 AST_NODE_LIST(DEF_FORWARD_DECLARATION)
@@ -117,11 +118,18 @@
 
 class AstNode: public ZoneObject {
  public:
+  static const int kNoNumber = -1;
+
+  AstNode() : num_(kNoNumber) {}
+
+  explicit AstNode(AstNode* other);
+
   virtual ~AstNode() { }
   virtual void Accept(AstVisitor* v) = 0;
 
   // Type testing & conversion.
   virtual Statement* AsStatement() { return NULL; }
+  virtual Block* AsBlock() { return NULL; }
   virtual ExpressionStatement* AsExpressionStatement() { return NULL; }
   virtual EmptyStatement* AsEmptyStatement() { return NULL; }
   virtual Expression* AsExpression() { return NULL; }
@@ -133,7 +141,9 @@
   virtual TargetCollector* AsTargetCollector() { return NULL; }
   virtual BreakableStatement* AsBreakableStatement() { return NULL; }
   virtual IterationStatement* AsIterationStatement() { return NULL; }
+  virtual ForStatement* AsForStatement() { return NULL; }
   virtual UnaryOperation* AsUnaryOperation() { return NULL; }
+  virtual CountOperation* AsCountOperation() { return NULL; }
   virtual BinaryOperation* AsBinaryOperation() { return NULL; }
   virtual Assignment* AsAssignment() { return NULL; }
   virtual FunctionLiteral* AsFunctionLiteral() { return NULL; }
@@ -141,6 +151,20 @@
   virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
   virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
   virtual CompareOperation* AsCompareOperation() { return NULL; }
+
+  // True if the AST node is critical (its execution is needed or externally
+  // visible in some way).
+  virtual bool IsCritical() {
+    UNREACHABLE();
+    return true;
+  }
+
+  int num() { return num_; }
+  void set_num(int n) { num_ = n; }
+
+ private:
+  // Support for ast node numbering.
+  int num_;
 };
 
 
@@ -148,9 +172,14 @@
  public:
   Statement() : statement_pos_(RelocInfo::kNoPosition) {}
 
+  explicit Statement(Statement* other);
+
   virtual Statement* AsStatement()  { return this; }
   virtual ReturnStatement* AsReturnStatement() { return NULL; }
 
+  virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
+  virtual CountOperation* StatementAsCountOperation() { return NULL; }
+
   bool IsEmpty() { return AsEmptyStatement() != NULL; }
 
   void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -181,14 +210,16 @@
     kTestValue
   };
 
-  static const int kNoLabel = -1;
+  Expression() : bitfields_(0) {}
 
-  Expression() : num_(kNoLabel), def_(NULL), defined_vars_(NULL) {}
+  explicit Expression(Expression* other);
 
   virtual Expression* AsExpression()  { return this; }
 
   virtual bool IsValidLeftHandSide() { return false; }
 
+  virtual Variable* AssignedVariable() { return NULL; }
+
   // Symbols that cannot be parsed as array indices are considered property
   // names.  We do not treat symbols that can be array indexes as property
   // names because [] for string objects is handled only by keyed ICs.
@@ -203,6 +234,10 @@
   // evaluate out of order.
   virtual bool IsTrivial() { return false; }
 
+  // True if the expression always has one of the non-Object JS types
+  // (Undefined, Null, Boolean, String, or Number).
+  virtual bool IsPrimitive() = 0;
+
   // Mark the expression as being compiled as an expression
   // statement. This is used to transform postfix increments to
   // (faster) prefix increments.
@@ -211,25 +246,66 @@
   // Static type information for this expression.
   StaticType* type() { return &type_; }
 
-  int num() { return num_; }
+  // True if the expression is a loop condition.
+  bool is_loop_condition() const {
+    return LoopConditionField::decode(bitfields_);
+  }
+  void set_is_loop_condition(bool flag) {
+    bitfields_ = (bitfields_ & ~LoopConditionField::mask()) |
+        LoopConditionField::encode(flag);
+  }
 
-  // AST node numbering ordered by evaluation order.
-  void set_num(int n) { num_ = n; }
+  // True if the value of the expression is guaranteed to be a Smi because
+  // the top operation is a bit operation with a mask, or a shift.
+  bool GuaranteedSmiResult();
 
-  // Data flow information.
-  DefinitionInfo* var_def() { return def_; }
-  void set_var_def(DefinitionInfo* def) { def_ = def; }
+  // AST analysis results
 
-  ZoneList<DefinitionInfo*>* defined_vars() { return defined_vars_; }
-  void set_defined_vars(ZoneList<DefinitionInfo*>* defined_vars) {
-    defined_vars_ = defined_vars;
+  // True if the expression rooted at this node can be compiled by the
+  // side-effect free compiler.
+  bool side_effect_free() { return SideEffectFreeField::decode(bitfields_); }
+  void set_side_effect_free(bool is_side_effect_free) {
+    bitfields_ &= ~SideEffectFreeField::mask();
+    bitfields_ |= SideEffectFreeField::encode(is_side_effect_free);
+  }
+
+  // Will the use of this expression treat -0 the same as 0 in all cases?
+  // If so, we can return 0 instead of -0 if we want to, to optimize code.
+  bool no_negative_zero() { return NoNegativeZeroField::decode(bitfields_); }
+  void set_no_negative_zero(bool no_negative_zero) {
+    bitfields_ &= ~NoNegativeZeroField::mask();
+    bitfields_ |= NoNegativeZeroField::encode(no_negative_zero);
+  }
+
+  // Will ToInt32 (ECMA 262-3 9.5) or ToUint32 (ECMA 262-3 9.6)
+  // be applied to the value of this expression?
+  // If so, we may be able to optimize the calculation of the value.
+  bool to_int32() { return ToInt32Field::decode(bitfields_); }
+  void set_to_int32(bool to_int32) {
+    bitfields_ &= ~ToInt32Field::mask();
+    bitfields_ |= ToInt32Field::encode(to_int32);
+  }
+
+  // How many bitwise logical or shift operators are used in this expression?
+  int num_bit_ops() { return NumBitOpsField::decode(bitfields_); }
+  void set_num_bit_ops(int num_bit_ops) {
+    bitfields_ &= ~NumBitOpsField::mask();
+    num_bit_ops = Min(num_bit_ops, kMaxNumBitOps);
+    bitfields_ |= NumBitOpsField::encode(num_bit_ops);
   }
 
  private:
+  static const int kMaxNumBitOps = (1 << 5) - 1;
+
+  uint32_t bitfields_;
   StaticType type_;
-  int num_;
-  DefinitionInfo* def_;
-  ZoneList<DefinitionInfo*>* defined_vars_;
+
+  // Using template BitField<type, start, size>.
+  class SideEffectFreeField : public BitField<bool, 0, 1> {};
+  class NoNegativeZeroField : public BitField<bool, 1, 1> {};
+  class ToInt32Field : public BitField<bool, 2, 1> {};
+  class NumBitOpsField : public BitField<int, 3, 5> {};
+  class LoopConditionField : public BitField<bool, 8, 1> {};
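+
+  // Encoding sketch (illustrative): NumBitOpsField::encode(3) shifts the
+  // value into bits 3..7 of bitfields_;
+  // NumBitOpsField::decode(bitfields_) masks it back out.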
 };
 
 
@@ -243,6 +319,12 @@
   virtual bool IsValidLeftHandSide() { return true; }
   virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
   static ValidLeftHandSideSentinel* instance() { return &instance_; }
+
+  virtual bool IsPrimitive() {
+    UNREACHABLE();
+    return false;
+  }
+
  private:
   static ValidLeftHandSideSentinel instance_;
 };
@@ -274,6 +356,8 @@
     ASSERT(labels == NULL || labels->length() > 0);
   }
 
+  explicit BreakableStatement(BreakableStatement* other);
+
  private:
   ZoneStringList* labels_;
   Type type_;
@@ -288,8 +372,24 @@
         statements_(capacity),
         is_initializer_block_(is_initializer_block) { }
 
+  // Construct a clone initialized from the original block and
+  // a deep copy of all statements of the original block.
+  Block(Block* other, ZoneList<Statement*>* statements);
+
   virtual void Accept(AstVisitor* v);
 
+  virtual Block* AsBlock() { return this; }
+
+  virtual Assignment* StatementAsSimpleAssignment() {
+    if (statements_.length() != 1) return NULL;
+    return statements_[0]->StatementAsSimpleAssignment();
+  }
+
+  virtual CountOperation* StatementAsCountOperation() {
+    if (statements_.length() != 1) return NULL;
+    return statements_[0]->StatementAsCountOperation();
+  }
+
   void AddStatement(Statement* statement) { statements_.Add(statement); }
 
   ZoneList<Statement*>* statements() { return &statements_; }
@@ -331,6 +431,7 @@
   virtual IterationStatement* AsIterationStatement() { return this; }
 
   Statement* body() const { return body_; }
+  void set_body(Statement* stmt) { body_ = stmt; }
 
   // Code generation
   BreakTarget* continue_target()  { return &continue_target_; }
@@ -339,6 +440,10 @@
   explicit IterationStatement(ZoneStringList* labels)
       : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { }
 
+  // Construct a clone initialized from the original and
+  // a deep copy of the original body.
+  IterationStatement(IterationStatement* other, Statement* body);
+
   void Initialize(Statement* body) {
     body_ = body;
   }
@@ -411,8 +516,19 @@
         init_(NULL),
         cond_(NULL),
         next_(NULL),
-        may_have_function_literal_(true) {
-  }
+        may_have_function_literal_(true),
+        loop_variable_(NULL),
+        peel_this_loop_(false) {}
+
+  // Construct a for-statement initialized from another for-statement
+  // and deep copies of all parts of the original statement.
+  ForStatement(ForStatement* other,
+               Statement* init,
+               Expression* cond,
+               Statement* next,
+               Statement* body);
+
+  virtual ForStatement* AsForStatement() { return this; }
 
   void Initialize(Statement* init,
                   Expression* cond,
@@ -427,18 +543,30 @@
   virtual void Accept(AstVisitor* v);
 
   Statement* init() const  { return init_; }
+  void set_init(Statement* stmt) { init_ = stmt; }
   Expression* cond() const  { return cond_; }
+  void set_cond(Expression* expr) { cond_ = expr; }
   Statement* next() const  { return next_; }
+  void set_next(Statement* stmt) { next_ = stmt; }
   bool may_have_function_literal() const {
     return may_have_function_literal_;
   }
 
+  bool is_fast_smi_loop() { return loop_variable_ != NULL; }
+  Variable* loop_variable() { return loop_variable_; }
+  void set_loop_variable(Variable* var) { loop_variable_ = var; }
+
+  bool peel_this_loop() { return peel_this_loop_; }
+  void set_peel_this_loop(bool b) { peel_this_loop_ = b; }
+
  private:
   Statement* init_;
   Expression* cond_;
   Statement* next_;
   // True if there is a function literal subexpression in the condition.
   bool may_have_function_literal_;
+  Variable* loop_variable_;
+  bool peel_this_loop_;
 
   friend class AstOptimizer;
 };
@@ -471,11 +599,18 @@
   explicit ExpressionStatement(Expression* expression)
       : expression_(expression) { }
 
+  // Construct an expression statement initialized from another
+  // expression statement and a deep copy of the original expression.
+  ExpressionStatement(ExpressionStatement* other, Expression* expression);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion.
   virtual ExpressionStatement* AsExpressionStatement() { return this; }
 
+  virtual Assignment* StatementAsSimpleAssignment();
+  virtual CountOperation* StatementAsCountOperation();
+
   void set_expression(Expression* e) { expression_ = e; }
   Expression* expression() { return expression_; }
 
@@ -610,6 +745,13 @@
         then_statement_(then_statement),
         else_statement_(else_statement) { }
 
+  // Construct an if-statement initialized from another if-statement
+  // and deep copies of all parts of the original.
+  IfStatement(IfStatement* other,
+              Expression* condition,
+              Statement* then_statement,
+              Statement* else_statement);
+
   virtual void Accept(AstVisitor* v);
 
   bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
@@ -617,7 +759,9 @@
 
   Expression* condition() const { return condition_; }
   Statement* then_statement() const { return then_statement_; }
+  void set_then_statement(Statement* stmt) { then_statement_ = stmt; }
   Statement* else_statement() const { return else_statement_; }
+  void set_else_statement(Statement* stmt) { else_statement_ = stmt; }
 
  private:
   Expression* condition_;
@@ -712,6 +856,10 @@
 
 class EmptyStatement: public Statement {
  public:
+  EmptyStatement() {}
+
+  explicit EmptyStatement(EmptyStatement* other);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion.
@@ -743,6 +891,8 @@
 
   virtual bool IsLeaf() { return true; }
   virtual bool IsTrivial() { return true; }
+  virtual bool IsPrimitive();
+  virtual bool IsCritical();
 
   // Identity testers.
   bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
@@ -818,24 +968,31 @@
                 ZoneList<Property*>* properties,
                 int literal_index,
                 bool is_simple,
+                bool fast_elements,
                 int depth)
       : MaterializedLiteral(literal_index, is_simple, depth),
         constant_properties_(constant_properties),
-        properties_(properties) {}
+        properties_(properties),
+        fast_elements_(fast_elements) {}
 
   virtual ObjectLiteral* AsObjectLiteral() { return this; }
   virtual void Accept(AstVisitor* v);
 
   virtual bool IsLeaf() { return properties()->is_empty(); }
 
+  virtual bool IsPrimitive();
+
   Handle<FixedArray> constant_properties() const {
     return constant_properties_;
   }
   ZoneList<Property*>* properties() const { return properties_; }
 
+  bool fast_elements() const { return fast_elements_; }
+
  private:
   Handle<FixedArray> constant_properties_;
   ZoneList<Property*>* properties_;
+  bool fast_elements_;
 };
 
 
@@ -853,6 +1010,8 @@
 
   virtual bool IsLeaf() { return true; }
 
+  virtual bool IsPrimitive();
+
   Handle<String> pattern() const { return pattern_; }
   Handle<String> flags() const { return flags_; }
 
@@ -879,6 +1038,8 @@
 
   virtual bool IsLeaf() { return values()->is_empty(); }
 
+  virtual bool IsPrimitive();
+
   Handle<FixedArray> constant_elements() const { return constant_elements_; }
   ZoneList<Expression*>* values() const { return values_; }
 
@@ -899,6 +1060,8 @@
 
   virtual void Accept(AstVisitor* v);
 
+  virtual bool IsPrimitive();
+
   Literal* key() const { return key_; }
   VariableProxy* value() const { return value_; }
 
@@ -933,7 +1096,12 @@
 
   // Reading from a mutable variable is a side effect, but 'this' is
   // immutable.
-  virtual bool IsTrivial() { return is_this(); }
+  virtual bool IsTrivial() { return is_trivial_; }
+
+  virtual bool IsPrimitive();
+  virtual bool IsCritical();
+
+  void SetIsPrimitive(bool value) { is_primitive_ = value; }
 
   bool IsVariable(Handle<String> n) {
     return !is_this() && name().is_identical_to(n);
@@ -946,10 +1114,13 @@
 
   Handle<String> name() const  { return name_; }
   Variable* var() const  { return var_; }
-  UseCount* var_uses()  { return &var_uses_; }
-  UseCount* obj_uses()  { return &obj_uses_; }
   bool is_this() const  { return is_this_; }
   bool inside_with() const  { return inside_with_; }
+  bool is_trivial() { return is_trivial_; }
+  void set_is_trivial(bool b) { is_trivial_ = b; }
+
+  BitVector* reaching_definitions() { return reaching_definitions_; }
+  void set_reaching_definitions(BitVector* rd) { reaching_definitions_ = rd; }
 
   // Bind this proxy to the variable var.
   void BindTo(Variable* var);
@@ -959,10 +1130,9 @@
   Variable* var_;  // resolved variable, or NULL
   bool is_this_;
   bool inside_with_;
-
-  // VariableProxy usage info.
-  UseCount var_uses_;  // uses of the variable value
-  UseCount obj_uses_;  // uses of the object the variable points to
+  bool is_trivial_;
+  BitVector* reaching_definitions_;
+  bool is_primitive_;
 
   VariableProxy(Handle<String> name, bool is_this, bool inside_with);
   explicit VariableProxy(bool is_this);
@@ -979,6 +1149,11 @@
     return &identifier_proxy_;
   }
 
+  virtual bool IsPrimitive() {
+    UNREACHABLE();
+    return false;
+  }
+
  private:
   explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
   static VariableProxySentinel this_proxy_;
@@ -1022,6 +1197,13 @@
 
   virtual bool IsLeaf() { return true; }
 
+  virtual bool IsPrimitive() {
+    UNREACHABLE();
+    return false;
+  }
+
+  bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
+
   // Accessors
   Variable* var() const { return var_; }
   Type type() const { return type_; }
@@ -1045,6 +1227,8 @@
   Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
       : obj_(obj), key_(key), pos_(pos), type_(type) { }
 
+  Property(Property* other, Expression* obj, Expression* key);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion
@@ -1052,6 +1236,9 @@
 
   virtual bool IsValidLeftHandSide() { return true; }
 
+  virtual bool IsPrimitive();
+  virtual bool IsCritical();
+
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
   int position() const { return pos_; }
@@ -1077,11 +1264,16 @@
   Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
       : expression_(expression), arguments_(arguments), pos_(pos) { }
 
+  Call(Call* other, Expression* expression, ZoneList<Expression*>* arguments);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing and conversion.
   virtual Call* AsCall() { return this; }
 
+  virtual bool IsPrimitive();
+  virtual bool IsCritical();
+
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
   int position() { return pos_; }
@@ -1104,6 +1296,8 @@
 
   virtual void Accept(AstVisitor* v);
 
+  virtual bool IsPrimitive();
+
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
   int position() { return pos_; }
@@ -1128,6 +1322,8 @@
 
   virtual void Accept(AstVisitor* v);
 
+  virtual bool IsPrimitive();
+
   Handle<String> name() const { return name_; }
   Runtime::Function* function() const { return function_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1147,11 +1343,16 @@
     ASSERT(Token::IsUnaryOp(op));
   }
 
+  UnaryOperation(UnaryOperation* other, Expression* expression);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion
   virtual UnaryOperation* AsUnaryOperation() { return this; }
 
+  virtual bool IsPrimitive();
+  virtual bool IsCritical();
+
   Token::Value op() const { return op_; }
   Expression* expression() const { return expression_; }
 
@@ -1168,11 +1369,22 @@
     ASSERT(Token::IsBinaryOp(op));
   }
 
+  // Construct a binary operation with a given operator and left and right
+  // subexpressions.  The rest of the expression state is copied from
+  // another expression.
+  BinaryOperation(Expression* other,
+                  Token::Value op,
+                  Expression* left,
+                  Expression* right);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion
   virtual BinaryOperation* AsBinaryOperation() { return this; }
 
+  virtual bool IsPrimitive();
+  virtual bool IsCritical();
+
   // True iff the result can be safely overwritten (to avoid allocation).
   // False for operations that can return one of their operands.
   bool ResultOverwriteAllowed() {
@@ -1217,8 +1429,19 @@
     ASSERT(Token::IsCountOp(op));
   }
 
+  CountOperation(CountOperation* other, Expression* expression);
+
   virtual void Accept(AstVisitor* v);
 
+  virtual CountOperation* AsCountOperation() { return this; }
+
+  virtual Variable* AssignedVariable() {
+    return expression()->AsVariableProxy()->AsVariable();
+  }
+
+  virtual bool IsPrimitive();
+  virtual bool IsCritical();
+
   bool is_prefix() const { return is_prefix_; }
   bool is_postfix() const { return !is_prefix_; }
   Token::Value op() const { return op_; }
@@ -1239,20 +1462,23 @@
 class CompareOperation: public Expression {
  public:
   CompareOperation(Token::Value op, Expression* left, Expression* right)
-      : op_(op), left_(left), right_(right), is_for_loop_condition_(false) {
+      : op_(op), left_(left), right_(right) {
     ASSERT(Token::IsCompareOp(op));
   }
 
+  CompareOperation(CompareOperation* other,
+                   Expression* left,
+                   Expression* right);
+
   virtual void Accept(AstVisitor* v);
 
+  virtual bool IsPrimitive();
+  virtual bool IsCritical();
+
   Token::Value op() const { return op_; }
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }
 
-  // Accessors for flag whether this compare operation is hanging of a for loop.
-  bool is_for_loop_condition() const { return is_for_loop_condition_; }
-  void set_is_for_loop_condition() { is_for_loop_condition_ = true; }
-
   // Type testing & conversion
   virtual CompareOperation* AsCompareOperation() { return this; }
 
@@ -1260,7 +1486,6 @@
   Token::Value op_;
   Expression* left_;
   Expression* right_;
-  bool is_for_loop_condition_;
 };
 
 
@@ -1275,6 +1500,8 @@
 
   virtual void Accept(AstVisitor* v);
 
+  virtual bool IsPrimitive();
+
   Expression* condition() const { return condition_; }
   Expression* then_expression() const { return then_expression_; }
   Expression* else_expression() const { return else_expression_; }
@@ -1294,9 +1521,20 @@
     ASSERT(Token::IsAssignmentOp(op));
   }
 
+  Assignment(Assignment* other, Expression* target, Expression* value);
+
   virtual void Accept(AstVisitor* v);
   virtual Assignment* AsAssignment() { return this; }
 
+  virtual bool IsPrimitive();
+  virtual bool IsCritical();
+
+  Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
+
+  virtual Variable* AssignedVariable() {
+    return target()->AsVariableProxy()->AsVariable();
+  }
+
   Token::Value binary_op() const;
 
   Token::Value op() const { return op_; }
@@ -1331,6 +1569,9 @@
       : exception_(exception), pos_(pos) {}
 
   virtual void Accept(AstVisitor* v);
+
+  virtual bool IsPrimitive();
+
   Expression* exception() const { return exception_; }
   int position() const { return pos_; }
 
@@ -1380,6 +1621,8 @@
 
   virtual bool IsLeaf() { return true; }
 
+  virtual bool IsPrimitive();
+
   Handle<String> name() const  { return name_; }
   Scope* scope() const  { return scope_; }
   ZoneList<Statement*>* body() const  { return body_; }
@@ -1437,21 +1680,24 @@
 };
 
 
-class FunctionBoilerplateLiteral: public Expression {
+class SharedFunctionInfoLiteral: public Expression {
  public:
-  explicit FunctionBoilerplateLiteral(Handle<JSFunction> boilerplate)
-      : boilerplate_(boilerplate) {
-    ASSERT(boilerplate->IsBoilerplate());
-  }
+  explicit SharedFunctionInfoLiteral(
+      Handle<SharedFunctionInfo> shared_function_info)
+      : shared_function_info_(shared_function_info) { }
 
-  Handle<JSFunction> boilerplate() const { return boilerplate_; }
+  Handle<SharedFunctionInfo> shared_function_info() const {
+    return shared_function_info_;
+  }
 
   virtual bool IsLeaf() { return true; }
 
   virtual void Accept(AstVisitor* v);
 
+  virtual bool IsPrimitive();
+
  private:
-  Handle<JSFunction> boilerplate_;
+  Handle<SharedFunctionInfo> shared_function_info_;
 };
 
 
@@ -1459,6 +1705,7 @@
  public:
   virtual void Accept(AstVisitor* v);
   virtual bool IsLeaf() { return true; }
+  virtual bool IsPrimitive();
 };
 
 
@@ -1819,29 +2066,23 @@
   AstVisitor() : stack_overflow_(false) { }
   virtual ~AstVisitor() { }
 
-  // Dispatch
-  void Visit(AstNode* node) { node->Accept(this); }
+  // Stack overflow check and dynamic dispatch.
+  void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
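+  // (The overflow check previously lived in each generated Accept method;
+  // performing it here guards every visitor dispatch exactly once.)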
 
-  // Iteration
+  // Iteration left-to-right.
   virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
   virtual void VisitStatements(ZoneList<Statement*>* statements);
   virtual void VisitExpressions(ZoneList<Expression*>* expressions);
 
   // Stack overflow tracking support.
   bool HasStackOverflow() const { return stack_overflow_; }
-  bool CheckStackOverflow() {
-    if (stack_overflow_) return true;
-    StackLimitCheck check;
-    if (!check.HasOverflowed()) return false;
-    return (stack_overflow_ = true);
-  }
+  bool CheckStackOverflow();
 
   // If a stack-overflow exception is encountered when visiting a
   // node, calling SetStackOverflow will make sure that the visitor
   // bails out without visiting more nodes.
   void SetStackOverflow() { stack_overflow_ = true; }
 
-
   // Individual nodes
 #define DEF_VISIT(type)                         \
   virtual void Visit##type(type* node) = 0;
@@ -1853,6 +2094,28 @@
 };
 
 
+class CopyAstVisitor : public AstVisitor {
+ public:
+  Expression* DeepCopyExpr(Expression* expr);
+
+  Statement* DeepCopyStmt(Statement* stmt);
+
+ private:
+  ZoneList<Expression*>* DeepCopyExprList(ZoneList<Expression*>* expressions);
+
+  ZoneList<Statement*>* DeepCopyStmtList(ZoneList<Statement*>* statements);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Holds the result of copying an expression.
+  Expression* expr_;
+  // Holds the result of copying a statement.
+  Statement* stmt_;
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_AST_H_
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 225865c..657d0dc 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -55,15 +55,16 @@
   }
 
   void Iterate(ObjectVisitor* v) {
-    v->VisitPointer(bit_cast<Object**, FixedArray**>(&cache_));
+    v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
   }
 
 
-  bool Lookup(Vector<const char> name, Handle<JSFunction>* handle) {
+  bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
     for (int i = 0; i < cache_->length(); i+=2) {
       SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
       if (str->IsEqualTo(name)) {
-        *handle = Handle<JSFunction>(JSFunction::cast(cache_->get(i + 1)));
+        *handle = Handle<SharedFunctionInfo>(
+            SharedFunctionInfo::cast(cache_->get(i + 1)));
         return true;
       }
     }
@@ -71,8 +72,7 @@
   }
 
 
-  void Add(Vector<const char> name, Handle<JSFunction> fun) {
-    ASSERT(fun->IsBoilerplate());
+  void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
     HandleScope scope;
     int length = cache_->length();
     Handle<FixedArray> new_array =
@@ -81,8 +81,8 @@
     cache_ = *new_array;
     Handle<String> str = Factory::NewStringFromAscii(name, TENURED);
     cache_->set(length, *str);
-    cache_->set(length + 1, *fun);
-    Script::cast(fun->shared()->script())->set_type(Smi::FromInt(type_));
+    cache_->set(length + 1, *shared);
+    Script::cast(shared->script())->set_type(Smi::FromInt(type_));
   }
 
  private:
@@ -228,6 +228,7 @@
   // Used for creating a context from scratch.
   void InstallNativeFunctions();
   bool InstallNatives();
+  void InstallJSFunctionResultCaches();
   // Used both for deserialized and from-scratch contexts to add the extensions
   // provided.
   static bool InstallExtensions(Handle<Context> global_context,
@@ -247,17 +248,15 @@
   void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
   void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
 
+  enum PrototypePropertyMode {
+    DONT_ADD_PROTOTYPE,
+    ADD_READONLY_PROTOTYPE,
+    ADD_WRITEABLE_PROTOTYPE
+  };
   Handle<DescriptorArray> ComputeFunctionInstanceDescriptor(
-      bool make_prototype_read_only,
-      bool make_prototype_enumerable = false);
+      PrototypePropertyMode prototypeMode);
   void MakeFunctionInstancePrototypeWritable();
 
-  void AddSpecialFunction(Handle<JSObject> prototype,
-                          const char* name,
-                          Handle<Code> code);
-
-  void BuildSpecialFunctionTable();
-
   static bool CompileBuiltin(int index);
   static bool CompileNative(Vector<const char> name, Handle<String> source);
   static bool CompileScriptCached(Vector<const char> name,
@@ -335,7 +334,8 @@
                                           bool is_ecma_native) {
   Handle<String> symbol = Factory::LookupAsciiSymbol(name);
   Handle<Code> call_code = Handle<Code>(Builtins::builtin(call));
-  Handle<JSFunction> function =
+  Handle<JSFunction> function = prototype.is_null() ?
+    Factory::NewFunctionWithoutPrototype(symbol, call_code) :
     Factory::NewFunctionWithPrototype(symbol,
                                       type,
                                       instance_size,
@@ -351,23 +351,23 @@
 
 
 Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
-    bool make_prototype_read_only,
-    bool make_prototype_enumerable) {
+    PrototypePropertyMode prototypeMode) {
   Handle<DescriptorArray> result = Factory::empty_descriptor_array();
 
-  // Add prototype.
-  PropertyAttributes attributes = static_cast<PropertyAttributes>(
-      (make_prototype_enumerable ? 0 : DONT_ENUM)
-      | DONT_DELETE
-      | (make_prototype_read_only ? READ_ONLY : 0));
-  result =
-      Factory::CopyAppendProxyDescriptor(
-          result,
-          Factory::prototype_symbol(),
-          Factory::NewProxy(&Accessors::FunctionPrototype),
-          attributes);
+  if (prototypeMode != DONT_ADD_PROTOTYPE) {
+    PropertyAttributes attributes = static_cast<PropertyAttributes>(
+        DONT_ENUM |
+        DONT_DELETE |
+        (prototypeMode == ADD_READONLY_PROTOTYPE ? READ_ONLY : 0));
+    result =
+        Factory::CopyAppendProxyDescriptor(
+            result,
+            Factory::prototype_symbol(),
+            Factory::NewProxy(&Accessors::FunctionPrototype),
+            attributes);
+  }
 
-  attributes =
+  PropertyAttributes attributes =
       static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
   // Add length.
   result =
@@ -412,14 +412,29 @@
   // Please note that the prototype property for function instances must be
   // writable.
   Handle<DescriptorArray> function_map_descriptors =
-      ComputeFunctionInstanceDescriptor(false, false);
+      ComputeFunctionInstanceDescriptor(ADD_WRITEABLE_PROTOTYPE);
   fm->set_instance_descriptors(*function_map_descriptors);
+  fm->set_function_with_prototype(true);
+
+  // Functions with this map will not have a 'prototype' property and
+  // cannot be used as constructors.
+  Handle<Map> function_without_prototype_map =
+      Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  global_context()->set_function_without_prototype_map(
+      *function_without_prototype_map);
+  Handle<DescriptorArray> function_without_prototype_map_descriptors =
+      ComputeFunctionInstanceDescriptor(DONT_ADD_PROTOTYPE);
+  function_without_prototype_map->set_instance_descriptors(
+      *function_without_prototype_map_descriptors);
+  function_without_prototype_map->set_function_with_prototype(false);
 
   // Allocate the function map first and then patch the prototype later
   fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   global_context()->set_function_map(*fm);
-  function_map_descriptors = ComputeFunctionInstanceDescriptor(true);
+  function_map_descriptors =
+      ComputeFunctionInstanceDescriptor(ADD_READONLY_PROTOTYPE);
   fm->set_instance_descriptors(*function_map_descriptors);
+  fm->set_function_with_prototype(true);
 
   Handle<String> object_name = Handle<String>(Heap::Object_symbol());
 
@@ -447,7 +462,7 @@
   // 262 15.3.4.
   Handle<String> symbol = Factory::LookupAsciiSymbol("Empty");
   Handle<JSFunction> empty_function =
-      Factory::NewFunction(symbol, Factory::null_value());
+      Factory::NewFunctionWithoutPrototype(symbol);
 
   // --- E m p t y ---
   Handle<Code> code =
@@ -462,10 +477,14 @@
   empty_function->shared()->DontAdaptArguments();
   global_context()->function_map()->set_prototype(*empty_function);
   global_context()->function_instance_map()->set_prototype(*empty_function);
+  global_context()->function_without_prototype_map()->
+      set_prototype(*empty_function);
 
   // Allocate the function map first and then patch the prototype later
-  Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
-  empty_fm->set_instance_descriptors(*function_map_descriptors);
+  Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(
+      function_without_prototype_map);
+  empty_fm->set_instance_descriptors(
+      *function_without_prototype_map_descriptors);
   empty_fm->set_prototype(global_context()->object_function()->prototype());
   empty_function->set_map(*empty_fm);
   return empty_function;
@@ -729,8 +748,68 @@
         InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
                         Top::initial_object_prototype(), Builtins::Illegal,
                         true);
-
     global_context()->set_regexp_function(*regexp_fun);
+
+    ASSERT(regexp_fun->has_initial_map());
+    Handle<Map> initial_map(regexp_fun->initial_map());
+
+    ASSERT_EQ(0, initial_map->inobject_properties());
+
+    Handle<DescriptorArray> descriptors = Factory::NewDescriptorArray(5);
+    PropertyAttributes final =
+        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+    int enum_index = 0;
+    {
+      // ECMA-262, section 15.10.7.1.
+      FieldDescriptor field(Heap::source_symbol(),
+                            JSRegExp::kSourceFieldIndex,
+                            final,
+                            enum_index++);
+      descriptors->Set(0, &field);
+    }
+    {
+      // ECMA-262, section 15.10.7.2.
+      FieldDescriptor field(Heap::global_symbol(),
+                            JSRegExp::kGlobalFieldIndex,
+                            final,
+                            enum_index++);
+      descriptors->Set(1, &field);
+    }
+    {
+      // ECMA-262, section 15.10.7.3.
+      FieldDescriptor field(Heap::ignore_case_symbol(),
+                            JSRegExp::kIgnoreCaseFieldIndex,
+                            final,
+                            enum_index++);
+      descriptors->Set(2, &field);
+    }
+    {
+      // ECMA-262, section 15.10.7.4.
+      FieldDescriptor field(Heap::multiline_symbol(),
+                            JSRegExp::kMultilineFieldIndex,
+                            final,
+                            enum_index++);
+      descriptors->Set(3, &field);
+    }
+    {
+      // ECMA-262, section 15.10.7.5.
+      PropertyAttributes writable =
+          static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+      FieldDescriptor field(Heap::last_index_symbol(),
+                            JSRegExp::kLastIndexFieldIndex,
+                            writable,
+                            enum_index++);
+      descriptors->Set(4, &field);
+    }
+    descriptors->SetNextEnumerationIndex(enum_index);
+    descriptors->Sort();
+
+    initial_map->set_inobject_properties(5);
+    initial_map->set_pre_allocated_property_fields(5);
+    initial_map->set_unused_property_fields(0);
+    initial_map->set_instance_size(
+        initial_map->instance_size() + 5 * kPointerSize);
+    initial_map->set_instance_descriptors(*descriptors);
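+
+    // With these five in-object fields, source, global, ignoreCase,
+    // multiline, and lastIndex live at fixed offsets in every JSRegExp
+    // instance instead of requiring a named property lookup.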
   }
 
   {  // -- J S O N
@@ -835,8 +914,6 @@
     delegate->shared()->DontAdaptArguments();
   }
 
-  global_context()->set_special_function_table(Heap::empty_fixed_array());
-
   // Initialize the out of memory slot.
   global_context()->set_out_of_memory(Heap::false_value());
 
@@ -879,14 +956,14 @@
                                   Handle<Context> top_context,
                                   bool use_runtime_context) {
   HandleScope scope;
-  Handle<JSFunction> boilerplate;
+  Handle<SharedFunctionInfo> function_info;
 
   // If we can't find the function in the cache, we compile a new
   // function and insert it into the cache.
-  if (cache == NULL || !cache->Lookup(name, &boilerplate)) {
+  if (cache == NULL || !cache->Lookup(name, &function_info)) {
     ASSERT(source->IsAsciiRepresentation());
     Handle<String> script_name = Factory::NewStringFromUtf8(name);
-    boilerplate = Compiler::Compile(
+    function_info = Compiler::Compile(
         source,
         script_name,
         0,
@@ -895,8 +972,8 @@
         NULL,
         Handle<String>::null(),
         use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
-    if (boilerplate.is_null()) return false;
-    if (cache != NULL) cache->Add(name, boilerplate);
+    if (function_info.is_null()) return false;
+    if (cache != NULL) cache->Add(name, function_info);
   }
 
   // Setup the function context. Conceptually, we should clone the
@@ -908,7 +985,7 @@
                       ? Handle<Context>(top_context->runtime_context())
                       : top_context);
   Handle<JSFunction> fun =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromSharedFunctionInfo(function_info, context);
 
   // Call function using either the runtime object or the global
   // object as the receiver. Provide no parameters.
@@ -1119,6 +1196,24 @@
     script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
     Heap::public_set_empty_script(*script);
   }
+  {
+    // Builtin function for OpaqueReference -- a JSValue-based object
+    // that keeps its field isolated from JavaScript code.  It may store
+    // objects that JavaScript code may not access.
+    Handle<JSFunction> opaque_reference_fun =
+        InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
+                        JSValue::kSize, Top::initial_object_prototype(),
+                        Builtins::Illegal, false);
+    Handle<JSObject> prototype =
+        Factory::NewJSObject(Top::object_function(), TENURED);
+    SetPrototype(opaque_reference_fun, prototype);
+    global_context()->set_opaque_reference_function(*opaque_reference_fun);
+  }
+
+  if (FLAG_disable_native_files) {
+    PrintF("Warning: Running without installed natives!\n");
+    return true;
+  }
 
   // Install natives.
   for (int i = Natives::GetDebuggerCount();
@@ -1144,12 +1239,12 @@
     // Install the call and the apply functions.
     Handle<JSFunction> call =
         InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
-                        Factory::NewJSObject(Top::object_function(), TENURED),
+                        Handle<JSObject>::null(),
                         Builtins::FunctionCall,
                         false);
     Handle<JSFunction> apply =
         InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
-                        Factory::NewJSObject(Top::object_function(), TENURED),
+                        Handle<JSObject>::null(),
                         Builtins::FunctionApply,
                         false);
 
@@ -1167,6 +1262,62 @@
     apply->shared()->set_length(2);
   }
 
+  // Create a constructor for RegExp results (a variant of Array that
+  // predefines the two properties index and match).
+  {
+    // RegExpResult initial map.
+
+    // Find global.Array.prototype to inherit from.
+    Handle<JSFunction> array_constructor(global_context()->array_function());
+    Handle<JSObject> array_prototype(
+        JSObject::cast(array_constructor->instance_prototype()));
+
+    // Add initial map.
+    Handle<Map> initial_map =
+        Factory::NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
+    initial_map->set_constructor(*array_constructor);
+
+    // Set prototype on map.
+    initial_map->set_non_instance_prototype(false);
+    initial_map->set_prototype(*array_prototype);
+
+    // Update map with length accessor from Array and add "index" and "input".
+    Handle<Map> array_map(global_context()->js_array_map());
+    Handle<DescriptorArray> array_descriptors(
+        array_map->instance_descriptors());
+    ASSERT_EQ(1, array_descriptors->number_of_descriptors());
+
+    Handle<DescriptorArray> reresult_descriptors =
+        Factory::NewDescriptorArray(3);
+
+    reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
+
+    int enum_index = 0;
+    {
+      FieldDescriptor index_field(Heap::index_symbol(),
+                                  JSRegExpResult::kIndexIndex,
+                                  NONE,
+                                  enum_index++);
+      reresult_descriptors->Set(1, &index_field);
+    }
+
+    {
+      FieldDescriptor input_field(Heap::input_symbol(),
+                                  JSRegExpResult::kInputIndex,
+                                  NONE,
+                                  enum_index++);
+      reresult_descriptors->Set(2, &input_field);
+    }
+    reresult_descriptors->Sort();
+
+    initial_map->set_inobject_properties(2);
+    initial_map->set_pre_allocated_property_fields(2);
+    initial_map->set_unused_property_fields(0);
+    initial_map->set_instance_descriptors(*reresult_descriptors);
+
+    global_context()->set_regexp_result_map(*initial_map);
+  }
+
 #ifdef DEBUG
   builtins->Verify();
 #endif
@@ -1175,6 +1326,42 @@
 }
 
 
+// Do not forget to update macros.py with the named constant
+// for the cache id.
+#define JSFUNCTION_RESULT_CACHE_LIST(F) \
+  F(16, global_context()->regexp_function())
+
+
+static FixedArray* CreateCache(int size, JSFunction* factory) {
+  // Caches are supposed to live for a long time, allocate in old space.
+  int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
+  // Cannot use cast as object is not fully initialized yet.
+  JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
+      *Factory::NewFixedArrayWithHoles(array_size, TENURED));
+  cache->set(JSFunctionResultCache::kFactoryIndex, factory);
+  cache->MakeZeroSize();
+  return cache;
+}
+
+
+void Genesis::InstallJSFunctionResultCaches() {
+  const int kNumberOfCaches = 0 +
+#define F(size, func) + 1
+    JSFUNCTION_RESULT_CACHE_LIST(F)
+#undef F
+  ;
+
+  Handle<FixedArray> caches = Factory::NewFixedArray(kNumberOfCaches, TENURED);
+
+  int index = 0;
+#define F(size, func) caches->set(index++, CreateCache(size, func));
+    JSFUNCTION_RESULT_CACHE_LIST(F)
+#undef F
+
+  global_context()->set_jsfunction_result_caches(*caches);
+}
+
+
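As an aside, the kNumberOfCaches computation above uses the classic X-macro counting idiom: every list entry expands to "+ 1", so the initializer is a compile-time sum of ones. A minimal standalone sketch of the idiom, where CACHE_LIST and its entries are hypothetical stand-ins for JSFUNCTION_RESULT_CACHE_LIST:

#include <cstdio>

// Hypothetical two-entry list standing in for JSFUNCTION_RESULT_CACHE_LIST.
#define CACHE_LIST(F) \
  F(16, "regexp")     \
  F(32, "string")

int main() {
  // Each entry expands to "+ 1", so the initializer becomes the
  // compile-time sum 0 + 1 + 1 == 2.
  const int kNumberOfCaches = 0
#define F(size, name) + 1
      CACHE_LIST(F)
#undef F
      ;
  printf("%d caches\n", kNumberOfCaches);
  return 0;
}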
 int BootstrapperActive::nesting_ = 0;
 
 
@@ -1327,6 +1514,7 @@
     Handle<SharedFunctionInfo> shared
         = Handle<SharedFunctionInfo>(function->shared());
     if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+    builtins->set_javascript_builtin_code(id, shared->code());
   }
   return true;
 }
@@ -1489,72 +1677,14 @@
   HandleScope scope;
 
   Handle<DescriptorArray> function_map_descriptors =
-      ComputeFunctionInstanceDescriptor(false);
+      ComputeFunctionInstanceDescriptor(ADD_WRITEABLE_PROTOTYPE);
   Handle<Map> fm = Factory::CopyMapDropDescriptors(Top::function_map());
   fm->set_instance_descriptors(*function_map_descriptors);
+  fm->set_function_with_prototype(true);
   Top::context()->global_context()->set_function_map(*fm);
 }
 
 
-void Genesis::AddSpecialFunction(Handle<JSObject> prototype,
-                                 const char* name,
-                                 Handle<Code> code) {
-  Handle<String> key = Factory::LookupAsciiSymbol(name);
-  Handle<Object> value = Handle<Object>(prototype->GetProperty(*key));
-  if (value->IsJSFunction()) {
-    Handle<JSFunction> optimized = Factory::NewFunction(key,
-                                                        JS_OBJECT_TYPE,
-                                                        JSObject::kHeaderSize,
-                                                        code,
-                                                        false);
-    optimized->shared()->DontAdaptArguments();
-    int len = global_context()->special_function_table()->length();
-    Handle<FixedArray> new_array = Factory::NewFixedArray(len + 3);
-    for (int index = 0; index < len; index++) {
-      new_array->set(index,
-                     global_context()->special_function_table()->get(index));
-    }
-    new_array->set(len+0, *prototype);
-    new_array->set(len+1, *value);
-    new_array->set(len+2, *optimized);
-    global_context()->set_special_function_table(*new_array);
-  }
-}
-
-
-void Genesis::BuildSpecialFunctionTable() {
-  HandleScope scope;
-  Handle<JSObject> global = Handle<JSObject>(global_context()->global());
-  // Add special versions for some Array.prototype functions.
-  Handle<JSFunction> function =
-      Handle<JSFunction>(
-          JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
-  Handle<JSObject> visible_prototype =
-      Handle<JSObject>(JSObject::cast(function->prototype()));
-  // Remember to put those specializations on the hidden prototype if present.
-  Handle<JSObject> special_prototype;
-  Handle<Object> superproto(visible_prototype->GetPrototype());
-  if (superproto->IsJSObject() &&
-      JSObject::cast(*superproto)->map()->is_hidden_prototype()) {
-    special_prototype = Handle<JSObject>::cast(superproto);
-  } else {
-    special_prototype = visible_prototype;
-  }
-  AddSpecialFunction(special_prototype, "pop",
-                     Handle<Code>(Builtins::builtin(Builtins::ArrayPop)));
-  AddSpecialFunction(special_prototype, "push",
-                     Handle<Code>(Builtins::builtin(Builtins::ArrayPush)));
-  AddSpecialFunction(special_prototype, "shift",
-                     Handle<Code>(Builtins::builtin(Builtins::ArrayShift)));
-  AddSpecialFunction(special_prototype, "unshift",
-                     Handle<Code>(Builtins::builtin(Builtins::ArrayUnshift)));
-  AddSpecialFunction(special_prototype, "slice",
-                     Handle<Code>(Builtins::builtin(Builtins::ArraySlice)));
-  AddSpecialFunction(special_prototype, "splice",
-                     Handle<Code>(Builtins::builtin(Builtins::ArraySplice)));
-}
-
-
 Genesis::Genesis(Handle<Object> global_object,
                  v8::Handle<v8::ObjectTemplate> global_template,
                  v8::ExtensionConfiguration* extensions) {
@@ -1597,9 +1727,9 @@
     HookUpGlobalProxy(inner_global, global_proxy);
     InitializeGlobal(inner_global, empty_function);
     if (!InstallNatives()) return;
+    InstallJSFunctionResultCaches();
 
     MakeFunctionInstancePrototypeWritable();
-    BuildSpecialFunctionTable();
 
     if (!ConfigureGlobalObjects(global_template)) return;
     i::Counters::contexts_created_from_scratch.Increment();
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 89eb381..66b8ff4 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -74,7 +74,7 @@
   // Traverses the pointers for memory management.
   static void Iterate(ObjectVisitor* v);
 
-  // Accessors for the native scripts cache. Used in lazy loading.
+  // Accessor for the native scripts source code.
   static Handle<String> NativesSourceLookup(int index);
 
   // Tells whether bootstrapping is active.
diff --git a/src/builtins.cc b/src/builtins.cc
index ee98769..e6cbd94 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -242,9 +242,165 @@
 }
 
 
+static Object* AllocateJSArray() {
+  JSFunction* array_function =
+      Top::context()->global_context()->array_function();
+  Object* result = Heap::AllocateJSObject(array_function);
+  if (result->IsFailure()) return result;
+  return result;
+}
+
+
+static Object* AllocateEmptyJSArray() {
+  Object* result = AllocateJSArray();
+  if (result->IsFailure()) return result;
+  JSArray* result_array = JSArray::cast(result);
+  result_array->set_length(Smi::FromInt(0));
+  result_array->set_elements(Heap::empty_fixed_array());
+  return result_array;
+}
+
+
+static void CopyElements(AssertNoAllocation* no_gc,
+                         FixedArray* dst,
+                         int dst_index,
+                         FixedArray* src,
+                         int src_index,
+                         int len) {
+  ASSERT(dst != src);  // Use MoveElements instead.
+  ASSERT(len > 0);
+  CopyWords(dst->data_start() + dst_index,
+            src->data_start() + src_index,
+            len);
+  WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
+  if (mode == UPDATE_WRITE_BARRIER) {
+    Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
+  }
+}
+
+
+static void MoveElements(AssertNoAllocation* no_gc,
+                         FixedArray* dst,
+                         int dst_index,
+                         FixedArray* src,
+                         int src_index,
+                         int len) {
+  memmove(dst->data_start() + dst_index,
+          src->data_start() + src_index,
+          len * kPointerSize);
+  WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
+  if (mode == UPDATE_WRITE_BARRIER) {
+    Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
+  }
+}
+
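MoveElements above wraps memmove rather than an element-by-element copy because its source and destination ranges live in the same backing store and may overlap. A minimal standalone illustration of why the distinction matters, using plain ints instead of tagged V8 values:

#include <cstdio>
#include <cstring>

int main() {
  // Shift elements left by one within the same backing store, as
  // ArrayShift does on its slow (non-trimming) path.
  int elems[5] = {10, 20, 30, 40, 50};
  // The source range [1..4] and destination [0..3] overlap; memmove
  // handles this correctly, while memcpy on overlapping ranges is
  // undefined behavior.
  memmove(elems, elems + 1, 4 * sizeof(int));
  elems[4] = 0;  // V8 writes the hole value into the vacated slot.
  for (int i = 0; i < 5; i++) printf("%d ", elems[i]);  // 20 30 40 50 0
  printf("\n");
  return 0;
}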
+
+static void FillWithHoles(FixedArray* dst, int from, int to) {
+  MemsetPointer(dst->data_start() + from, Heap::the_hole_value(), to - from);
+}
+
+
+static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
+  // For now this trick is only applied to fixed arrays in new space.
+  // In large object space the object's start must coincide with the
+  // chunk start, and thus the trick is just not applicable.
+  // In old space we do not use this trick to avoid dealing with
+  // remembered sets.
+  ASSERT(Heap::new_space()->Contains(elms));
+
+  STATIC_ASSERT(FixedArray::kMapOffset == 0);
+  STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
+  STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
+
+  Object** former_start = HeapObject::RawField(elms, 0);
+
+  const int len = elms->length();
+
+  // Technically in new space this write might be omitted (except for
+  // debug mode, which iterates through the heap), but to be on the
+  // safe side we still do it.
+  Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
+
+  former_start[to_trim] = Heap::fixed_array_map();
+  former_start[to_trim + 1] = reinterpret_cast<Object*>(len - to_trim);
+
+  ASSERT_EQ(elms->address() + to_trim * kPointerSize,
+            (elms + to_trim * kPointerSize)->address());
+  return elms + to_trim * kPointerSize;
+}
+
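LeftTrimFixedArray avoids copying the surviving elements altogether: it formats the vacated prefix as a filler object and writes a fresh map and length immediately before the remaining elements, so the trimmed array simply starts to_trim words into the old one. A simplified standalone model of the pointer arithmetic, with plain machine words standing in for tagged V8 objects and kMap/kFiller as illustrative markers:

#include <cassert>
#include <cstdio>

// Simplified model of a FixedArray: [map, length, e0, e1, ...] laid out
// as consecutive machine words.
const long kMap = 0xAAAA;
const long kFiller = 0xFFFF;

static long* LeftTrim(long* arr, int to_trim) {
  long len = arr[1];
  // Mark the vacated prefix so a heap walker can step over it, as
  // Heap::CreateFillerObjectAt does above.
  for (int i = 0; i < to_trim; i++) arr[i] = kFiller;
  // Write a fresh header just before the surviving elements; element
  // to_trim of the old array becomes element 0 of the new one.
  arr[to_trim] = kMap;
  arr[to_trim + 1] = len - to_trim;
  return arr + to_trim;  // No element was copied.
}

int main() {
  long buf[] = {kMap, 4, 10, 20, 30, 40};  // length 4, elements 10..40
  long* trimmed = LeftTrim(buf, 1);
  assert(trimmed[1] == 3);   // New length.
  assert(trimmed[2] == 20);  // Old element 1 is the new element 0.
  printf("length %ld, first element %ld\n", trimmed[1], trimmed[2]);
  return 0;
}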
+
+static bool ArrayPrototypeHasNoElements() {
+  // This method depends on the non-writability of the Object and Array
+  // prototype fields.
+  Context* global_context = Top::context()->global_context();
+  // Array.prototype
+  JSObject* proto =
+      JSObject::cast(global_context->array_function()->prototype());
+  if (proto->elements() != Heap::empty_fixed_array()) return false;
+  // Hidden prototype
+  proto = JSObject::cast(proto->GetPrototype());
+  ASSERT(proto->elements() == Heap::empty_fixed_array());
+  // Object.prototype
+  proto = JSObject::cast(proto->GetPrototype());
+  if (proto != global_context->initial_object_prototype()) return false;
+  if (proto->elements() != Heap::empty_fixed_array()) return false;
+  ASSERT(proto->GetPrototype()->IsNull());
+  return true;
+}
+
+
+static bool IsJSArrayWithFastElements(Object* receiver,
+                                      FixedArray** elements) {
+  if (!receiver->IsJSArray()) {
+    return false;
+  }
+
+  JSArray* array = JSArray::cast(receiver);
+
+  HeapObject* elms = HeapObject::cast(array->elements());
+  if (elms->map() != Heap::fixed_array_map()) {
+    return false;
+  }
+
+  *elements = FixedArray::cast(elms);
+  return true;
+}
+
+
+static Object* CallJsBuiltin(const char* name,
+                             BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
+  HandleScope handleScope;
+
+  Handle<Object> js_builtin =
+      GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
+                  name);
+  ASSERT(js_builtin->IsJSFunction());
+  Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
+  Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
+  int n_args = args.length() - 1;
+  for (int i = 0; i < n_args; i++) {
+    argv[i] = args.at<Object>(i + 1).location();
+  }
+  bool pending_exception = false;
+  Handle<Object> result = Execution::Call(function,
+                                          args.receiver(),
+                                          n_args,
+                                          argv.start(),
+                                          &pending_exception);
+  argv.Dispose();
+  if (pending_exception) return Failure::Exception();
+  return *result;
+}
+
+
 BUILTIN(ArrayPush) {
-  JSArray* array = JSArray::cast(*args.receiver());
-  ASSERT(array->HasFastElements());
+  Object* receiver = *args.receiver();
+  FixedArray* elms = NULL;
+  if (!IsJSArrayWithFastElements(receiver, &elms)) {
+    return CallJsBuiltin("ArrayPush", args);
+  }
+  JSArray* array = JSArray::cast(receiver);
 
   int len = Smi::cast(array->length())->value();
   int to_add = args.length() - 1;
@@ -256,27 +412,27 @@
   ASSERT(to_add <= (Smi::kMaxValue - len));
 
   int new_length = len + to_add;
-  FixedArray* elms = FixedArray::cast(array->elements());
 
   if (new_length > elms->length()) {
     // New backing storage is needed.
     int capacity = new_length + (new_length >> 1) + 16;
-    Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+    Object* obj = Heap::AllocateUninitializedFixedArray(capacity);
     if (obj->IsFailure()) return obj;
+    FixedArray* new_elms = FixedArray::cast(obj);
 
     AssertNoAllocation no_gc;
-    FixedArray* new_elms = FixedArray::cast(obj);
-    WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
-    // Fill out the new array with old elements.
-    for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
+    if (len > 0) {
+      CopyElements(&no_gc, new_elms, 0, elms, 0, len);
+    }
+    FillWithHoles(new_elms, new_length, capacity);
+
     elms = new_elms;
     array->set_elements(elms);
   }
 
+  // Add the provided values.
   AssertNoAllocation no_gc;
   WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-
-  // Add the provided values.
   for (int index = 0; index < to_add; index++) {
     elms->set(index + len, args[index + 1], mode);
   }
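The capacity formula used when the backing store must grow, new_length + (new_length >> 1) + 16, gives roughly 1.5x geometric growth plus a constant, which keeps repeated pushes amortized O(1) while avoiding tiny reallocations for short arrays. A quick standalone check of the arithmetic:

#include <cstdio>
#include <initializer_list>

// Same growth rule as ArrayPush/ArrayUnshift above: ~1.5x plus 16 slack.
static int NewCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;
}

int main() {
  // Pushing one element at a time reallocates at lengths 1, 18, 44, ...
  // (capacities 17, 43, 82, ...), so the number of copies grows only
  // logarithmically with the final length.
  for (int new_length : {1, 18, 44, 100, 1000}) {
    printf("new_length=%4d -> capacity=%4d\n",
           new_length, NewCapacity(new_length));
  }
  return 0;
}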
@@ -288,15 +444,17 @@
 
 
 BUILTIN(ArrayPop) {
-  JSArray* array = JSArray::cast(*args.receiver());
-  ASSERT(array->HasFastElements());
-  Object* undefined = Heap::undefined_value();
+  Object* receiver = *args.receiver();
+  FixedArray* elms = NULL;
+  if (!IsJSArrayWithFastElements(receiver, &elms)) {
+    return CallJsBuiltin("ArrayPop", args);
+  }
+  JSArray* array = JSArray::cast(receiver);
 
   int len = Smi::cast(array->length())->value();
-  if (len == 0) return undefined;
+  if (len == 0) return Heap::undefined_value();
 
   // Get top element
-  FixedArray* elms = FixedArray::cast(array->elements());
   Object* top = elms->get(len - 1);
 
   // Set the length.
@@ -318,42 +476,35 @@
 }
 
 
-static Object* GetElementToMove(uint32_t index,
-                                FixedArray* elms,
-                                JSObject* prototype) {
-  Object* e = elms->get(index);
-  if (e->IsTheHole() && prototype->HasElement(index)) {
-    e = prototype->GetElement(index);
-  }
-  return e;
-}
-
-
 BUILTIN(ArrayShift) {
-  JSArray* array = JSArray::cast(*args.receiver());
+  Object* receiver = *args.receiver();
+  FixedArray* elms = NULL;
+  if (!IsJSArrayWithFastElements(receiver, &elms)
+      || !ArrayPrototypeHasNoElements()) {
+    return CallJsBuiltin("ArrayShift", args);
+  }
+  JSArray* array = JSArray::cast(receiver);
   ASSERT(array->HasFastElements());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return Heap::undefined_value();
 
-  // Fetch the prototype.
-  JSFunction* array_function =
-      Top::context()->global_context()->array_function();
-  JSObject* prototype = JSObject::cast(array_function->prototype());
-
-  FixedArray* elms = FixedArray::cast(array->elements());
-
   // Get first element
   Object* first = elms->get(0);
   if (first->IsTheHole()) {
-    first = prototype->GetElement(0);
+    first = Heap::undefined_value();
   }
 
-  // Shift the elements.
-  for (int i = 0; i < len - 1; i++) {
-    elms->set(i, GetElementToMove(i + 1, elms, prototype));
+  if (Heap::new_space()->Contains(elms)) {
+    // As elms is still in the same space it used to be in (new space),
+    // there is no need to update the remembered set.
+    array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
+  } else {
+    // Shift the elements.
+    AssertNoAllocation no_gc;
+    MoveElements(&no_gc, elms, 0, elms, 1, len - 1);
+    elms->set(len - 1, Heap::the_hole_value());
   }
-  elms->set(len - 1, Heap::the_hole_value());
 
   // Set the length.
   array->set_length(Smi::FromInt(len - 1));
@@ -363,54 +514,40 @@
 
 
 BUILTIN(ArrayUnshift) {
-  JSArray* array = JSArray::cast(*args.receiver());
+  Object* receiver = *args.receiver();
+  FixedArray* elms = NULL;
+  if (!IsJSArrayWithFastElements(receiver, &elms)
+      || !ArrayPrototypeHasNoElements()) {
+    return CallJsBuiltin("ArrayUnshift", args);
+  }
+  JSArray* array = JSArray::cast(receiver);
   ASSERT(array->HasFastElements());
 
   int len = Smi::cast(array->length())->value();
   int to_add = args.length() - 1;
-  // Note that we cannot quit early if to_add == 0 as
-  // values should be lifted from prototype into
-  // the array.
-
   int new_length = len + to_add;
   // Currently fixed arrays cannot grow too big, so
   // we should never hit this case.
   ASSERT(to_add <= (Smi::kMaxValue - len));
 
-  FixedArray* elms = FixedArray::cast(array->elements());
-
-  // Fetch the prototype.
-  JSFunction* array_function =
-      Top::context()->global_context()->array_function();
-  JSObject* prototype = JSObject::cast(array_function->prototype());
-
   if (new_length > elms->length()) {
     // New backing storage is needed.
     int capacity = new_length + (new_length >> 1) + 16;
-    Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+    Object* obj = Heap::AllocateUninitializedFixedArray(capacity);
     if (obj->IsFailure()) return obj;
+    FixedArray* new_elms = FixedArray::cast(obj);
 
     AssertNoAllocation no_gc;
-    FixedArray* new_elms = FixedArray::cast(obj);
-    WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
-    // Fill out the new array with old elements.
-    for (int i = 0; i < len; i++)
-      new_elms->set(to_add + i,
-                    GetElementToMove(i, elms, prototype),
-                    mode);
+    if (len > 0) {
+      CopyElements(&no_gc, new_elms, to_add, elms, 0, len);
+    }
+    FillWithHoles(new_elms, new_length, capacity);
 
     elms = new_elms;
     array->set_elements(elms);
   } else {
     AssertNoAllocation no_gc;
-    WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-
-    // Move elements to the right
-    for (int i = 0; i < len; i++) {
-      elms->set(new_length - i - 1,
-                GetElementToMove(len - i - 1, elms, prototype),
-                mode);
-    }
+    MoveElements(&no_gc, elms, to_add, elms, 0, len);
   }
 
   // Add the provided values.
@@ -426,33 +563,14 @@
 }
 
 
-static Object* CallJsBuiltin(const char* name,
-                             BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
-  HandleScope handleScope;
-
-  Handle<Object> js_builtin =
-      GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
-                  name);
-  ASSERT(js_builtin->IsJSFunction());
-  Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
-  Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
-  int n_args = args.length() - 1;
-  for (int i = 0; i < n_args; i++) {
-    argv[i] = &args[i + 1];
-  }
-  bool pending_exception = false;
-  Handle<Object> result = Execution::Call(function,
-                                          args.receiver(),
-                                          n_args,
-                                          argv.start(),
-                                          &pending_exception);
-  if (pending_exception) return Failure::Exception();
-  return *result;
-}
-
-
 BUILTIN(ArraySlice) {
-  JSArray* array = JSArray::cast(*args.receiver());
+  Object* receiver = *args.receiver();
+  FixedArray* elms = NULL;
+  if (!IsJSArrayWithFastElements(receiver, &elms)
+      || !ArrayPrototypeHasNoElements()) {
+    return CallJsBuiltin("ArraySlice", args);
+  }
+  JSArray* array = JSArray::cast(receiver);
   ASSERT(array->HasFastElements());
 
   int len = Smi::cast(array->length())->value();
@@ -460,21 +578,21 @@
   int n_arguments = args.length() - 1;
 
   // Note carefully chosen defaults---if argument is missing,
-  // it's undefined which gets converted to 0 for relativeStart
-  // and to len for relativeEnd.
-  int relativeStart = 0;
-  int relativeEnd = len;
+  // it's undefined which gets converted to 0 for relative_start
+  // and to len for relative_end.
+  int relative_start = 0;
+  int relative_end = len;
   if (n_arguments > 0) {
     Object* arg1 = args[1];
     if (arg1->IsSmi()) {
-      relativeStart = Smi::cast(arg1)->value();
+      relative_start = Smi::cast(arg1)->value();
     } else if (!arg1->IsUndefined()) {
       return CallJsBuiltin("ArraySlice", args);
     }
     if (n_arguments > 1) {
       Object* arg2 = args[2];
       if (arg2->IsSmi()) {
-        relativeEnd = Smi::cast(arg2)->value();
+        relative_end = Smi::cast(arg2)->value();
       } else if (!arg2->IsUndefined()) {
         return CallJsBuiltin("ArraySlice", args);
       }
@@ -482,43 +600,29 @@
   }
 
   // ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
-  int k = (relativeStart < 0) ? Max(len + relativeStart, 0)
-                              : Min(relativeStart, len);
+  int k = (relative_start < 0) ? Max(len + relative_start, 0)
+                               : Min(relative_start, len);
 
   // ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
-  int final = (relativeEnd < 0) ? Max(len + relativeEnd, 0)
-                                : Min(relativeEnd, len);
+  int final = (relative_end < 0) ? Max(len + relative_end, 0)
+                                 : Min(relative_end, len);
 
   // Calculate the length of result array.
   int result_len = final - k;
-  if (result_len < 0) {
-    result_len = 0;
+  if (result_len <= 0) {
+    return AllocateEmptyJSArray();
   }
 
-  JSFunction* array_function =
-      Top::context()->global_context()->array_function();
-  Object* result = Heap::AllocateJSObject(array_function);
+  Object* result = AllocateJSArray();
   if (result->IsFailure()) return result;
   JSArray* result_array = JSArray::cast(result);
 
-  result = Heap::AllocateFixedArrayWithHoles(result_len);
+  result = Heap::AllocateUninitializedFixedArray(result_len);
   if (result->IsFailure()) return result;
   FixedArray* result_elms = FixedArray::cast(result);
 
-  FixedArray* elms = FixedArray::cast(array->elements());
-
-  // Fetch the prototype.
-  JSObject* prototype = JSObject::cast(array_function->prototype());
-
   AssertNoAllocation no_gc;
-  WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
-
-  // Fill newly created array.
-  for (int i = 0; i < result_len; i++) {
-    result_elms->set(i,
-                     GetElementToMove(k + i, elms, prototype),
-                     mode);
-  }
+  CopyElements(&no_gc, result_elms, 0, elms, k, result_len);
 
   // Set elements.
   result_array->set_elements(result_elms);
@@ -530,7 +634,13 @@
 
 
 BUILTIN(ArraySplice) {
-  JSArray* array = JSArray::cast(*args.receiver());
+  Object* receiver = *args.receiver();
+  FixedArray* elms = NULL;
+  if (!IsJSArrayWithFastElements(receiver, &elms)
+      || !ArrayPrototypeHasNoElements()) {
+    return CallJsBuiltin("ArraySplice", args);
+  }
+  JSArray* array = JSArray::cast(receiver);
   ASSERT(array->HasFastElements());
 
   int len = Smi::cast(array->length())->value();
@@ -546,118 +656,129 @@
     return Heap::undefined_value();
   }
 
-  int relativeStart = 0;
+  int relative_start = 0;
   Object* arg1 = args[1];
   if (arg1->IsSmi()) {
-    relativeStart = Smi::cast(arg1)->value();
+    relative_start = Smi::cast(arg1)->value();
   } else if (!arg1->IsUndefined()) {
     return CallJsBuiltin("ArraySplice", args);
   }
-  int actualStart = (relativeStart < 0) ? Max(len + relativeStart, 0)
-                                        : Min(relativeStart, len);
+  int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
+                                          : Min(relative_start, len);
 
   // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
   // given differently from when an undefined delete count is given.
   // This does not follow ECMA-262, but we do the same for
   // compatibility.
-  int deleteCount = len;
+  int delete_count = len;
   if (n_arguments > 1) {
     Object* arg2 = args[2];
     if (arg2->IsSmi()) {
-      deleteCount = Smi::cast(arg2)->value();
+      delete_count = Smi::cast(arg2)->value();
     } else {
       return CallJsBuiltin("ArraySplice", args);
     }
   }
-  int actualDeleteCount = Min(Max(deleteCount, 0), len - actualStart);
+  int actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
 
-  JSFunction* array_function =
-      Top::context()->global_context()->array_function();
+  JSArray* result_array = NULL;
+  if (actual_delete_count == 0) {
+    Object* result = AllocateEmptyJSArray();
+    if (result->IsFailure()) return result;
+    result_array = JSArray::cast(result);
+  } else {
+    // Allocate result array.
+    Object* result = AllocateJSArray();
+    if (result->IsFailure()) return result;
+    result_array = JSArray::cast(result);
 
-  // Allocate result array.
-  Object* result = Heap::AllocateJSObject(array_function);
-  if (result->IsFailure()) return result;
-  JSArray* result_array = JSArray::cast(result);
+    result = Heap::AllocateUninitializedFixedArray(actual_delete_count);
+    if (result->IsFailure()) return result;
+    FixedArray* result_elms = FixedArray::cast(result);
 
-  result = Heap::AllocateFixedArrayWithHoles(actualDeleteCount);
-  if (result->IsFailure()) return result;
-  FixedArray* result_elms = FixedArray::cast(result);
+    AssertNoAllocation no_gc;
+    // Fill newly created array.
+    CopyElements(&no_gc,
+                 result_elms, 0,
+                 elms, actual_start,
+                 actual_delete_count);
 
-  FixedArray* elms = FixedArray::cast(array->elements());
+    // Set elements.
+    result_array->set_elements(result_elms);
 
-  // Fetch the prototype.
-  JSObject* prototype = JSObject::cast(array_function->prototype());
-
-  AssertNoAllocation no_gc;
-  WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
-
-  // Fill newly created array.
-  for (int k = 0; k < actualDeleteCount; k++) {
-    result_elms->set(k,
-                     GetElementToMove(actualStart + k, elms, prototype),
-                     mode);
+    // Set the length.
+    result_array->set_length(Smi::FromInt(actual_delete_count));
   }
 
-  // Set elements.
-  result_array->set_elements(result_elms);
+  int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
 
-  // Set the length.
-  result_array->set_length(Smi::FromInt(actualDeleteCount));
+  int new_length = len - actual_delete_count + item_count;
 
-  int itemCount = (n_arguments > 1) ? (n_arguments - 2) : 0;
-
-  int new_length = len - actualDeleteCount + itemCount;
-
-  mode = elms->GetWriteBarrierMode(no_gc);
-  if (itemCount < actualDeleteCount) {
+  if (item_count < actual_delete_count) {
     // Shrink the array.
-    for (int k = actualStart; k < (len - actualDeleteCount); k++) {
-      elms->set(k + itemCount,
-                GetElementToMove(k + actualDeleteCount, elms, prototype),
-                mode);
-    }
+    const bool trim_array = Heap::new_space()->Contains(elms) &&
+      ((actual_start + item_count) <
+          (len - actual_delete_count - actual_start));
+    if (trim_array) {
+      const int delta = actual_delete_count - item_count;
 
-    for (int k = len; k > new_length; k--) {
-      elms->set(k - 1, Heap::the_hole_value());
+      if (actual_start > 0) {
+        Object** start = elms->data_start();
+        memmove(start + delta, start, actual_start * kPointerSize);
+      }
+
+      elms = LeftTrimFixedArray(elms, delta);
+      array->set_elements(elms, SKIP_WRITE_BARRIER);
+    } else {
+      AssertNoAllocation no_gc;
+      MoveElements(&no_gc,
+                   elms, actual_start + item_count,
+                   elms, actual_start + actual_delete_count,
+                   (len - actual_delete_count - actual_start));
+      FillWithHoles(elms, new_length, len);
     }
-  } else if (itemCount > actualDeleteCount) {
+  } else if (item_count > actual_delete_count) {
     // Currently fixed arrays cannot grow too big, so
     // we should never hit this case.
-    ASSERT((itemCount - actualDeleteCount) <= (Smi::kMaxValue - len));
-
-    FixedArray* source_elms = elms;
+    ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
 
     // Check if the array needs to grow.
     if (new_length > elms->length()) {
       // New backing storage is needed.
       int capacity = new_length + (new_length >> 1) + 16;
-      Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+      Object* obj = Heap::AllocateUninitializedFixedArray(capacity);
       if (obj->IsFailure()) return obj;
-
       FixedArray* new_elms = FixedArray::cast(obj);
-      mode = new_elms->GetWriteBarrierMode(no_gc);
 
-      // Copy the part before actualStart as is.
-      for (int k = 0; k < actualStart; k++) {
-        new_elms->set(k, elms->get(k), mode);
+      AssertNoAllocation no_gc;
+      // Copy the part before actual_start as is.
+      if (actual_start > 0) {
+        CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start);
       }
+      const int to_copy = len - actual_delete_count - actual_start;
+      if (to_copy > 0) {
+        CopyElements(&no_gc,
+                     new_elms, actual_start + item_count,
+                     elms, actual_start + actual_delete_count,
+                     to_copy);
+      }
+      FillWithHoles(new_elms, new_length, capacity);
 
-      source_elms = elms;
       elms = new_elms;
       array->set_elements(elms);
-    }
-
-    for (int k = len - actualDeleteCount; k > actualStart; k--) {
-      elms->set(k + itemCount - 1,
-                GetElementToMove(k + actualDeleteCount - 1,
-                                 source_elms,
-                                 prototype),
-                mode);
+    } else {
+      AssertNoAllocation no_gc;
+      MoveElements(&no_gc,
+                   elms, actual_start + item_count,
+                   elms, actual_start + actual_delete_count,
+                   (len - actual_delete_count - actual_start));
     }
   }
 
-  for (int k = actualStart; k < actualStart + itemCount; k++) {
-    elms->set(k, args[3 + k - actualStart], mode);
+  AssertNoAllocation no_gc;
+  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+  for (int k = actual_start; k < actual_start + item_count; k++) {
+    elms->set(k, args[3 + k - actual_start], mode);
   }
 
   // Set the length.
@@ -667,6 +788,70 @@
 }
 
 
+BUILTIN(ArrayConcat) {
+  if (!ArrayPrototypeHasNoElements()) {
+    return CallJsBuiltin("ArrayConcat", args);
+  }
+
+  // Iterate through all the arguments performing checks
+  // and calculating total length.
+  int n_arguments = args.length();
+  int result_len = 0;
+  for (int i = 0; i < n_arguments; i++) {
+    Object* arg = args[i];
+    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()) {
+      return CallJsBuiltin("ArrayConcat", args);
+    }
+
+    int len = Smi::cast(JSArray::cast(arg)->length())->value();
+
+    // We shouldn't overflow when adding another len.
+    const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
+    STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
+    USE(kHalfOfMaxInt);
+    result_len += len;
+    ASSERT(result_len >= 0);
+
+    if (result_len > FixedArray::kMaxLength) {
+      return CallJsBuiltin("ArrayConcat", args);
+    }
+  }
+
+  if (result_len == 0) {
+    return AllocateEmptyJSArray();
+  }
+
+  // Allocate result.
+  Object* result = AllocateJSArray();
+  if (result->IsFailure()) return result;
+  JSArray* result_array = JSArray::cast(result);
+
+  result = Heap::AllocateUninitializedFixedArray(result_len);
+  if (result->IsFailure()) return result;
+  FixedArray* result_elms = FixedArray::cast(result);
+
+  // Copy data.
+  AssertNoAllocation no_gc;
+  int start_pos = 0;
+  for (int i = 0; i < n_arguments; i++) {
+    JSArray* array = JSArray::cast(args[i]);
+    int len = Smi::cast(array->length())->value();
+    if (len > 0) {
+      FixedArray* elms = FixedArray::cast(array->elements());
+      CopyElements(&no_gc, result_elms, start_pos, elms, 0, len);
+      start_pos += len;
+    }
+  }
+  ASSERT(start_pos == result_len);
+
+  // Set the length and elements.
+  result_array->set_length(Smi::FromInt(result_len));
+  result_array->set_elements(result_elms);
+
+  return result_array;
+}
+
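The overflow guard in ArrayConcat deserves a note: every individual length is bounded by FixedArray::kMaxLength, and the running total is re-checked against that bound after each addition, so each addition sums two values below 2^30 and cannot overflow a 32-bit signed int. A standalone restatement of the invariant, where kMaxLength is an illustrative stand-in for the real constant:

#include <cassert>
#include <climits>

int main() {
  // Illustrative stand-in for FixedArray::kMaxLength; the argument only
  // needs kMaxLength < 2^30.
  const int kMaxLength = (1 << 30) - 1;
  const int kHalfOfMaxInt = 1 << (sizeof(int) * CHAR_BIT - 2);
  static_assert(kMaxLength < kHalfOfMaxInt,
                "two lengths must always fit in a signed int");

  int result_len = 0;
  int lengths[] = {kMaxLength, kMaxLength};  // Worst case: two maximal arrays.
  for (int len : lengths) {
    // Invariant: result_len <= kMaxLength and len <= kMaxLength, both
    // below 2^30, so their sum stays below 2^31 and cannot overflow.
    result_len += len;
    assert(result_len >= 0);
    if (result_len > kMaxLength) {
      return 0;  // ArrayConcat bails out to the JS implementation here.
    }
  }
  return 0;
}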
+
 // -----------------------------------------------------------------------------
 //
 
@@ -726,20 +911,19 @@
 
   HandleScope scope;
   Handle<JSFunction> function = args.called_function();
+  ASSERT(function->shared()->IsApiFunction());
 
+  FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data();
   if (is_construct) {
-    Handle<FunctionTemplateInfo> desc =
-        Handle<FunctionTemplateInfo>(
-            FunctionTemplateInfo::cast(function->shared()->function_data()));
+    Handle<FunctionTemplateInfo> desc(fun_data);
     bool pending_exception = false;
     Factory::ConfigureInstance(desc, Handle<JSObject>::cast(args.receiver()),
                                &pending_exception);
     ASSERT(Top::has_pending_exception() == pending_exception);
     if (pending_exception) return Failure::Exception();
+    fun_data = *desc;
   }
 
-  FunctionTemplateInfo* fun_data =
-      FunctionTemplateInfo::cast(function->shared()->function_data());
   Object* raw_holder = TypeCheck(args.length(), &args[0], fun_data);
 
   if (raw_holder->IsNull()) {
@@ -810,8 +994,8 @@
 
 static void VerifyTypeCheck(Handle<JSObject> object,
                             Handle<JSFunction> function) {
-  FunctionTemplateInfo* info =
-      FunctionTemplateInfo::cast(function->shared()->function_data());
+  ASSERT(function->shared()->IsApiFunction());
+  FunctionTemplateInfo* info = function->shared()->get_api_func_data();
   if (info->signature()->IsUndefined()) return;
   SignatureInfo* signature = SignatureInfo::cast(info->signature());
   Object* receiver_type = signature->receiver();
@@ -895,9 +1079,9 @@
   // used to create the called object.
   ASSERT(obj->map()->has_instance_call_handler());
   JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
-  Object* template_info = constructor->shared()->function_data();
+  ASSERT(constructor->shared()->IsApiFunction());
   Object* handler =
-      FunctionTemplateInfo::cast(template_info)->instance_call_handler();
+      constructor->shared()->get_api_func_data()->instance_call_handler();
   ASSERT(!handler->IsUndefined());
   CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
   Object* callback_obj = call_data->callback();
@@ -1079,6 +1263,11 @@
 }
 
 
+static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
+  StoreIC::GenerateArrayLength(masm);
+}
+
+
 static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
   KeyedStoreIC::GenerateGeneric(masm);
 }
@@ -1166,6 +1355,14 @@
 static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
   Debug::GenerateStubNoRegistersDebugBreak(masm);
 }
+
+static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
+  Debug::GeneratePlainReturnLiveEdit(masm);
+}
+
+static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
+  Debug::GenerateFrameDropperLiveEdit(masm);
+}
 #endif
 
 Object* Builtins::builtins_[builtin_count] = { NULL, };
@@ -1267,8 +1464,8 @@
         }
       }
       // Log the event and add the code to the builtins array.
-      LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
-                          Code::cast(code), functions[i].s_name));
+      PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
+                              Code::cast(code), functions[i].s_name));
       builtins_[i] = code;
 #ifdef ENABLE_DISASSEMBLER
       if (FLAG_print_builtin_code) {
diff --git a/src/builtins.h b/src/builtins.h
index 13322c2..dd2e3cb 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -52,6 +52,7 @@
   V(ArrayUnshift, NO_EXTRA_ARGUMENTS)                               \
   V(ArraySlice, NO_EXTRA_ARGUMENTS)                                 \
   V(ArraySplice, NO_EXTRA_ARGUMENTS)                                \
+  V(ArrayConcat, NO_EXTRA_ARGUMENTS)                                \
                                                                     \
   V(HandleApiCall, NEEDS_CALLED_FUNCTION)                           \
   V(FastHandleApiCall, NO_EXTRA_ARGUMENTS)                          \
@@ -96,6 +97,7 @@
   V(KeyedLoadIC_IndexedInterceptor,         KEYED_LOAD_IC, MEGAMORPHIC)   \
                                                                           \
   V(StoreIC_Initialize,         STORE_IC, UNINITIALIZED)                  \
+  V(StoreIC_ArrayLength,        STORE_IC, MONOMORPHIC)                    \
   V(StoreIC_Megamorphic,        STORE_IC, MEGAMORPHIC)                    \
                                                                           \
   V(KeyedStoreIC_Initialize,    KEYED_STORE_IC, UNINITIALIZED)            \
@@ -124,7 +126,9 @@
   V(LoadIC_DebugBreak,          LOAD_IC, DEBUG_BREAK)          \
   V(KeyedLoadIC_DebugBreak,     KEYED_LOAD_IC, DEBUG_BREAK)    \
   V(StoreIC_DebugBreak,         STORE_IC, DEBUG_BREAK)         \
-  V(KeyedStoreIC_DebugBreak,    KEYED_STORE_IC, DEBUG_BREAK)
+  V(KeyedStoreIC_DebugBreak,    KEYED_STORE_IC, DEBUG_BREAK)   \
+  V(PlainReturn_LiveEdit,       BUILTIN, DEBUG_BREAK)          \
+  V(FrameDropper_LiveEdit,      BUILTIN, DEBUG_BREAK)
 #else
 #define BUILTIN_LIST_DEBUG_A(V)
 #endif
@@ -160,8 +164,7 @@
   V(STRING_ADD_LEFT, 1)                  \
   V(STRING_ADD_RIGHT, 1)                 \
   V(APPLY_PREPARE, 1)                    \
-  V(APPLY_OVERFLOW, 1)                   \
-  V(STRING_CHAR_AT, 1)
+  V(APPLY_OVERFLOW, 1)
 
 
 class ObjectVisitor;
diff --git a/src/cached-powers.h b/src/cached-powers.h
new file mode 100644
index 0000000..314ccca
--- /dev/null
+++ b/src/cached-powers.h
@@ -0,0 +1,119 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CACHED_POWERS_H_
+#define V8_CACHED_POWERS_H_
+
+#include "diy-fp.h"
+
+namespace v8 {
+namespace internal {
+
+struct CachedPower {
+  uint64_t significand;
+  int16_t binary_exponent;
+  int16_t decimal_exponent;
+};
+
+// The following defines implement the interface between this file and the
+// generated 'powers-ten.h'.
+// GRISU_CACHE_NAME(1) contains all possible cached powers.
+// GRISU_CACHE_NAME(i) contains GRISU_CACHE_NAME(1) where only every 'i'th
+// element is kept. More formally, GRISU_CACHE_NAME(i) contains the elements
+// j*i with 0 <= j < k, where k is chosen so that j*i stays within the size
+// of GRISU_CACHE_NAME(1).
+// The higher 'i' is, the fewer elements we use.
+// Given that there are fewer elements, the exponent distance between two
+// elements in the cache grows. The variable GRISU_CACHE_MAX_DISTANCE(i) stores
+// the maximum distance between two elements.
+#define GRISU_CACHE_STRUCT CachedPower
+#define GRISU_CACHE_NAME(i) kCachedPowers##i
+#define GRISU_CACHE_MAX_DISTANCE(i) kCachedPowersMaxDistance##i
+#define GRISU_CACHE_OFFSET kCachedPowerOffset
+#define GRISU_UINT64_C V8_2PART_UINT64_C
+// The following include imports the precompiled cached powers.
+#include "powers-ten.h"  // NOLINT
+
+static const double kD_1_LOG2_10 = 0.30102999566398114;  //  1 / lg(10)
+
+// We can't use a function since the referenced variables depend on 'i'.
+// This way the compiler is able to see at compile time that only one
+// cache-array variable is used and thus can remove all the others.
+#define COMPUTE_FOR_CACHE(i) \
+  if (!found && (gamma - alpha + 1 >= GRISU_CACHE_MAX_DISTANCE(i))) {   \
+    int kQ = DiyFp::kSignificandSize;                                   \
+    double k = ceiling((alpha - e + kQ - 1) * kD_1_LOG2_10);            \
+    int index = (GRISU_CACHE_OFFSET + static_cast<int>(k) - 1) / i + 1; \
+    cached_power = GRISU_CACHE_NAME(i)[index];                          \
+    found = true;                                                       \
+  }                                                                     \
+
+static void GetCachedPower(int e, int alpha, int gamma, int* mk, DiyFp* c_mk) {
+  // The following if statement should be optimized by the compiler so that only
+  // one array is referenced and the others are not included in the object file.
+  bool found = false;
+  CachedPower cached_power;
+  COMPUTE_FOR_CACHE(20);
+  COMPUTE_FOR_CACHE(19);
+  COMPUTE_FOR_CACHE(18);
+  COMPUTE_FOR_CACHE(17);
+  COMPUTE_FOR_CACHE(16);
+  COMPUTE_FOR_CACHE(15);
+  COMPUTE_FOR_CACHE(14);
+  COMPUTE_FOR_CACHE(13);
+  COMPUTE_FOR_CACHE(12);
+  COMPUTE_FOR_CACHE(11);
+  COMPUTE_FOR_CACHE(10);
+  COMPUTE_FOR_CACHE(9);
+  COMPUTE_FOR_CACHE(8);
+  COMPUTE_FOR_CACHE(7);
+  COMPUTE_FOR_CACHE(6);
+  COMPUTE_FOR_CACHE(5);
+  COMPUTE_FOR_CACHE(4);
+  COMPUTE_FOR_CACHE(3);
+  COMPUTE_FOR_CACHE(2);
+  COMPUTE_FOR_CACHE(1);
+  if (!found) {
+    UNIMPLEMENTED();
+    // Silence compiler warnings.
+    cached_power.significand = 0;
+    cached_power.binary_exponent = 0;
+    cached_power.decimal_exponent = 0;
+  }
+  *c_mk = DiyFp(cached_power.significand, cached_power.binary_exponent);
+  *mk = cached_power.decimal_exponent;
+  ASSERT((alpha <= c_mk->e() + e) && (c_mk->e() + e <= gamma));
+}
+#undef GRISU_REDUCTION
+#undef GRISU_CACHE_STRUCT
+#undef GRISU_CACHE_NAME
+#undef GRISU_CACHE_MAX_DISTANCE
+#undef GRISU_CACHE_OFFSET
+#undef GRISU_UINT64_C
+
+} }  // namespace v8::internal
+
+#endif  // V8_CACHED_POWERS_H_
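The lookup in GetCachedPower reduces to one line of arithmetic: for a binary exponent e and target window [alpha, gamma], the decimal exponent of the needed power of ten is k = ceiling((alpha - e + q - 1) * log10(2)) with q = 64, the DiyFp significand width. A standalone sketch of just that computation, with an assumed example exponent and window:

#include <cmath>
#include <cstdio>

int main() {
  const double kD_1_Log2_10 = 0.30102999566398114;  // log10(2) == 1 / lg(10)
  const int kQ = 64;  // DiyFp::kSignificandSize

  // Assumed example inputs: a binary exponent e and a window [alpha, gamma]
  // chosen only for illustration.
  int e = -63;
  int alpha = -60;
  double k = std::ceil((alpha - e + kQ - 1) * kD_1_Log2_10);

  // k is the decimal exponent of the cached power of ten to multiply by;
  // the cache lookup above turns k into an index into kCachedPowers.
  printf("use the cached power 10^%d\n", static_cast<int>(k));  // 10^20
  return 0;
}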
diff --git a/src/checks.h b/src/checks.h
index eeb748b..cdcd18a 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -80,6 +80,7 @@
   }
 }
 
+
 // Helper function used by the CHECK_EQ function when given int64_t
 // arguments.  Should not be called directly.
 static inline void CheckEqualsHelper(const char* file, int line,
@@ -202,6 +203,27 @@
 }
 
 
+static inline void CheckNonEqualsHelper(const char* file,
+                                     int line,
+                                     const char* expected_source,
+                                     double expected,
+                                     const char* value_source,
+                                     double value) {
+  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+  volatile double* exp = new double[1];
+  *exp = expected;
+  volatile double* val = new double[1];
+  *val = value;
+  if (*exp == *val) {
+    V8_Fatal(file, line,
+             "CHECK_NE(%s, %s) failed\n#   Value: %f",
+             expected_source, value_source, *val);
+  }
+  delete[] exp;
+  delete[] val;
+}
+
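The heap-allocated volatile doubles in the helpers above exist to defeat x87 excess precision on IA32: values held in 80-bit FPU registers can compare differently than after rounding to 64-bit doubles, and spilling through memory forces that rounding. A hedged standalone sketch of the same trick; whether the effect is actually observable depends on the compiler and FPU mode:

#include <cstdio>

// Spill a value through 64-bit memory; 'volatile' stops the compiler
// from keeping it in an (80-bit) x87 register.
static double ForceDouble(double v) {
  volatile double mem = v;
  return mem;
}

int main() {
  double a = 1.0;
  double b = 3.0;
  // On x87 builds a / b may carry 80 bits of precision in a register;
  // forcing both comparands through memory makes the comparison use the
  // 64-bit values, which is what the CHECK helpers above rely on.
  double quotient = a / b;
  if (ForceDouble(a / b) == ForceDouble(quotient)) {
    printf("comparison performed at 64-bit precision\n");
  }
  return 0;
}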
+
 namespace v8 {
   class Value;
   template <class T> class Handle;
diff --git a/src/circular-queue-inl.h b/src/circular-queue-inl.h
new file mode 100644
index 0000000..90ab0f5
--- /dev/null
+++ b/src/circular-queue-inl.h
@@ -0,0 +1,101 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CIRCULAR_BUFFER_INL_H_
+#define V8_CIRCULAR_BUFFER_INL_H_
+
+#include "circular-queue.h"
+
+namespace v8 {
+namespace internal {
+
+
+template<typename Record>
+CircularQueue<Record>::CircularQueue(int desired_buffer_size_in_bytes)
+    : buffer_(NewArray<Record>(desired_buffer_size_in_bytes / sizeof(Record))),
+      buffer_end_(buffer_ + desired_buffer_size_in_bytes / sizeof(Record)),
+      enqueue_semaphore_(
+          OS::CreateSemaphore(static_cast<int>(buffer_end_ - buffer_) - 1)),
+      enqueue_pos_(buffer_),
+      dequeue_pos_(buffer_) {
+  // To be able to distinguish between a full and an empty queue
+  // state, the queue must be capable of containing at least 2
+  // records.
+  ASSERT((buffer_end_ - buffer_) >= 2);
+}
+
+
+template<typename Record>
+CircularQueue<Record>::~CircularQueue() {
+  DeleteArray(buffer_);
+  delete enqueue_semaphore_;
+}
+
+
+template<typename Record>
+void CircularQueue<Record>::Dequeue(Record* rec) {
+  ASSERT(!IsEmpty());
+  *rec = *dequeue_pos_;
+  dequeue_pos_ = Next(dequeue_pos_);
+  // Tell we have a spare record.
+  enqueue_semaphore_->Signal();
+}
+
+
+template<typename Record>
+void CircularQueue<Record>::Enqueue(const Record& rec) {
+  // Wait until we have at least one spare record.
+  enqueue_semaphore_->Wait();
+  ASSERT(Next(enqueue_pos_) != dequeue_pos_);
+  *enqueue_pos_ = rec;
+  enqueue_pos_ = Next(enqueue_pos_);
+}
+
+
+template<typename Record>
+Record* CircularQueue<Record>::Next(Record* curr) {
+  return ++curr != buffer_end_ ? curr : buffer_;
+}
+
+
+void* SamplingCircularQueue::Enqueue() {
+  WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
+  void* result = producer_pos_->enqueue_pos;
+  producer_pos_->enqueue_pos += record_size_;
+  return result;
+}
+
+
+void SamplingCircularQueue::WrapPositionIfNeeded(
+    SamplingCircularQueue::Cell** pos) {
+  if (**pos == kEnd) *pos = buffer_;
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_CIRCULAR_BUFFER_INL_H_
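The CircularQueue template keeps one slot permanently free so that an empty queue (enqueue_pos_ == dequeue_pos_) stays distinguishable from a full one (Next(enqueue_pos_) == dequeue_pos_); this is also why the semaphore is created with capacity - 1. A single-threaded standalone sketch of that invariant, with no semaphore, just the wrap-around arithmetic:

#include <cassert>

int main() {
  const int kSize = 4;  // A buffer of 4 records holds at most 3.
  int buffer[kSize];
  int* end = buffer + kSize;
  int* enq = buffer;
  int* deq = buffer;
  auto next = [&](int* p) { return ++p != end ? p : buffer; };

  // Empty: the positions coincide.
  assert(enq == deq);

  // Fill until full: one slot must stay free to keep the two states
  // distinguishable.
  int produced = 0;
  while (next(enq) != deq) {
    *enq = produced++;
    enq = next(enq);
  }
  assert(produced == kSize - 1);  // Capacity is size - 1.

  // Drain back to empty.
  while (enq != deq) deq = next(deq);
  assert(enq == deq);
  return 0;
}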
diff --git a/src/circular-queue.cc b/src/circular-queue.cc
new file mode 100644
index 0000000..af650de
--- /dev/null
+++ b/src/circular-queue.cc
@@ -0,0 +1,121 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "circular-queue-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
+                                             int desired_chunk_size_in_bytes,
+                                             int buffer_size_in_chunks)
+    : record_size_(record_size_in_bytes / sizeof(Cell)),
+      chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
+                        record_size_in_bytes),
+      chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
+      buffer_size_(chunk_size_ * buffer_size_in_chunks),
+      // The distance ensures that producer and consumer never step on
+      // each other's chunks and helps eviction of produced data from
+      // the CPU cache (given that the chunk size is bigger than the cache).
+      producer_consumer_distance_(2 * chunk_size_),
+      buffer_(NewArray<Cell>(buffer_size_ + 1)) {
+  ASSERT(buffer_size_in_chunks > 2);
+  // Only need to keep the first cell of a chunk clean.
+  for (int i = 0; i < buffer_size_; i += chunk_size_) {
+    buffer_[i] = kClear;
+  }
+  buffer_[buffer_size_] = kEnd;
+
+  // Lay out the producer and consumer position pointers each on their
+  // own cache line to avoid cache line thrashing due to simultaneous
+  // updates of positions by different processor cores.
+  const int positions_size =
+      RoundUp(1, kProcessorCacheLineSize) +
+      RoundUp(static_cast<int>(sizeof(ProducerPosition)),
+              kProcessorCacheLineSize) +
+      RoundUp(static_cast<int>(sizeof(ConsumerPosition)),
+              kProcessorCacheLineSize);
+  positions_ = NewArray<byte>(positions_size);
+
+  producer_pos_ = reinterpret_cast<ProducerPosition*>(
+      RoundUp(positions_, kProcessorCacheLineSize));
+  producer_pos_->enqueue_pos = buffer_;
+
+  consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
+      reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize);
+  ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
+         positions_ + positions_size);
+  consumer_pos_->dequeue_chunk_pos = buffer_;
+  consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
+  consumer_pos_->dequeue_pos = NULL;
+}
+
+
+SamplingCircularQueue::~SamplingCircularQueue() {
+  DeleteArray(positions_);
+  DeleteArray(buffer_);
+}
+
+
+void* SamplingCircularQueue::StartDequeue() {
+  if (consumer_pos_->dequeue_pos != NULL) {
+    return consumer_pos_->dequeue_pos;
+  } else {
+    if (*consumer_pos_->dequeue_chunk_poll_pos != kClear) {
+      consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos;
+      consumer_pos_->dequeue_end_pos = consumer_pos_->dequeue_pos + chunk_size_;
+      return consumer_pos_->dequeue_pos;
+    } else {
+      return NULL;
+    }
+  }
+}
+
+
+void SamplingCircularQueue::FinishDequeue() {
+  consumer_pos_->dequeue_pos += record_size_;
+  if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return;
+  // Move to next chunk.
+  consumer_pos_->dequeue_pos = NULL;
+  *consumer_pos_->dequeue_chunk_pos = kClear;
+  consumer_pos_->dequeue_chunk_pos += chunk_size_;
+  WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos);
+  consumer_pos_->dequeue_chunk_poll_pos += chunk_size_;
+  WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos);
+}
+
+
+void SamplingCircularQueue::FlushResidualRecords() {
+  // Eliminate producer / consumer distance.
+  consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/circular-queue.h b/src/circular-queue.h
new file mode 100644
index 0000000..486f107
--- /dev/null
+++ b/src/circular-queue.h
@@ -0,0 +1,129 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CIRCULAR_QUEUE_H_
+#define V8_CIRCULAR_QUEUE_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Lock-based blocking circular queue for small records. Intended for
+// transferring small records between a single producer and a single
+// consumer. Blocks on the enqueue operation if the queue is full.
+template<typename Record>
+class CircularQueue {
+ public:
+  inline explicit CircularQueue(int desired_buffer_size_in_bytes);
+  inline ~CircularQueue();
+
+  INLINE(void Dequeue(Record* rec));
+  INLINE(void Enqueue(const Record& rec));
+  INLINE(bool IsEmpty()) { return enqueue_pos_ == dequeue_pos_; }
+
+ private:
+  INLINE(Record* Next(Record* curr));
+
+  Record* buffer_;
+  Record* const buffer_end_;
+  Semaphore* enqueue_semaphore_;
+  Record* enqueue_pos_;
+  Record* dequeue_pos_;
+
+  DISALLOW_COPY_AND_ASSIGN(CircularQueue);
+};
+
+
+// Lock-free cache-friendly sampling circular queue for large
+// records. Intended for fast transfer of large records between a
+// single producer and a single consumer. If the queue is full,
+// previous unread records are overwritten. The queue is designed to
+// avoid cache-line thrashing by preventing simultaneous reads and
+// writes to adjacent memory locations.
+//
+// IMPORTANT: because the producer never checks whether a chunk has
+// been read, it can catch up and overwrite a chunk that the consumer
+// is currently reading, resulting in a corrupt record being read.
+class SamplingCircularQueue {
+ public:
+  // Executed on the application thread.
+  SamplingCircularQueue(int record_size_in_bytes,
+                        int desired_chunk_size_in_bytes,
+                        int buffer_size_in_chunks);
+  ~SamplingCircularQueue();
+
+  // Enqueue returns a pointer to a memory location for storing the next
+  // record.
+  INLINE(void* Enqueue());
+
+  // Executed on the consumer (analyzer) thread.
+  // StartDequeue returns a pointer to a memory location for retrieving
+  // the next record. After the record has been read by the consumer,
+  // FinishDequeue must be called. Until then, subsequent calls to
+  // StartDequeue will return the same pointer.
+  void* StartDequeue();
+  void FinishDequeue();
+  // Because the consumer trails the producer by a fixed distance, the
+  // queue must be notified when producing has finished so that the
+  // remaining records in the buffer can be processed.
+  void FlushResidualRecords();
+
+  typedef AtomicWord Cell;
+  // Reserved values for the first cell of a record.
+  static const Cell kClear = 0;  // Marks clean (processed) chunks.
+  static const Cell kEnd = -1;   // Marks the end of the buffer.
+
+ private:
+  struct ProducerPosition {
+    Cell* enqueue_pos;
+  };
+  struct ConsumerPosition {
+    Cell* dequeue_chunk_pos;
+    Cell* dequeue_chunk_poll_pos;
+    Cell* dequeue_pos;
+    Cell* dequeue_end_pos;
+  };
+
+  INLINE(void WrapPositionIfNeeded(Cell** pos));
+
+  const int record_size_;
+  const int chunk_size_in_bytes_;
+  const int chunk_size_;
+  const int buffer_size_;
+  const int producer_consumer_distance_;
+  Cell* buffer_;
+  byte* positions_;
+  ProducerPosition* producer_pos_;
+  ConsumerPosition* consumer_pos_;
+
+  DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_CIRCULAR_QUEUE_H_
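
For reference, the producer/consumer protocol of the new SamplingCircularQueue
looks roughly like this. An illustrative sketch based on the header above;
ProcessRecord, record_size and the drain loop are placeholders, not code from
this change:

    // Producer (sampler) thread: reserve the next slot and fill it in place.
    void* slot = queue->Enqueue();
    memcpy(slot, &sample, record_size);

    // Consumer (analyzer) thread: drain every chunk that is ready.
    void* rec;
    while ((rec = queue->StartDequeue()) != NULL) {
      ProcessRecord(rec);      // read the record in place
      queue->FinishDequeue();  // advance; clears the chunk at its end
    }

    // Once sampling has stopped, make the records still inside the
    // producer/consumer distance visible, then drain again.
    queue->FlushResidualRecords();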
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 4d0fd29..9d5969b 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -61,14 +61,10 @@
 void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
   code->set_major_key(MajorKey());
 
-#ifdef ENABLE_OPROFILE_AGENT
-  // Register the generated stub with the OPROFILE agent.
-  OProfileAgent::CreateNativeCodeRegion(GetName(),
-                                        code->instruction_start(),
-                                        code->instruction_size());
-#endif
-
-  LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
+  OPROFILE(CreateNativeCodeRegion(GetName(),
+                                  code->instruction_start(),
+                                  code->instruction_size()));
+  PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
   Counters::total_stubs_code_size.Increment(code->instruction_size());
 
 #ifdef ENABLE_DISASSEMBLER
@@ -83,6 +79,11 @@
 }
 
 
+int CodeStub::GetCodeKind() {
+  return Code::STUB;
+}
+
+
 Handle<Code> CodeStub::GetCode() {
   Code* code;
   if (!FindCodeInCache(&code)) {
@@ -97,7 +98,10 @@
     masm.GetCode(&desc);
 
     // Copy the generated code into a heap object.
-    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+    Code::Flags flags = Code::ComputeFlags(
+        static_cast<Code::Kind>(GetCodeKind()),
+        InLoop(),
+        GetICState());
     Handle<Code> new_object =
         Factory::NewCode(desc, NULL, flags, masm.CodeObject());
     RecordCodeGeneration(*new_object, &masm);
@@ -132,7 +136,10 @@
     masm.GetCode(&desc);
 
     // Try to copy the generated code into a heap object.
-    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+    Code::Flags flags = Code::ComputeFlags(
+        static_cast<Code::Kind>(GetCodeKind()),
+        InLoop(),
+        GetICState());
     Object* new_object =
         Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
     if (new_object->IsFailure()) return new_object;
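
The LOG call plus explicit #ifdef ENABLE_OPROFILE_AGENT block is replaced here
(and throughout the patch) by the new PROFILE and OPROFILE macros, which
compile away when the corresponding feature is off. Their definitions live in
cpu-profiler.h and oprofile-agent.h, outside the quoted hunks; the shape is
roughly this (an illustration, not the literal definitions):

    #ifdef ENABLE_LOGGING_AND_PROFILING
    // Forwards code events to the logger and, when active, the CPU profiler.
    #define PROFILE(Call) LOG(Call)
    #else
    #define PROFILE(Call) ((void) 0)
    #endif

    #ifdef ENABLE_OPROFILE_AGENT
    #define OPROFILE(Call) OProfileAgent::Call
    #else
    #define OPROFILE(Call) ((void) 0)
    #endif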
diff --git a/src/code-stubs.h b/src/code-stubs.h
index d5189c2..de2ad56 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -28,6 +28,8 @@
 #ifndef V8_CODE_STUBS_H_
 #define V8_CODE_STUBS_H_
 
+#include "globals.h"
+
 namespace v8 {
 namespace internal {
 
@@ -139,6 +141,14 @@
   // lazily generated function should be fully optimized or not.
   virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
 
+  // GenericBinaryOpStub needs to override this.
+  virtual int GetCodeKind();
+
+  // GenericBinaryOpStub needs to override this.
+  virtual InlineCacheState GetICState() {
+    return UNINITIALIZED;
+  }
+
   // Returns a name for logging/debugging purposes.
   virtual const char* GetName() { return MajorName(MajorKey(), false); }
 
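The two new virtuals let a stub present itself to the runtime as an inline
cache instead of a plain Code::STUB. A hypothetical GenericBinaryOpStub
override might look like the sketch below; the real definition is
architecture-specific and outside these hunks, and ic_state_ is an invented
field standing in for whatever operand-type information the stub records:

    class GenericBinaryOpStub : public CodeStub {
      // ...
      // Present the stub as a binary-op inline cache so the IC machinery
      // can track and patch it.
      virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
      virtual InlineCacheState GetICState() { return ic_state_; }
    };
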
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
index da8cbf7..6534e7f 100644
--- a/src/codegen-inl.h
+++ b/src/codegen-inl.h
@@ -50,8 +50,11 @@
 namespace internal {
 
 Handle<Script> CodeGenerator::script() { return info_->script(); }
+
 bool CodeGenerator::is_eval() { return info_->is_eval(); }
 
+Scope* CodeGenerator::scope() { return info_->function()->scope(); }
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODEGEN_INL_H_
diff --git a/src/codegen.cc b/src/codegen.cc
index bc722bb..f89399a 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -31,7 +31,6 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
-#include "liveedit.h"
 #include "oprofile-agent.h"
 #include "prettyprinter.h"
 #include "register-allocator-inl.h"
@@ -39,6 +38,7 @@
 #include "runtime.h"
 #include "scopeinfo.h"
 #include "stub-cache.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -65,38 +65,6 @@
 CodeGenerator* CodeGeneratorScope::top_ = NULL;
 
 
-DeferredCode::DeferredCode()
-    : masm_(CodeGeneratorScope::Current()->masm()),
-      statement_position_(masm_->current_statement_position()),
-      position_(masm_->current_position()) {
-  ASSERT(statement_position_ != RelocInfo::kNoPosition);
-  ASSERT(position_ != RelocInfo::kNoPosition);
-
-  CodeGeneratorScope::Current()->AddDeferred(this);
-#ifdef DEBUG
-  comment_ = "";
-#endif
-
-  // Copy the register locations from the code generator's frame.
-  // These are the registers that will be spilled on entry to the
-  // deferred code and restored on exit.
-  VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
-  int sp_offset = frame->fp_relative(frame->stack_pointer_);
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int loc = frame->register_location(i);
-    if (loc == VirtualFrame::kIllegalIndex) {
-      registers_[i] = kIgnore;
-    } else if (frame->elements_[loc].is_synced()) {
-      // Needs to be restored on exit but not saved on entry.
-      registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
-    } else {
-      int offset = frame->fp_relative(loc);
-      registers_[i] = (offset < sp_offset) ? kPush : offset;
-    }
-  }
-}
-
-
 void CodeGenerator::ProcessDeferred() {
   while (!deferred_.is_empty()) {
     DeferredCode* code = deferred_.RemoveLast();
@@ -235,7 +203,6 @@
 // all the pieces into a Code object. This function is only to be called by
 // the compiler.cc code.
 Handle<Code> CodeGenerator::MakeCode(CompilationInfo* info) {
-  LiveEditFunctionTracker live_edit_tracker(info->function());
   Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
@@ -247,7 +214,6 @@
   MacroAssembler masm(NULL, kInitialBufferSize);
   CodeGenerator cgen(&masm);
   CodeGeneratorScope scope(&cgen);
-  live_edit_tracker.RecordFunctionScope(info->function()->scope());
   cgen.Generate(info);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
@@ -256,9 +222,7 @@
 
   InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
-  Handle<Code> result = MakeCodeEpilogue(cgen.masm(), flags, info);
-  live_edit_tracker.RecordFunctionCode(result);
-  return result;
+  return MakeCodeEpilogue(cgen.masm(), flags, info);
 }
 
 
@@ -266,7 +230,7 @@
 
 bool CodeGenerator::ShouldGenerateLog(Expression* type) {
   ASSERT(type != NULL);
-  if (!Logger::is_logging()) return false;
+  if (!Logger::is_logging() && !CpuProfiler::is_profiling()) return false;
   Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
   if (FLAG_log_regexp) {
     static Vector<const char> kRegexp = CStrVector("regexp");
@@ -335,8 +299,8 @@
           array->set_undefined(j++);
         }
       } else {
-        Handle<JSFunction> function =
-            Compiler::BuildBoilerplate(node->fun(), script(), this);
+        Handle<SharedFunctionInfo> function =
+            Compiler::BuildFunctionInfo(node->fun(), script(), this);
         // Check for stack-overflow exception.
         if (HasStackOverflow()) return;
         array->set(j++, *function);
@@ -350,39 +314,18 @@
 }
 
 
+// List of special runtime calls which are generated inline. For some of these
+// calls the code is generated fully inline, and for others an inlined call to
+// a code stub is generated.
 
-// Special cases: These 'runtime calls' manipulate the current
-// frame and are only used 1 or two places, so we generate them
-// inline instead of generating calls to them.  They are used
-// for implementing Function.prototype.call() and
-// Function.prototype.apply().
+#define INLINE_RUNTIME_ENTRY(Name, argc, ressize)                             \
+    {&CodeGenerator::Generate##Name,  "_" #Name, argc},                       \
+
 CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
-  {&CodeGenerator::GenerateIsSmi, "_IsSmi"},
-  {&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi"},
-  {&CodeGenerator::GenerateIsArray, "_IsArray"},
-  {&CodeGenerator::GenerateIsRegExp, "_IsRegExp"},
-  {&CodeGenerator::GenerateIsConstructCall, "_IsConstructCall"},
-  {&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"},
-  {&CodeGenerator::GenerateArgumentsAccess, "_Arguments"},
-  {&CodeGenerator::GenerateClassOf, "_ClassOf"},
-  {&CodeGenerator::GenerateValueOf, "_ValueOf"},
-  {&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
-  {&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
-  {&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
-  {&CodeGenerator::GenerateLog, "_Log"},
-  {&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
-  {&CodeGenerator::GenerateIsObject, "_IsObject"},
-  {&CodeGenerator::GenerateIsFunction, "_IsFunction"},
-  {&CodeGenerator::GenerateIsUndetectableObject, "_IsUndetectableObject"},
-  {&CodeGenerator::GenerateStringAdd, "_StringAdd"},
-  {&CodeGenerator::GenerateSubString, "_SubString"},
-  {&CodeGenerator::GenerateStringCompare, "_StringCompare"},
-  {&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
-  {&CodeGenerator::GenerateNumberToString, "_NumberToString"},
-  {&CodeGenerator::GenerateMathSin, "_Math_sin"},
-  {&CodeGenerator::GenerateMathCos, "_Math_cos"},
+  INLINE_RUNTIME_FUNCTION_LIST(INLINE_RUNTIME_ENTRY)
 };
 
+#undef INLINE_RUNTIME_ENTRY
 
 CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT(
     Handle<String> name) {
@@ -427,6 +370,14 @@
 }
 
 
+int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle<String> name) {
+  CodeGenerator::InlineRuntimeLUT* f =
+      CodeGenerator::FindInlineRuntimeLUT(name);
+  if (f != NULL) return f->nargs;
+  return -1;
+}
+
+
 // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
 // known result for the test expression, with no side effects.
 CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
@@ -498,7 +449,6 @@
 
 void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
   switch (type_) {
-    case READ_LENGTH: GenerateReadLength(masm); break;
     case READ_ELEMENT: GenerateReadElement(masm); break;
     case NEW_OBJECT: GenerateNewObject(masm); break;
   }
diff --git a/src/codegen.h b/src/codegen.h
index 8dcde84..a5bb31f 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -31,7 +31,7 @@
 #include "ast.h"
 #include "code-stubs.h"
 #include "runtime.h"
-#include "number-info.h"
+#include "type-info.h"
 
 // Include the declaration of the architecture defined class CodeGenerator.
 // The contract  to the shared code is that the the CodeGenerator is a subclass
@@ -58,7 +58,7 @@
 //   ProcessDeferred
 //   Generate
 //   ComputeLazyCompile
-//   BuildBoilerplate
+//   BuildFunctionInfo
 //   ComputeCallInitialize
 //   ComputeCallInitializeInLoop
 //   ProcessDeclarations
@@ -99,6 +99,40 @@
 namespace internal {
 
 
+#define INLINE_RUNTIME_FUNCTION_LIST(F) \
+  F(IsSmi, 1, 1)                                                             \
+  F(IsNonNegativeSmi, 1, 1)                                                  \
+  F(IsArray, 1, 1)                                                           \
+  F(IsRegExp, 1, 1)                                                          \
+  F(CallFunction, -1 /* receiver + n args + function */, 1)                  \
+  F(IsConstructCall, 0, 1)                                                   \
+  F(ArgumentsLength, 0, 1)                                                   \
+  F(Arguments, 1, 1)                                                         \
+  F(ClassOf, 1, 1)                                                           \
+  F(ValueOf, 1, 1)                                                           \
+  F(SetValueOf, 2, 1)                                                        \
+  F(FastCharCodeAt, 2, 1)                                                    \
+  F(CharFromCode, 1, 1)                                                      \
+  F(ObjectEquals, 2, 1)                                                      \
+  F(Log, 3, 1)                                                               \
+  F(RandomHeapNumber, 0, 1)                                                   \
+  F(IsObject, 1, 1)                                                          \
+  F(IsFunction, 1, 1)                                                        \
+  F(IsUndetectableObject, 1, 1)                                              \
+  F(StringAdd, 2, 1)                                                         \
+  F(SubString, 3, 1)                                                         \
+  F(StringCompare, 2, 1)                                                     \
+  F(RegExpExec, 4, 1)                                                        \
+  F(RegExpConstructResult, 3, 1)                                             \
+  F(GetFromCache, 2, 1)                                                      \
+  F(NumberToString, 1, 1)                                                    \
+  F(SwapElements, 3, 1)                                                      \
+  F(MathPow, 2, 1)                                                           \
+  F(MathSin, 1, 1)                                                           \
+  F(MathCos, 1, 1)                                                           \
+  F(MathSqrt, 1, 1)
+
+
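
Each F(Name, argc, ressize) row above is expanded by the INLINE_RUNTIME_ENTRY
adapter in codegen.cc into one lookup-table entry, so the table and this list
can no longer drift apart. For illustration, the first rows expand to:

    CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
      {&CodeGenerator::GenerateIsSmi, "_IsSmi", 1},
      {&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi", 1},
      {&CodeGenerator::GenerateIsArray, "_IsArray", 1},
      // ... one entry per row of the list.
    };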
 // Support for "structured" code comments.
 #ifdef DEBUG
 
@@ -199,7 +233,12 @@
   Label entry_label_;
   Label exit_label_;
 
-  int registers_[RegisterAllocator::kNumRegisters];
+  // C++ doesn't allow zero-length arrays, so we make the array length 1 even
+  // if we don't need it.
+  static const int kRegistersArrayLength =
+      (RegisterAllocator::kNumRegisters == 0) ?
+          1 : RegisterAllocator::kNumRegisters;
+  int registers_[kRegistersArrayLength];
 
 #ifdef DEBUG
   const char* comment_;
@@ -316,8 +355,13 @@
  public:
   CompareStub(Condition cc,
               bool strict,
-              NaNInformation nan_info = kBothCouldBeNaN) :
-      cc_(cc), strict_(strict), never_nan_nan_(nan_info == kCantBothBeNaN) { }
+              NaNInformation nan_info = kBothCouldBeNaN,
+              bool include_number_compare = true) :
+      cc_(cc),
+      strict_(strict),
+      never_nan_nan_(nan_info == kCantBothBeNaN),
+      include_number_compare_(include_number_compare),
+      name_(NULL) { }
 
   void Generate(MacroAssembler* masm);
 
@@ -330,6 +374,16 @@
   // generating the minor key for other comparisons to avoid creating more
   // stubs.
   bool never_nan_nan_;
+  // Whether to generate the number comparison code in the stub. Stubs without
+  // number comparison code are used when the number comparison has been
+  // inlined; such a stub is only called when one of the operands is not a
+  // number.
+  bool include_number_compare_;
+
+  // Encoding of the minor key CCCCCCCCCCCCCINS.
+  class StrictField: public BitField<bool, 0, 1> {};
+  class NeverNanNanField: public BitField<bool, 1, 1> {};
+  class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
+  class ConditionField: public BitField<int, 3, 13> {};
 
   Major MajorKey() { return Compare; }
 
@@ -343,12 +397,16 @@
 
   // Unfortunately you have to run without snapshots to see most of these
   // names in the profile since most compare stubs end up in the snapshot.
+  char* name_;
   const char* GetName();
 #ifdef DEBUG
   void Print() {
-    PrintF("CompareStub (cc %d), (strict %s)\n",
+    PrintF("CompareStub (cc %d), (strict %s), "
+           "(never_nan_nan %s), (number_compare %s)\n",
            static_cast<int>(cc_),
-           strict_ ? "true" : "false");
+           strict_ ? "true" : "false",
+           never_nan_nan_ ? "true" : "false",
+           include_number_compare_ ? "included" : "not included");
   }
 #endif
 };
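
Given the bit-field layout above, the per-architecture MinorKey() (outside
these hunks) composes the 16-bit minor key with the BitField helpers, roughly
along these lines (a sketch, not the literal implementation):

    int CompareStub::MinorKey() {
      // Low three bits hold the flags; the remaining 13 bits the condition.
      return ConditionField::encode(static_cast<int>(cc_))
             | StrictField::encode(strict_)
             | NeverNanNanField::encode(never_nan_nan_)
             | IncludeNumberCompareField::encode(include_number_compare_);
    }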
@@ -368,7 +426,8 @@
                     Label* throw_termination_exception,
                     Label* throw_out_of_memory_exception,
                     bool do_gc,
-                    bool always_allocate_scope);
+                    bool always_allocate_scope,
+                    int alignment_skew = 0);
   void GenerateThrowTOS(MacroAssembler* masm);
   void GenerateThrowUncatchable(MacroAssembler* masm,
                                 UncatchableExceptionType type);
@@ -401,7 +460,7 @@
   virtual bool GetCustomCache(Code** code_out);
   virtual void SetCustomCache(Code* value);
 
-  static const int kStackSpace = 6;
+  static const int kStackSpace = 5;
   static const int kArgc = 4;
  private:
   Handle<AccessorInfo> info() { return info_; }
@@ -449,7 +508,6 @@
 class ArgumentsAccessStub: public CodeStub {
  public:
   enum Type {
-    READ_LENGTH,
     READ_ELEMENT,
     NEW_OBJECT
   };
@@ -463,7 +521,6 @@
   int MinorKey() { return type_; }
 
   void Generate(MacroAssembler* masm);
-  void GenerateReadLength(MacroAssembler* masm);
   void GenerateReadElement(MacroAssembler* masm);
   void GenerateNewObject(MacroAssembler* masm);
 
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index d049d26..cec10fd 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -33,7 +33,6 @@
 namespace v8 {
 namespace internal {
 
-
 // The number of sub caches covering the different types to cache.
 static const int kSubCacheCount = 4;
 
@@ -48,6 +47,9 @@
 // Initial size of each compilation cache table allocated.
 static const int kInitialCacheSize = 64;
 
+// Index for the first generation in the cache.
+static const int kFirstGeneration = 0;
+
 // The compilation cache consists of several generational sub-caches which uses
 // this class as a base class. A sub-cache contains a compilation cache tables
 // for each generation of the sub-cache. Since the same source code string has
@@ -64,6 +66,15 @@
   // Get the compilation cache tables for a specific generation.
   Handle<CompilationCacheTable> GetTable(int generation);
 
+  // Accessors for first generation.
+  Handle<CompilationCacheTable> GetFirstTable() {
+    return GetTable(kFirstGeneration);
+  }
+  void SetFirstTable(Handle<CompilationCacheTable> value) {
+    ASSERT(kFirstGeneration < generations_);
+    tables_[kFirstGeneration] = *value;
+  }
+
   // Age the sub-cache by evicting the oldest generation and creating a new
   // young generation.
   void Age();
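
Age() itself is unchanged by this patch and therefore not quoted; for context,
it rotates the generation tables along these lines (a sketch):

    void CompilationSubCache::Age() {
      // Shift the generations up, implicitly evicting the oldest one.
      for (int i = generations_ - 1; i > 0; i--) {
        tables_[i] = tables_[i - 1];
      }
      // Mark the first generation as unborn.
      tables_[kFirstGeneration] = Heap::undefined_value();
    }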
@@ -91,14 +102,18 @@
   explicit CompilationCacheScript(int generations)
       : CompilationSubCache(generations) { }
 
-  Handle<JSFunction> Lookup(Handle<String> source,
-                            Handle<Object> name,
-                            int line_offset,
-                            int column_offset);
-  void Put(Handle<String> source, Handle<JSFunction> boilerplate);
+  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+                                    Handle<Object> name,
+                                    int line_offset,
+                                    int column_offset);
+  void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
 
  private:
-  bool HasOrigin(Handle<JSFunction> boilerplate,
+  // Note: Returns a new hash table if operation results in expansion.
+  Handle<CompilationCacheTable> TablePut(
+      Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
+  bool HasOrigin(Handle<SharedFunctionInfo> function_info,
                  Handle<Object> name,
                  int line_offset,
                  int column_offset);
@@ -113,11 +128,19 @@
   explicit CompilationCacheEval(int generations)
       : CompilationSubCache(generations) { }
 
-  Handle<JSFunction> Lookup(Handle<String> source, Handle<Context> context);
+  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+                                    Handle<Context> context);
 
   void Put(Handle<String> source,
            Handle<Context> context,
-           Handle<JSFunction> boilerplate);
+           Handle<SharedFunctionInfo> function_info);
+
+ private:
+  // Note: Returns a new hash table if operation results in expansion.
+  Handle<CompilationCacheTable> TablePut(
+      Handle<String> source,
+      Handle<Context> context,
+      Handle<SharedFunctionInfo> function_info);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
 };
@@ -134,6 +157,11 @@
   void Put(Handle<String> source,
            JSRegExp::Flags flags,
            Handle<FixedArray> data);
+ private:
+  // Note: Returns a new hash table if operation results in expansion.
+  Handle<CompilationCacheTable> TablePut(Handle<String> source,
+                                         JSRegExp::Flags flags,
+                                         Handle<FixedArray> data);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
 };
@@ -193,21 +221,20 @@
 
 
 void CompilationSubCache::Clear() {
-  for (int i = 0; i < generations_; i++) {
-    tables_[i] = Heap::undefined_value();
-  }
+  MemsetPointer(tables_, Heap::undefined_value(), generations_);
 }
 
 
 // We only re-use a cached function for some script source code if the
 // script originates from the same place. This is to avoid issues
 // when reporting errors, etc.
-bool CompilationCacheScript::HasOrigin(Handle<JSFunction> boilerplate,
-                                       Handle<Object> name,
-                                       int line_offset,
-                                       int column_offset) {
+bool CompilationCacheScript::HasOrigin(
+    Handle<SharedFunctionInfo> function_info,
+    Handle<Object> name,
+    int line_offset,
+    int column_offset) {
   Handle<Script> script =
-      Handle<Script>(Script::cast(boilerplate->shared()->script()));
+      Handle<Script>(Script::cast(function_info->script()));
   // If the script name isn't set, the boilerplate script should have
   // an undefined name to have the same origin.
   if (name.is_null()) {
@@ -227,10 +254,10 @@
 // be cached in the same script generation. Currently the first use
 // will be cached, but subsequent code from different source / line
 // won't.
-Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
-                                                  Handle<Object> name,
-                                                  int line_offset,
-                                                  int column_offset) {
+Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
+                                                          Handle<Object> name,
+                                                          int line_offset,
+                                                          int column_offset) {
   Object* result = NULL;
   int generation;
 
@@ -240,12 +267,13 @@
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
       Handle<Object> probe(table->Lookup(*source));
-      if (probe->IsJSFunction()) {
-        Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(probe);
-        // Break when we've found a suitable boilerplate function that
+      if (probe->IsSharedFunctionInfo()) {
+        Handle<SharedFunctionInfo> function_info =
+            Handle<SharedFunctionInfo>::cast(probe);
+        // Break when we've found a suitable shared function info that
         // matches the origin.
-        if (HasOrigin(boilerplate, name, line_offset, column_offset)) {
-          result = *boilerplate;
+        if (HasOrigin(function_info, name, line_offset, column_offset)) {
+          result = *function_info;
           break;
         }
       }
@@ -267,31 +295,37 @@
   // to see if we actually found a cached script. If so, we return a
   // handle created in the caller's handle scope.
   if (result != NULL) {
-    Handle<JSFunction> boilerplate(JSFunction::cast(result));
-    ASSERT(HasOrigin(boilerplate, name, line_offset, column_offset));
+    Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
+    ASSERT(HasOrigin(shared, name, line_offset, column_offset));
     // If the script was found in a later generation, we promote it to
     // the first generation to let it survive longer in the cache.
-    if (generation != 0) Put(source, boilerplate);
+    if (generation != 0) Put(source, shared);
     Counters::compilation_cache_hits.Increment();
-    return boilerplate;
+    return shared;
   } else {
     Counters::compilation_cache_misses.Increment();
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 }
 
 
-void CompilationCacheScript::Put(Handle<String> source,
-                                 Handle<JSFunction> boilerplate) {
-  HandleScope scope;
-  ASSERT(boilerplate->IsBoilerplate());
-  Handle<CompilationCacheTable> table = GetTable(0);
-  CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
+Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
+    Handle<String> source,
+    Handle<SharedFunctionInfo> function_info) {
+  CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *function_info),
+                     CompilationCacheTable);
 }
 
 
-Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
-                                                Handle<Context> context) {
+void CompilationCacheScript::Put(Handle<String> source,
+                                 Handle<SharedFunctionInfo> function_info) {
+  HandleScope scope;
+  SetFirstTable(TablePut(source, function_info));
+}
+
+
+Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
+    Handle<String> source, Handle<Context> context) {
   // Make sure not to leak the table into the surrounding handle
   // scope. Otherwise, we risk keeping old tables around even after
   // having cleared the cache.
@@ -301,32 +335,42 @@
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
       result = table->LookupEval(*source, *context);
-      if (result->IsJSFunction()) {
+      if (result->IsSharedFunctionInfo()) {
         break;
       }
     }
   }
-  if (result->IsJSFunction()) {
-    Handle<JSFunction> boilerplate(JSFunction::cast(result));
+  if (result->IsSharedFunctionInfo()) {
+    Handle<SharedFunctionInfo>
+        function_info(SharedFunctionInfo::cast(result));
     if (generation != 0) {
-      Put(source, context, boilerplate);
+      Put(source, context, function_info);
     }
     Counters::compilation_cache_hits.Increment();
-    return boilerplate;
+    return function_info;
   } else {
     Counters::compilation_cache_misses.Increment();
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 }
 
 
+Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
+    Handle<String> source,
+    Handle<Context> context,
+    Handle<SharedFunctionInfo> function_info) {
+  CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source,
+                                              *context,
+                                              *function_info),
+                     CompilationCacheTable);
+}
+
+
 void CompilationCacheEval::Put(Handle<String> source,
                                Handle<Context> context,
-                               Handle<JSFunction> boilerplate) {
+                               Handle<SharedFunctionInfo> function_info) {
   HandleScope scope;
-  ASSERT(boilerplate->IsBoilerplate());
-  Handle<CompilationCacheTable> table = GetTable(0);
-  CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate));
+  SetFirstTable(TablePut(source, context, function_info));
 }
 
 
@@ -360,35 +404,43 @@
 }
 
 
+Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
+    Handle<String> source,
+    JSRegExp::Flags flags,
+    Handle<FixedArray> data) {
+  CALL_HEAP_FUNCTION(GetFirstTable()->PutRegExp(*source, flags, *data),
+                     CompilationCacheTable);
+}
+
+
 void CompilationCacheRegExp::Put(Handle<String> source,
                                  JSRegExp::Flags flags,
                                  Handle<FixedArray> data) {
   HandleScope scope;
-  Handle<CompilationCacheTable> table = GetTable(0);
-  CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
+  SetFirstTable(TablePut(source, flags, data));
 }
 
 
-Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
-                                                  Handle<Object> name,
-                                                  int line_offset,
-                                                  int column_offset) {
+Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
+                                                          Handle<Object> name,
+                                                          int line_offset,
+                                                          int column_offset) {
   if (!IsEnabled()) {
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 
   return script.Lookup(source, name, line_offset, column_offset);
 }
 
 
-Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source,
-                                                Handle<Context> context,
-                                                bool is_global) {
+Handle<SharedFunctionInfo> CompilationCache::LookupEval(Handle<String> source,
+                                                        Handle<Context> context,
+                                                        bool is_global) {
   if (!IsEnabled()) {
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 
-  Handle<JSFunction> result;
+  Handle<SharedFunctionInfo> result;
   if (is_global) {
     result = eval_global.Lookup(source, context);
   } else {
@@ -409,30 +461,28 @@
 
 
 void CompilationCache::PutScript(Handle<String> source,
-                                 Handle<JSFunction> boilerplate) {
+                                 Handle<SharedFunctionInfo> function_info) {
   if (!IsEnabled()) {
     return;
   }
 
-  ASSERT(boilerplate->IsBoilerplate());
-  script.Put(source, boilerplate);
+  script.Put(source, function_info);
 }
 
 
 void CompilationCache::PutEval(Handle<String> source,
                                Handle<Context> context,
                                bool is_global,
-                               Handle<JSFunction> boilerplate) {
+                               Handle<SharedFunctionInfo> function_info) {
   if (!IsEnabled()) {
     return;
   }
 
   HandleScope scope;
-  ASSERT(boilerplate->IsBoilerplate());
   if (is_global) {
-    eval_global.Put(source, context, boilerplate);
+    eval_global.Put(source, context, function_info);
   } else {
-    eval_contextual.Put(source, context, boilerplate);
+    eval_contextual.Put(source, context, function_info);
   }
 }
 
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 3487c08..6358a26 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -32,42 +32,43 @@
 namespace internal {
 
 
-// The compilation cache keeps function boilerplates for compiled
-// scripts and evals. The boilerplates are looked up using the source
-// string as the key. For regular expressions the compilation data is cached.
+// The compilation cache keeps shared function infos for compiled
+// scripts and evals. The shared function infos are looked up using
+// the source string as the key. For regular expressions the
+// compilation data is cached.
 class CompilationCache {
  public:
-  // Finds the script function boilerplate for a source
+  // Finds the script shared function info for a source
   // string. Returns an empty handle if the cache doesn't contain a
   // script for the given source string with the right origin.
-  static Handle<JSFunction> LookupScript(Handle<String> source,
-                                         Handle<Object> name,
-                                         int line_offset,
-                                         int column_offset);
+  static Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
+                                                 Handle<Object> name,
+                                                 int line_offset,
+                                                 int column_offset);
 
-  // Finds the function boilerplate for a source string for eval in a
+  // Finds the shared function info for a source string for eval in a
   // given context.  Returns an empty handle if the cache doesn't
   // contain a script for the given source string.
-  static Handle<JSFunction> LookupEval(Handle<String> source,
-                                       Handle<Context> context,
-                                       bool is_global);
+  static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
+                                               Handle<Context> context,
+                                               bool is_global);
 
   // Returns the regexp data associated with the given regexp if it
   // is in cache, otherwise an empty handle.
   static Handle<FixedArray> LookupRegExp(Handle<String> source,
                                          JSRegExp::Flags flags);
 
-  // Associate the (source, kind) pair to the boilerplate. This may
-  // overwrite an existing mapping.
+  // Associate the (source, kind) pair to the shared function
+  // info. This may overwrite an existing mapping.
   static void PutScript(Handle<String> source,
-                        Handle<JSFunction> boilerplate);
+                        Handle<SharedFunctionInfo> function_info);
 
   // Associate the (source, context->closure()->shared(), kind) triple
-  // with the boilerplate. This may overwrite an existing mapping.
+  // with the shared function info. This may overwrite an existing mapping.
   static void PutEval(Handle<String> source,
                       Handle<Context> context,
                       bool is_global,
-                      Handle<JSFunction> boilerplate);
+                      Handle<SharedFunctionInfo> function_info);
 
   // Associate the (source, flags) pair to the given regexp data.
   // This may overwrite an existing mapping.
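
The caller-side pattern for the renamed interface mirrors what
Compiler::Compile does in compiler.cc below: look up a SharedFunctionInfo,
compile on a miss, then publish the result. A sketch, with CompileTheSource
standing in for the real compilation step:

    Handle<SharedFunctionInfo> info =
        CompilationCache::LookupScript(source, script_name,
                                       line_offset, column_offset);
    if (info.is_null()) {
      info = CompileTheSource(source);  // placeholder for the real work
      if (!info.is_null()) CompilationCache::PutScript(source, info);
    }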
diff --git a/src/compiler.cc b/src/compiler.cc
index 7b6734a..901f218 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,14 +31,15 @@
 #include "codegen-inl.h"
 #include "compilation-cache.h"
 #include "compiler.h"
+#include "data-flow.h"
 #include "debug.h"
 #include "fast-codegen.h"
+#include "flow-graph.h"
 #include "full-codegen.h"
+#include "liveedit.h"
 #include "oprofile-agent.h"
 #include "rewriter.h"
 #include "scopes.h"
-#include "usage-analyzer.h"
-#include "liveedit.h"
 
 namespace v8 {
 namespace internal {
@@ -48,7 +49,7 @@
   FunctionLiteral* function = info->function();
   ASSERT(function != NULL);
   // Rewrite the AST by introducing .result assignments where needed.
-  if (!Rewriter::Process(function) || !AnalyzeVariableUsage(function)) {
+  if (!Rewriter::Process(function)) {
     // Signal a stack overflow by returning a null handle.  The stack
     // overflow exception will be thrown by the caller.
     return Handle<Code>::null();
@@ -79,6 +80,27 @@
     return Handle<Code>::null();
   }
 
+  if (function->scope()->num_parameters() > 0 ||
+      function->scope()->num_stack_slots()) {
+    AssignedVariablesAnalyzer ava(function);
+    ava.Analyze();
+    if (ava.HasStackOverflow()) {
+      return Handle<Code>::null();
+    }
+  }
+
+  if (FLAG_use_flow_graph) {
+    FlowGraphBuilder builder;
+    FlowGraph* graph = builder.Build(function);
+    USE(graph);
+
+#ifdef DEBUG
+    if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
+      graph->PrintAsText(function->name());
+    }
+#endif
+  }
+
   // Generate code and return it.  Code generator selection is governed by
   // which backends are enabled and whether the function is considered
   // run-once code or not:
@@ -117,13 +139,21 @@
 }
 
 
-static Handle<JSFunction> MakeFunction(bool is_global,
-                                       bool is_eval,
-                                       Compiler::ValidationState validate,
-                                       Handle<Script> script,
-                                       Handle<Context> context,
-                                       v8::Extension* extension,
-                                       ScriptDataImpl* pre_data) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info) {
+  Handle<Context> context = Handle<Context>::null();
+  return MakeCode(context, info);
+}
+#endif
+
+
+static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
+    bool is_eval,
+    Compiler::ValidationState validate,
+    Handle<Script> script,
+    Handle<Context> context,
+    v8::Extension* extension,
+    ScriptDataImpl* pre_data) {
   CompilationZoneScope zone_scope(DELETE_ON_EXIT);
 
   PostponeInterruptsScope postpone;
@@ -162,10 +192,12 @@
   FunctionLiteral* lit =
       MakeAST(is_global, script, extension, pre_data, is_json);
 
+  LiveEditFunctionTracker live_edit_tracker(lit);
+
   // Check for parse errors.
   if (lit == NULL) {
     ASSERT(Top::has_pending_exception());
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 
   // Measure how long it takes to do the compilation; only take the
@@ -183,64 +215,63 @@
   // Check for stack-overflow exceptions.
   if (code.is_null()) {
     Top::StackOverflow();
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  // Log the code generation for the script. Check explicit whether logging is
-  // to avoid allocating when not required.
-  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
-    if (script->name()->IsString()) {
-      SmartPointer<char> data =
-          String::cast(script->name())->ToCString(DISALLOW_NULLS);
-      LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
-                          *code, *data));
-      OProfileAgent::CreateNativeCodeRegion(*data,
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    } else {
-      LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
-                          *code, ""));
-      OProfileAgent::CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    }
+  if (script->name()->IsString()) {
+    PROFILE(CodeCreateEvent(
+        is_eval ? Logger::EVAL_TAG :
+            Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+        *code, String::cast(script->name())));
+    OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
+                                    code->instruction_start(),
+                                    code->instruction_size()));
+  } else {
+    PROFILE(CodeCreateEvent(
+        is_eval ? Logger::EVAL_TAG :
+            Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+        *code, ""));
+    OPROFILE(CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
+                                    code->instruction_start(),
+                                    code->instruction_size()));
   }
-#endif
 
   // Allocate function.
-  Handle<JSFunction> fun =
-      Factory::NewFunctionBoilerplate(lit->name(),
-                                      lit->materialized_literal_count(),
-                                      code);
+  Handle<SharedFunctionInfo> result =
+      Factory::NewSharedFunctionInfo(lit->name(),
+                                     lit->materialized_literal_count(),
+                                     code);
 
   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
-  Compiler::SetFunctionInfo(fun, lit, true, script);
+  Compiler::SetFunctionInfo(result, lit, true, script);
 
   // Hint to the runtime system used when allocating space for initial
   // property space by setting the expected number of properties for
   // the instances of the function.
-  SetExpectedNofPropertiesFromEstimate(fun, lit->expected_property_count());
+  SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger
-  Debugger::OnAfterCompile(script, fun);
+  Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
 #endif
 
-  return fun;
+  live_edit_tracker.RecordFunctionInfo(result, lit);
+
+  return result;
 }
 
 
 static StaticResource<SafeStringInputBuffer> safe_string_input_buffer;
 
 
-Handle<JSFunction> Compiler::Compile(Handle<String> source,
-                                     Handle<Object> script_name,
-                                     int line_offset, int column_offset,
-                                     v8::Extension* extension,
-                                     ScriptDataImpl* input_pre_data,
-                                     Handle<Object> script_data,
-                                     NativesFlag natives) {
+Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
+                                             Handle<Object> script_name,
+                                             int line_offset,
+                                             int column_offset,
+                                             v8::Extension* extension,
+                                             ScriptDataImpl* input_pre_data,
+                                             Handle<Object> script_data,
+                                             NativesFlag natives) {
   int source_length = source->length();
   Counters::total_load_size.Increment(source_length);
   Counters::total_compile_size.Increment(source_length);
@@ -249,7 +280,7 @@
   VMState state(COMPILER);
 
   // Do a lookup in the compilation cache but not for extensions.
-  Handle<JSFunction> result;
+  Handle<SharedFunctionInfo> result;
   if (extension == NULL) {
     result = CompilationCache::LookupScript(source,
                                             script_name,
@@ -281,13 +312,13 @@
                                            : *script_data);
 
     // Compile the function and add it to the cache.
-    result = MakeFunction(true,
-                          false,
-                          DONT_VALIDATE_JSON,
-                          script,
-                          Handle<Context>::null(),
-                          extension,
-                          pre_data);
+    result = MakeFunctionInfo(true,
+                              false,
+                              DONT_VALIDATE_JSON,
+                              script,
+                              Handle<Context>::null(),
+                              extension,
+                              pre_data);
     if (extension == NULL && !result.is_null()) {
       CompilationCache::PutScript(source, result);
     }
@@ -303,10 +334,10 @@
 }
 
 
-Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
-                                         Handle<Context> context,
-                                         bool is_global,
-                                         ValidationState validate) {
+Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
+                                                 Handle<Context> context,
+                                                 bool is_global,
+                                                 ValidationState validate) {
   // Note that if validation is required then no path through this
   // function is allowed to return a value without validating that
   // the input is legal json.
@@ -322,20 +353,20 @@
   // invoke the compiler and add the result to the cache.  If we're
   // evaluating json we bypass the cache since we can't be sure a
   // potential value in the cache has been validated.
-  Handle<JSFunction> result;
+  Handle<SharedFunctionInfo> result;
   if (validate == DONT_VALIDATE_JSON)
     result = CompilationCache::LookupEval(source, context, is_global);
 
   if (result.is_null()) {
     // Create a script object describing the script to be compiled.
     Handle<Script> script = Factory::NewScript(source);
-    result = MakeFunction(is_global,
-                          true,
-                          validate,
-                          script,
-                          context,
-                          NULL,
-                          NULL);
+    result = MakeFunctionInfo(is_global,
+                              true,
+                              validate,
+                              script,
+                              context,
+                              NULL,
+                              NULL);
     if (!result.is_null() && validate != VALIDATE_JSON) {
       // For json it's unlikely that we'll ever see exactly the same
       // string again so we don't use the compilation cache.
@@ -393,14 +424,12 @@
     return false;
   }
 
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  LogCodeCreateEvent(Logger::LAZY_COMPILE_TAG,
-                     name,
-                     Handle<String>(shared->inferred_name()),
-                     start_position,
-                     info->script(),
-                     code);
-#endif
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
+                            name,
+                            Handle<String>(shared->inferred_name()),
+                            start_position,
+                            info->script(),
+                            code);
 
   // Update the shared function info with the compiled code.
   shared->set_code(*code);
@@ -420,9 +449,10 @@
 }
 
 
-Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
-                                              Handle<Script> script,
-                                              AstVisitor* caller) {
+Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
+                                                       Handle<Script> script,
+                                                       AstVisitor* caller) {
+  LiveEditFunctionTracker live_edit_tracker(literal);
 #ifdef DEBUG
   // We should not try to compile the same function literal more than
   // once.
@@ -445,7 +475,28 @@
     // The bodies of function literals have not yet been visited by
     // the AST optimizer/analyzer.
     if (!Rewriter::Optimize(literal)) {
-      return Handle<JSFunction>::null();
+      return Handle<SharedFunctionInfo>::null();
+    }
+
+    if (literal->scope()->num_parameters() > 0 ||
+        literal->scope()->num_stack_slots()) {
+      AssignedVariablesAnalyzer ava(literal);
+      ava.Analyze();
+      if (ava.HasStackOverflow()) {
+        return Handle<SharedFunctionInfo>::null();
+      }
+    }
+
+    if (FLAG_use_flow_graph) {
+      FlowGraphBuilder builder;
+      FlowGraph* graph = builder.Build(literal);
+      USE(graph);
+
+#ifdef DEBUG
+      if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
+        graph->PrintAsText(literal->name());
+      }
+#endif
     }
 
     // Generate code and return it.  The way that the compilation mode
@@ -483,38 +534,31 @@
     // Check for stack-overflow exception.
     if (code.is_null()) {
       caller->SetStackOverflow();
-      return Handle<JSFunction>::null();
+      return Handle<SharedFunctionInfo>::null();
     }
 
     // Function compilation complete.
-
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-    LogCodeCreateEvent(Logger::FUNCTION_TAG,
-                       literal->name(),
-                       literal->inferred_name(),
-                       literal->start_position(),
-                       script,
-                       code);
-#endif
+    RecordFunctionCompilation(Logger::FUNCTION_TAG,
+                              literal->name(),
+                              literal->inferred_name(),
+                              literal->start_position(),
+                              script,
+                              code);
   }
 
-  // Create a boilerplate function.
-  Handle<JSFunction> function =
-      Factory::NewFunctionBoilerplate(literal->name(),
-                                      literal->materialized_literal_count(),
-                                      code);
-  SetFunctionInfo(function, literal, false, script);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Notify debugger that a new function has been added.
-  Debugger::OnNewFunction(function);
-#endif
+  // Create a shared function info object.
+  Handle<SharedFunctionInfo> result =
+      Factory::NewSharedFunctionInfo(literal->name(),
+                                     literal->materialized_literal_count(),
+                                     code);
+  SetFunctionInfo(result, literal, false, script);
 
   // Set the expected number of properties for instances and return
   // the resulting function.
-  SetExpectedNofPropertiesFromEstimate(function,
+  SetExpectedNofPropertiesFromEstimate(result,
                                        literal->expected_property_count());
-  return function;
+  live_edit_tracker.RecordFunctionInfo(result, literal);
+  return result;
 }
 
 
@@ -522,55 +566,58 @@
 // The start_position points to the first '(' character after the function name
 // in the full script source. When counting characters in the script source the
 // the first character is number 0 (not 1).
-void Compiler::SetFunctionInfo(Handle<JSFunction> fun,
+void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
                                FunctionLiteral* lit,
                                bool is_toplevel,
                                Handle<Script> script) {
-  fun->shared()->set_length(lit->num_parameters());
-  fun->shared()->set_formal_parameter_count(lit->num_parameters());
-  fun->shared()->set_script(*script);
-  fun->shared()->set_function_token_position(lit->function_token_position());
-  fun->shared()->set_start_position(lit->start_position());
-  fun->shared()->set_end_position(lit->end_position());
-  fun->shared()->set_is_expression(lit->is_expression());
-  fun->shared()->set_is_toplevel(is_toplevel);
-  fun->shared()->set_inferred_name(*lit->inferred_name());
-  fun->shared()->SetThisPropertyAssignmentsInfo(
+  function_info->set_length(lit->num_parameters());
+  function_info->set_formal_parameter_count(lit->num_parameters());
+  function_info->set_script(*script);
+  function_info->set_function_token_position(lit->function_token_position());
+  function_info->set_start_position(lit->start_position());
+  function_info->set_end_position(lit->end_position());
+  function_info->set_is_expression(lit->is_expression());
+  function_info->set_is_toplevel(is_toplevel);
+  function_info->set_inferred_name(*lit->inferred_name());
+  function_info->SetThisPropertyAssignmentsInfo(
       lit->has_only_simple_this_property_assignments(),
       *lit->this_property_assignments());
-  fun->shared()->set_try_full_codegen(lit->try_full_codegen());
+  function_info->set_try_full_codegen(lit->try_full_codegen());
 }
 
 
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-void Compiler::LogCodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  Handle<String> name,
-                                  Handle<String> inferred_name,
-                                  int start_position,
-                                  Handle<Script> script,
-                                  Handle<Code> code) {
+void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+                                         Handle<String> name,
+                                         Handle<String> inferred_name,
+                                         int start_position,
+                                         Handle<Script> script,
+                                         Handle<Code> code) {
   // Log the code generation. If source information is available
   // include script name and line number. Check explicitly whether
   // logging is enabled as finding the line number is not free.
-  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
+  if (Logger::is_logging()
+      || OProfileAgent::is_enabled()
+      || CpuProfiler::is_profiling()) {
     Handle<String> func_name(name->length() > 0 ? *name : *inferred_name);
     if (script->name()->IsString()) {
       int line_num = GetScriptLineNumber(script, start_position) + 1;
-      LOG(CodeCreateEvent(tag, *code, *func_name,
-                          String::cast(script->name()), line_num));
-      OProfileAgent::CreateNativeCodeRegion(*func_name,
-                                            String::cast(script->name()),
-                                            line_num,
-                                            code->instruction_start(),
-                                            code->instruction_size());
+      USE(line_num);
+      PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
+                              *code, *func_name,
+                              String::cast(script->name()), line_num));
+      OPROFILE(CreateNativeCodeRegion(*func_name,
+                                      String::cast(script->name()),
+                                      line_num,
+                                      code->instruction_start(),
+                                      code->instruction_size()));
     } else {
-      LOG(CodeCreateEvent(tag, *code, *func_name));
-      OProfileAgent::CreateNativeCodeRegion(*func_name,
-                                            code->instruction_start(),
-                                            code->instruction_size());
+      PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
+                              *code, *func_name));
+      OPROFILE(CreateNativeCodeRegion(*func_name,
+                                      code->instruction_start(),
+                                      code->instruction_size()));
     }
   }
 }
-#endif
 
 } }  // namespace v8::internal
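
The PROFILE and OPROFILE wrappers used above let call sites stay unconditional: when the corresponding subsystem is compiled out, the macro collapses to nothing. A minimal sketch of that guard pattern, with a hypothetical simplified definition (the real macros live elsewhere in the tree and also handle plain logging):

    // Illustrative only: a simplified shape for a PROFILE-style guard macro.
    #ifdef ENABLE_LOGGING_AND_PROFILING
    #define PROFILE(Call)                                \
      do {                                               \
        if (v8::internal::CpuProfiler::is_profiling()) { \
          v8::internal::CpuProfiler::Call;               \
        }                                                \
      } while (false)
    #else
    #define PROFILE(Call) ((void) 0)
    #endif

With this shape, PROFILE(CodeCreateEvent(...)) costs one predictable branch when profiling is off and disappears entirely in builds without ENABLE_LOGGING_AND_PROFILING.
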
diff --git a/src/compiler.h b/src/compiler.h
index f972ac9..ade21f5 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -138,10 +138,7 @@
   // There should always be a function literal, but it may be set after
   // construction (for lazy compilation).
   FunctionLiteral* function() { return function_; }
-  void set_function(FunctionLiteral* literal) {
-    ASSERT(function_ == NULL);
-    function_ = literal;
-  }
+  void set_function(FunctionLiteral* literal) { function_ = literal; }
 
   // Simple accessors.
   bool is_eval() { return is_eval_; }
@@ -219,9 +216,9 @@
 // functions, they will be compiled and allocated as part of the compilation
 // of the source code.
 
-// Please note this interface returns function boilerplates.
-// This means you need to call Factory::NewFunctionFromBoilerplate
-// before you have a real function with context.
+// Please note this interface returns shared function infos.
+// This means you need to call Factory::NewFunctionFromSharedFunctionInfo
+// before you have a real function with a context.
 
 class Compiler : public AllStatic {
  public:
@@ -232,51 +229,56 @@
   // the return handle contains NULL.
 
   // Compile a String source within a context.
-  static Handle<JSFunction> Compile(Handle<String> source,
-                                    Handle<Object> script_name,
-                                    int line_offset, int column_offset,
-                                    v8::Extension* extension,
-                                    ScriptDataImpl* pre_data,
-                                    Handle<Object> script_data,
-                                    NativesFlag is_natives_code);
+  static Handle<SharedFunctionInfo> Compile(Handle<String> source,
+                                            Handle<Object> script_name,
+                                            int line_offset,
+                                            int column_offset,
+                                            v8::Extension* extension,
+                                            ScriptDataImpl* pre_data,
+                                            Handle<Object> script_data,
+                                            NativesFlag is_natives_code);
 
   // Compile a String source within a context for Eval.
-  static Handle<JSFunction> CompileEval(Handle<String> source,
-                                        Handle<Context> context,
-                                        bool is_global,
-                                        ValidationState validation);
+  static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
+                                                Handle<Context> context,
+                                                bool is_global,
+                                                ValidationState validation);
 
   // Compile from function info (used for lazy compilation). Returns
   // true on success and false if the compilation resulted in a stack
   // overflow.
   static bool CompileLazy(CompilationInfo* info);
 
-  // Compile a function boilerplate object (the function is possibly
+  // Compile a shared function info object (the function is possibly
   // lazily compiled). Called recursively from a backend code
-  // generator 'caller' to build the boilerplate.
-  static Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node,
-                                             Handle<Script> script,
-                                             AstVisitor* caller);
+  // generator 'caller' to build the shared function info.
+  static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
+                                                      Handle<Script> script,
+                                                      AstVisitor* caller);
 
   // Set the function info for a newly compiled function.
-  static void SetFunctionInfo(Handle<JSFunction> fun,
+  static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
                               FunctionLiteral* lit,
                               bool is_toplevel,
                               Handle<Script> script);
 
  private:
-
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  static void LogCodeCreateEvent(Logger::LogEventsAndTags tag,
-                                 Handle<String> name,
-                                 Handle<String> inferred_name,
-                                 int start_position,
-                                 Handle<Script> script,
-                                 Handle<Code> code);
-#endif
+  static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+                                        Handle<String> name,
+                                        Handle<String> inferred_name,
+                                        int start_position,
+                                        Handle<Script> script,
+                                        Handle<Code> code);
 };
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info);
+
+#endif
+
+
 // During compilation we need a global list of handles to constants
 // for frame elements.  When the zone gets deleted, we make sure to
 // clear this list of handles as well.
diff --git a/src/contexts.h b/src/contexts.h
index 2453db7..01bb21b 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -50,12 +50,6 @@
 // must always be allocated via Heap::AllocateContext() or
 // Factory::NewContext.
 
-// Comment for special_function_table:
-// Table for providing optimized/specialized functions.
-// The array contains triplets [object, general_function, optimized_function].
-// Primarily added to support built-in optimized variants of
-// Array.prototype.{push,pop}.
-
 #define GLOBAL_CONTEXT_FIELDS(V) \
   V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
   V(SECURITY_TOKEN_INDEX, Object, security_token) \
@@ -80,20 +74,23 @@
   V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
   V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
   V(FUNCTION_MAP_INDEX, Map, function_map) \
+  V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
   V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
   V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
-  V(SPECIAL_FUNCTION_TABLE_INDEX, FixedArray, special_function_table) \
+  V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
   V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
   V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
   V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
   V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
   V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
   V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
+  V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \
   V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
   V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
   V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
     call_as_constructor_delegate) \
   V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
+  V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
   V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
   V(MAP_CACHE_INDEX, Object, map_cache) \
@@ -181,7 +178,9 @@
     SECURITY_TOKEN_INDEX,
     ARGUMENTS_BOILERPLATE_INDEX,
     JS_ARRAY_MAP_INDEX,
+    REGEXP_RESULT_MAP_INDEX,
     FUNCTION_MAP_INDEX,
+    FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
     FUNCTION_INSTANCE_MAP_INDEX,
     INITIAL_OBJECT_PROTOTYPE_INDEX,
     BOOLEAN_FUNCTION_INDEX,
@@ -204,16 +203,17 @@
     GLOBAL_EVAL_FUN_INDEX,
     INSTANTIATE_FUN_INDEX,
     CONFIGURE_INSTANCE_FUN_INDEX,
-    SPECIAL_FUNCTION_TABLE_INDEX,
     MESSAGE_LISTENERS_INDEX,
     MAKE_MESSAGE_FUN_INDEX,
     GET_STACK_TRACE_LINE_INDEX,
     CONFIGURE_GLOBAL_INDEX,
     FUNCTION_CACHE_INDEX,
+    JSFUNCTION_RESULT_CACHES_INDEX,
     RUNTIME_CONTEXT_INDEX,
     CALL_AS_FUNCTION_DELEGATE_INDEX,
     CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
     SCRIPT_FUNCTION_INDEX,
+    OPAQUE_REFERENCE_FUNCTION_INDEX,
     CONTEXT_EXTENSION_FUNCTION_INDEX,
     OUT_OF_MEMORY_INDEX,
     MAP_CACHE_INDEX,
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index ba7220a..bf02947 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -41,21 +41,36 @@
 namespace v8 {
 namespace internal {
 
-// The fast double-to-int conversion routine does not guarantee
-// rounding towards zero.
-static inline int FastD2I(double x) {
-#ifdef __USE_ISOC99
-  // The ISO C99 standard defines the lrint() function which rounds a
-  // double to an integer according to the current rounding direction.
-  return lrint(x);
+// The fast double-to-unsigned-int conversion routine does not guarantee
+// rounding towards zero, or any reasonable value if the argument is larger
+// than what fits in an unsigned 32-bit integer.
+static inline unsigned int FastD2UI(double x) {
+  // There is no unsigned version of lrint, so there is no fast path
+  // in this function as there is in FastD2I. Using lrint doesn't work
+  // for values of 2^31 and above.
+
+  // Convert "small enough" doubles to uint32_t by fixing the 32
+  // least significant non-fractional bits in the low 32 bits of the
+  // double, and reading them from there.
+  const double k2Pow52 = 4503599627370496.0;
+  bool negative = x < 0;
+  if (negative) {
+    x = -x;
+  }
+  if (x < k2Pow52) {
+    x += k2Pow52;
+    uint32_t result;
+#ifdef BIG_ENDIAN_FLOATING_POINT
+    Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
 #else
-  // This is incredibly slow on Intel x86. The reason is that rounding
-  // towards zero is implied by the C standard. This means that the
-  // status register of the FPU has to be changed with the 'fldcw'
-  // instruction. This completely stalls the pipeline and takes many
-  // hundreds of clock cycles.
-  return static_cast<int>(x);
+    Address mantissa_ptr = reinterpret_cast<Address>(&x);
 #endif
+    // Copy least significant 32 bits of mantissa.
+    memcpy(&result, mantissa_ptr, sizeof(result));
+    return negative ? ~result + 1 : result;
+  }
+  // Large number (outside uint32 range), Infinity or NaN.
+  return 0x80000000u;  // Return integer indefinite.
 }
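
The FastD2UI rewrite above exploits an IEEE-754 property: for any non-negative x below 2^52, computing x + 2^52 leaves the integer bits of x in the low bits of the mantissa, so the 32 least significant bits can be read back with a plain memory copy. A standalone sketch of the same trick, assuming a little-endian double layout (the helper name is ours):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Demonstrates the 2^52 trick from FastD2UI above. Little-endian only;
    // the BIG_ENDIAN_FLOATING_POINT branch is omitted for brevity.
    static uint32_t DoubleToUint32ViaMantissa(double x) {
      const double k2Pow52 = 4503599627370496.0;  // 2^52
      bool negative = x < 0;
      if (negative) x = -x;
      if (x >= k2Pow52) return 0x80000000u;  // Out of range: indefinite.
      x += k2Pow52;  // Integer bits of x now sit in the mantissa.
      uint32_t result;
      memcpy(&result, &x, sizeof(result));  // Low 32 bits of the mantissa.
      return negative ? ~result + 1 : result;  // Two's-complement negate.
    }

    int main() {
      printf("%u\n", DoubleToUint32ViaMantissa(4294967295.0));  // 4294967295
      printf("%u\n", DoubleToUint32ViaMantissa(-1.0));          // 4294967295
      return 0;
    }

Note that the addition rounds to nearest rather than truncating, which is exactly why the header comment warns that rounding towards zero is not guaranteed.
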
 
 
diff --git a/src/conversions.cc b/src/conversions.cc
index fd6d38d..66faae8 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -26,11 +26,13 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <stdarg.h>
+#include <limits.h>
 
 #include "v8.h"
 
 #include "conversions-inl.h"
 #include "factory.h"
+#include "fast-dtoa.h"
 #include "scanner.h"
 
 namespace v8 {
@@ -46,134 +48,320 @@
   return -1;
 }
 
+namespace {
 
-// Provide a common interface to getting a character at a certain
-// index from a char* or a String object.
-static inline int GetChar(const char* str, int index) {
-  ASSERT(index >= 0 && index < StrLength(str));
-  return str[index];
+// C++-style iterator adaptor for StringInputBuffer
+// (unlike C++ iterators the end-marker has different type).
+class StringInputBufferIterator {
+ public:
+  class EndMarker {};
+
+  explicit StringInputBufferIterator(StringInputBuffer* buffer);
+
+  int operator*() const;
+  void operator++();
+  bool operator==(EndMarker const&) const { return end_; }
+  bool operator!=(EndMarker const& m) const { return !end_; }
+
+ private:
+  StringInputBuffer* const buffer_;
+  int current_;
+  bool end_;
+};
+
+
+StringInputBufferIterator::StringInputBufferIterator(
+    StringInputBuffer* buffer) : buffer_(buffer) {
+  ++(*this);
+}
+
+int StringInputBufferIterator::operator*() const {
+  return current_;
 }
 
 
-static inline int GetChar(String* str, int index) {
-  return str->Get(index);
-}
-
-
-static inline int GetLength(const char* str) {
-  return StrLength(str);
-}
-
-
-static inline int GetLength(String* str) {
-  return str->length();
-}
-
-
-static inline const char* GetCString(const char* str, int index) {
-  return str + index;
-}
-
-
-static inline const char* GetCString(String* str, int index) {
-  int length = str->length();
-  char* result = NewArray<char>(length + 1);
-  for (int i = index; i < length; i++) {
-    uc16 c = str->Get(i);
-    if (c <= 127) {
-      result[i - index] = static_cast<char>(c);
-    } else {
-      result[i - index] = 127;  // Force number parsing to fail.
-    }
+void StringInputBufferIterator::operator++() {
+  end_ = !buffer_->has_more();
+  if (!end_) {
+    current_ = buffer_->GetNext();
   }
-  result[length - index] = '\0';
-  return result;
+}
 }
 
 
-static inline void ReleaseCString(const char* original, const char* str) {
-}
-
-
-static inline void ReleaseCString(String* original, const char* str) {
-  DeleteArray(const_cast<char *>(str));
-}
-
-
-static inline bool IsSpace(const char* str, int index) {
-  ASSERT(index >= 0 && index < StrLength(str));
-  return Scanner::kIsWhiteSpace.get(str[index]);
-}
-
-
-static inline bool IsSpace(String* str, int index) {
-  return Scanner::kIsWhiteSpace.get(str->Get(index));
-}
-
-
-static inline bool SubStringEquals(const char* str,
-                                   int index,
-                                   const char* other) {
-  return strncmp(str + index, other, strlen(other)) != 0;
-}
-
-
-static inline bool SubStringEquals(String* str, int index, const char* other) {
-  HandleScope scope;
-  int str_length = str->length();
-  int other_length = StrLength(other);
-  int end = index + other_length < str_length ?
-            index + other_length :
-            str_length;
-  Handle<String> substring =
-      Factory::NewSubString(Handle<String>(str), index, end);
-  return substring->IsEqualTo(Vector<const char>(other, other_length));
-}
-
-
-// Check if a string should be parsed as an octal number.  The string
-// can be either a char* or a String*.
-template<class S>
-static bool ShouldParseOctal(S* s, int i) {
-  int index = i;
-  int len = GetLength(s);
-  if (index < len && GetChar(s, index) != '0') return false;
-
-  // If the first real character (following '0') is not an octal
-  // digit, bail out early. This also takes care of numbers of the
-  // forms 0.xxx and 0exxx by not allowing the first 0 to be
-  // interpreted as an octal.
-  index++;
-  if (index < len) {
-    int d = GetChar(s, index) - '0';
-    if (d < 0 || d > 7) return false;
-  } else {
-    return false;
+template <class Iterator, class EndMark>
+static bool SubStringEquals(Iterator* current,
+                            EndMark end,
+                            const char* substring) {
+  ASSERT(**current == *substring);
+  for (substring++; *substring != '\0'; substring++) {
+    ++*current;
+    if (*current == end || **current != *substring) return false;
   }
-
-  // Traverse all digits (including the first). If there is an octal
-  // prefix which is not a part of a longer decimal prefix, we return
-  // true. Otherwise, false is returned.
-  while (index < len) {
-    int d = GetChar(s, index++) - '0';
-    if (d == 8 || d == 9) return false;
-    if (d <  0 || d >  7) return true;
-  }
+  ++*current;
   return true;
 }
 
 
 extern "C" double gay_strtod(const char* s00, const char** se);
 
+// Maximum number of significant digits in decimal representation.
+// The longest possible double in decimal representation is
+// (2^53 - 1) * 2^-1074, that is, (2^53 - 1) * 5^1074 / 10^1074
+// (768 digits). If we parse a number whose first digits are equal to the
+// mean of 2 adjacent doubles (which could have up to 769 digits), the result
+// must be rounded to the bigger one unless the tail consists of zeros, so
+// we don't need to preserve all the digits.
+const int kMaxSignificantDigits = 772;
 
-// Parse an int from a string starting a given index and in a given
-// radix.  The string can be either a char* or a String*.
-template <class S>
-static int InternalStringToInt(S* s, int i, int radix, double* value) {
-  int len = GetLength(s);
 
-  // Setup limits for computing the value.
-  ASSERT(2 <= radix && radix <= 36);
+static const double JUNK_STRING_VALUE = OS::nan_value();
+
+
+// Returns true if a nonspace was found and false if the end was reached.
+template <class Iterator, class EndMark>
+static inline bool AdvanceToNonspace(Iterator* current, EndMark end) {
+  while (*current != end) {
+    if (!Scanner::kIsWhiteSpace.get(**current)) return true;
+    ++*current;
+  }
+  return false;
+}
+
+
+static bool isDigit(int x, int radix) {
+  return (x >= '0' && x <= '9' && x < '0' + radix)
+      || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
+      || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
+}
+
+
+static double SignedZero(bool sign) {
+  return sign ? -0.0 : 0.0;
+}
+
+
+// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
+template <int radix_log_2, class Iterator, class EndMark>
+static double InternalStringToIntDouble(Iterator current,
+                                        EndMark end,
+                                        bool sign,
+                                        bool allow_trailing_junk) {
+  ASSERT(current != end);
+
+  // Skip leading 0s.
+  while (*current == '0') {
+    ++current;
+    if (current == end) return SignedZero(sign);
+  }
+
+  int64_t number = 0;
+  int exponent = 0;
+  const int radix = (1 << radix_log_2);
+
+  do {
+    int digit;
+    if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
+      digit = static_cast<char>(*current) - '0';
+    } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
+      digit = static_cast<char>(*current) - 'a' + 10;
+    } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
+      digit = static_cast<char>(*current) - 'A' + 10;
+    } else {
+      if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
+        break;
+      } else {
+        return JUNK_STRING_VALUE;
+      }
+    }
+
+    number = number * radix + digit;
+    int overflow = static_cast<int>(number >> 53);
+    if (overflow != 0) {
+      // Overflow occurred. Need to determine which direction to round the
+      // result.
+      int overflow_bits_count = 1;
+      while (overflow > 1) {
+        overflow_bits_count++;
+        overflow >>= 1;
+      }
+
+      int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
+      int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
+      number >>= overflow_bits_count;
+      exponent = overflow_bits_count;
+
+      bool zero_tail = true;
+      while (true) {
+        ++current;
+        if (current == end || !isDigit(*current, radix)) break;
+        zero_tail = zero_tail && *current == '0';
+        exponent += radix_log_2;
+      }
+
+      if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+        return JUNK_STRING_VALUE;
+      }
+
+      int middle_value = (1 << (overflow_bits_count - 1));
+      if (dropped_bits > middle_value) {
+        number++;  // Rounding up.
+      } else if (dropped_bits == middle_value) {
+        // Round to even, for consistency with decimals: the half-way case
+        // rounds up if the significant part is odd and down otherwise.
+        if ((number & 1) != 0 || !zero_tail) {
+          number++;  // Rounding up.
+        }
+      }
+
+      // Rounding up may cause overflow.
+      if ((number & ((int64_t)1 << 53)) != 0) {
+        exponent++;
+        number >>= 1;
+      }
+      break;
+    }
+    ++current;
+  } while (current != end);
+
+  ASSERT(number < ((int64_t)1 << 53));
+  ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
+
+  if (exponent == 0) {
+    if (sign) {
+      if (number == 0) return -0.0;
+      number = -number;
+    }
+    return static_cast<double>(number);
+  }
+
+  ASSERT(number != 0);
+  // The double could be constructed faster from number (mantissa), exponent
+  // and sign. Assuming it's a rare case more simple code is used.
+  return static_cast<double>(sign ? -number : number) * pow(2.0, exponent);
+}
+
+
+template <class Iterator, class EndMark>
+static double InternalStringToInt(Iterator current, EndMark end, int radix) {
+  const bool allow_trailing_junk = true;
+  const double empty_string_val = JUNK_STRING_VALUE;
+
+  if (!AdvanceToNonspace(&current, end)) return empty_string_val;
+
+  bool sign = false;
+  bool leading_zero = false;
+
+  if (*current == '+') {
+    // Ignore leading sign; skip following spaces.
+    ++current;
+    if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
+  } else if (*current == '-') {
+    ++current;
+    if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
+    sign = true;
+  }
+
+  if (radix == 0) {
+    // Radix detection.
+    if (*current == '0') {
+      ++current;
+      if (current == end) return SignedZero(sign);
+      if (*current == 'x' || *current == 'X') {
+        radix = 16;
+        ++current;
+        if (current == end) return JUNK_STRING_VALUE;
+      } else {
+        radix = 8;
+        leading_zero = true;
+      }
+    } else {
+      radix = 10;
+    }
+  } else if (radix == 16) {
+    if (*current == '0') {
+      // Allow "0x" prefix.
+      ++current;
+      if (current == end) return SignedZero(sign);
+      if (*current == 'x' || *current == 'X') {
+        ++current;
+        if (current == end) return JUNK_STRING_VALUE;
+      } else {
+        leading_zero = true;
+      }
+    }
+  }
+
+  if (radix < 2 || radix > 36) return JUNK_STRING_VALUE;
+
+  // Skip leading zeros.
+  while (*current == '0') {
+    leading_zero = true;
+    ++current;
+    if (current == end) return SignedZero(sign);
+  }
+
+  if (!leading_zero && !isDigit(*current, radix)) {
+    return JUNK_STRING_VALUE;
+  }
+
+  if (IsPowerOf2(radix)) {
+    switch (radix) {
+      case 2:
+        return InternalStringToIntDouble<1>(
+                   current, end, sign, allow_trailing_junk);
+      case 4:
+        return InternalStringToIntDouble<2>(
+                   current, end, sign, allow_trailing_junk);
+      case 8:
+        return InternalStringToIntDouble<3>(
+                   current, end, sign, allow_trailing_junk);
+
+      case 16:
+        return InternalStringToIntDouble<4>(
+                   current, end, sign, allow_trailing_junk);
+
+      case 32:
+        return InternalStringToIntDouble<5>(
+                   current, end, sign, allow_trailing_junk);
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  if (radix == 10) {
+    // Parsing with strtod.
+    const int kMaxSignificantDigits = 309;  // Doubles are less than 1.8e308.
+    // The buffer may contain up to kMaxSignificantDigits + 1 digits and a
+    // terminating zero.
+    const int kBufferSize = kMaxSignificantDigits + 2;
+    char buffer[kBufferSize];
+    int buffer_pos = 0;
+    while (*current >= '0' && *current <= '9') {
+      if (buffer_pos <= kMaxSignificantDigits) {
+        // If the number has more than kMaxSignificantDigits it will be parsed
+        // as infinity.
+        ASSERT(buffer_pos < kBufferSize);
+        buffer[buffer_pos++] = static_cast<char>(*current);
+      }
+      ++current;
+      if (current == end) break;
+    }
+
+    if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+      return JUNK_STRING_VALUE;
+    }
+
+    ASSERT(buffer_pos < kBufferSize);
+    buffer[buffer_pos++] = '\0';
+    return sign ? -gay_strtod(buffer, NULL) : gay_strtod(buffer, NULL);
+  }
+
+  // The following code causes accumulating rounding error for numbers greater
+  // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
+  // 16, or 32, then mathInt may be an implementation-dependent approximation to
+  // the mathematical integer value" (15.1.2.2).
+
   int lim_0 = '0' + (radix < 10 ? radix : 10);
   int lim_a = 'a' + (radix - 10);
   int lim_A = 'A' + (radix - 10);
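
When InternalStringToIntDouble above overflows the 53-bit significand, the bits that fall off decide the rounding direction, and an exact half-way case rounds to an even significand. A minimal sketch of just that rounding step (function name and test values are ours):

    #include <assert.h>
    #include <stdint.h>

    // Round-half-to-even on the dropped bits, mirroring the overflow branch
    // of InternalStringToIntDouble above. Illustrative names only.
    static int64_t RoundDropped(int64_t number, int dropped_bits,
                                int overflow_bits_count, bool zero_tail) {
      int middle = 1 << (overflow_bits_count - 1);
      if (dropped_bits > middle) {
        number++;  // Strictly more than half: round up.
      } else if (dropped_bits == middle && ((number & 1) != 0 || !zero_tail)) {
        number++;  // Tie: round up if odd, or if digits beyond were nonzero.
      }
      return number;
    }

    int main() {
      assert(RoundDropped(4, 2, 2, true) == 4);   // Tie, even: stays.
      assert(RoundDropped(5, 2, 2, true) == 6);   // Tie, odd: rounds up.
      assert(RoundDropped(4, 2, 2, false) == 5);  // Nonzero tail: rounds up.
      assert(RoundDropped(4, 1, 2, true) == 4);   // Below half: truncates.
      return 0;
    }
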
@@ -183,22 +371,22 @@
   // loops as long as possible to avoid losing precision.
 
   double v = 0.0;
-  int j;
-  for (j = i; j < len;) {
+  bool done = false;
+  do {
     // Parse the longest part of the string starting at index j
     // possible while keeping the multiplier, and thus the part
     // itself, within 32 bits.
-    uint32_t part = 0, multiplier = 1;
-    int k;
-    for (k = j; k < len; k++) {
-      int c = GetChar(s, k);
-      if (c >= '0' && c < lim_0) {
-        c = c - '0';
-      } else if (c >= 'a' && c < lim_a) {
-        c = c - 'a' + 10;
-      } else if (c >= 'A' && c < lim_A) {
-        c = c - 'A' + 10;
+    unsigned int part = 0, multiplier = 1;
+    while (true) {
+      int d;
+      if (*current >= '0' && *current < lim_0) {
+        d = *current - '0';
+      } else if (*current >= 'a' && *current < lim_a) {
+        d = *current - 'a' + 10;
+      } else if (*current >= 'A' && *current < lim_A) {
+        d = *current - 'A' + 10;
       } else {
+        done = true;
         break;
       }
 
@@ -206,150 +394,347 @@
       // in 32 bits. When we can't guarantee that the next iteration
       // will not overflow the multiplier, we stop parsing the part
       // by leaving the loop.
-      static const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
+      const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
       uint32_t m = multiplier * radix;
       if (m > kMaximumMultiplier) break;
-      part = part * radix + c;
+      part = part * radix + d;
       multiplier = m;
       ASSERT(multiplier > part);
+
+      ++current;
+      if (current == end) {
+        done = true;
+        break;
+      }
     }
 
-    // Compute the number of part digits. If no digits were parsed;
-    // we're done parsing the entire string.
-    int digits = k - j;
-    if (digits == 0) break;
-
     // Update the value and skip the part in the string.
-    ASSERT(multiplier ==
-           pow(static_cast<double>(radix), static_cast<double>(digits)));
     v = v * multiplier + part;
-    j = k;
-  }
+  } while (!done);
 
-  // If the resulting value is larger than 2^53 the value does not fit
-  // in the mantissa of the double and there is a loss of precision.
-  // When the value is larger than 2^53 the rounding depends on the
-  // code generation.  If the code generator spills the double value
-  // it uses 64 bits and if it does not it uses 80 bits.
-  //
-  // If there is a potential for overflow we resort to strtod for
-  // radix 10 numbers to get higher precision.  For numbers in another
-  // radix we live with the loss of precision.
-  static const double kPreciseConversionLimit = 9007199254740992.0;
-  if (radix == 10 && v > kPreciseConversionLimit) {
-    const char* cstr = GetCString(s, i);
-    const char* end;
-    v = gay_strtod(cstr, &end);
-    ReleaseCString(s, cstr);
-  }
-
-  *value = v;
-  return j;
-}
-
-
-int StringToInt(String* str, int index, int radix, double* value) {
-  return InternalStringToInt(str, index, radix, value);
-}
-
-
-int StringToInt(const char* str, int index, int radix, double* value) {
-  return InternalStringToInt(const_cast<char*>(str), index, radix, value);
-}
-
-
-static const double JUNK_STRING_VALUE = OS::nan_value();
-
-
-// Convert a string to a double value.  The string can be either a
-// char* or a String*.
-template<class S>
-static double InternalStringToDouble(S* str,
-                                     int flags,
-                                     double empty_string_val) {
-  double result = 0.0;
-  int index = 0;
-
-  int len = GetLength(str);
-
-  // Skip leading spaces.
-  while ((index < len) && IsSpace(str, index)) index++;
-
-  // Is the string empty?
-  if (index >= len) return empty_string_val;
-
-  // Get the first character.
-  uint16_t first = GetChar(str, index);
-
-  // Numbers can only start with '-', '+', '.', 'I' (Infinity), or a digit.
-  if (first != '-' && first != '+' && first != '.' && first != 'I' &&
-      (first > '9' || first < '0')) {
+  if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
     return JUNK_STRING_VALUE;
   }
 
-  // Compute sign of result based on first character.
-  int sign = 1;
-  if (first == '-') {
-    sign = -1;
-    index++;
-    // String only containing a '-' are junk chars.
-    if (index == len) return JUNK_STRING_VALUE;
-  }
-
-  // do we have a hex number?
-  // (since the string is 0-terminated, it's ok to look one char beyond the end)
-  if ((flags & ALLOW_HEX) != 0 &&
-      (index + 1) < len &&
-      GetChar(str, index) == '0' &&
-      (GetChar(str, index + 1) == 'x' || GetChar(str, index + 1) == 'X')) {
-    index += 2;
-    index = StringToInt(str, index, 16, &result);
-  } else if ((flags & ALLOW_OCTALS) != 0 && ShouldParseOctal(str, index)) {
-    // NOTE: We optimistically try to parse the number as an octal (if
-    // we're allowed to), even though this is not as dictated by
-    // ECMA-262. The reason for doing this is compatibility with IE and
-    // Firefox.
-    index = StringToInt(str, index, 8, &result);
-  } else {
-    const char* cstr = GetCString(str, index);
-    const char* end;
-    // Optimistically parse the number and then, if that fails,
-    // check if it might have been {+,-,}Infinity.
-    result = gay_strtod(cstr, &end);
-    ReleaseCString(str, cstr);
-    if (result != 0.0 || end != cstr) {
-      // It appears that strtod worked
-      index += static_cast<int>(end - cstr);
-    } else {
-      // Check for {+,-,}Infinity
-      bool is_negative = (GetChar(str, index) == '-');
-      if (GetChar(str, index) == '+' || GetChar(str, index) == '-')
-        index++;
-      if (!SubStringEquals(str, index, "Infinity"))
-        return JUNK_STRING_VALUE;
-      result = is_negative ? -V8_INFINITY : V8_INFINITY;
-      index += 8;
-    }
-  }
-
-  if ((flags & ALLOW_TRAILING_JUNK) == 0) {
-    // skip trailing spaces
-    while ((index < len) && IsSpace(str, index)) index++;
-    // string ending with junk?
-    if (index < len) return JUNK_STRING_VALUE;
-  }
-
-  return sign * result;
+  return sign ? -v : v;
 }
 
 
+// Converts a string to a double value. Assumes the Iterator supports
+// the following operations:
+// 1. current == end and current != end (no other comparisons are allowed).
+// 2. *current - gets the current character in the sequence.
+// 3. ++current (advances the position).
+template <class Iterator, class EndMark>
+static double InternalStringToDouble(Iterator current,
+                                     EndMark end,
+                                     int flags,
+                                     double empty_string_val) {
+  // To make sure that iterator dereferencing is valid the following
+  // convention is used:
+  // 1. Each '++current' statement is followed by check for equality to 'end'.
+  // 2. If AdvanceToNonspace returned false then current == end.
+  // 3. If 'current' becomes equal to 'end' the function returns or goes to
+  // 'parsing_done'.
+  // 4. 'current' is not dereferenced after the 'parsing_done' label.
+  // 5. Code before 'parsing_done' may rely on 'current != end'.
+  if (!AdvanceToNonspace(&current, end)) return empty_string_val;
+
+  const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
+
+  // The longest form of simplified number is: "-<significant digits>.1eXXX\0".
+  const int kBufferSize = kMaxSignificantDigits + 10;
+  char buffer[kBufferSize];  // NOLINT: size is known at compile time.
+  int buffer_pos = 0;
+
+  // Exponent will be adjusted if insignificant digits of the integer part
+  // or insignificant leading zeros of the fractional part are dropped.
+  int exponent = 0;
+  int significant_digits = 0;
+  int insignificant_digits = 0;
+  bool nonzero_digit_dropped = false;
+  bool fractional_part = false;
+
+  bool sign = false;
+
+  if (*current == '+') {
+    // Ignore leading sign; skip following spaces.
+    ++current;
+    if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
+  } else if (*current == '-') {
+    buffer[buffer_pos++] = '-';
+    ++current;
+    if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
+    sign = true;
+  }
+
+  static const char kInfinitySymbol[] = "Infinity";
+  if (*current == kInfinitySymbol[0]) {
+    if (!SubStringEquals(&current, end, kInfinitySymbol)) {
+      return JUNK_STRING_VALUE;
+    }
+
+    if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+      return JUNK_STRING_VALUE;
+    }
+
+    ASSERT(buffer_pos == 0 || buffer[0] == '-');
+    return buffer_pos > 0 ? -V8_INFINITY : V8_INFINITY;
+  }
+
+  bool leading_zero = false;
+  if (*current == '0') {
+    ++current;
+    if (current == end) return SignedZero(sign);
+
+    leading_zero = true;
+
+    // It could be hexadecimal value.
+    if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
+      ++current;
+      if (current == end || !isDigit(*current, 16)) {
+        return JUNK_STRING_VALUE;  // "0x".
+      }
+
+      bool sign = (buffer_pos > 0 && buffer[0] == '-');
+      return InternalStringToIntDouble<4>(current,
+                                          end,
+                                          sign,
+                                          allow_trailing_junk);
+    }
+
+    // Ignore leading zeros in the integer part.
+    while (*current == '0') {
+      ++current;
+      if (current == end) return SignedZero(sign);
+    }
+  }
+
+  bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
+
+  // Copy significant digits of the integer part (if any) to the buffer.
+  while (*current >= '0' && *current <= '9') {
+    if (significant_digits < kMaxSignificantDigits) {
+      ASSERT(buffer_pos < kBufferSize);
+      buffer[buffer_pos++] = static_cast<char>(*current);
+      significant_digits++;
+      // Will later check if it's an octal in the buffer.
+    } else {
+      insignificant_digits++;  // Move the digit into the exponential part.
+      nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+    }
+    octal = octal && *current < '8';
+    ++current;
+    if (current == end) goto parsing_done;
+  }
+
+  if (significant_digits == 0) {
+    octal = false;
+  }
+
+  if (*current == '.') {
+    ++current;
+    if (current == end) {
+      if (significant_digits == 0 && !leading_zero) {
+        return JUNK_STRING_VALUE;
+      } else {
+        goto parsing_done;
+      }
+    }
+
+    if (significant_digits == 0) {
+      // octal = false;
+      // Integer part consists of 0 or is absent. Significant digits start after
+      // leading zeros (if any).
+      while (*current == '0') {
+        ++current;
+        if (current == end) return SignedZero(sign);
+        exponent--;  // Move this 0 into the exponent.
+      }
+    }
+
+    ASSERT(buffer_pos < kBufferSize);
+    buffer[buffer_pos++] = '.';
+    fractional_part = true;
+
+    // There is the fractional part.
+    while (*current >= '0' && *current <= '9') {
+      if (significant_digits < kMaxSignificantDigits) {
+        ASSERT(buffer_pos < kBufferSize);
+        buffer[buffer_pos++] = static_cast<char>(*current);
+        significant_digits++;
+      } else {
+        // Ignore insignificant digits in the fractional part.
+        nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+      }
+      ++current;
+      if (current == end) goto parsing_done;
+    }
+  }
+
+  if (!leading_zero && exponent == 0 && significant_digits == 0) {
+    // If leading_zero is true then the string contains zeros.
+    // If exponent < 0 then the string was [+-]\.0*...
+    // If significant_digits != 0 the string is not equal to 0.
+    // Otherwise there are no digits in the string.
+    return JUNK_STRING_VALUE;
+  }
+
+  // Parse exponential part.
+  if (*current == 'e' || *current == 'E') {
+    if (octal) return JUNK_STRING_VALUE;
+    ++current;
+    if (current == end) {
+      if (allow_trailing_junk) {
+        goto parsing_done;
+      } else {
+        return JUNK_STRING_VALUE;
+      }
+    }
+    char sign = '+';
+    if (*current == '+' || *current == '-') {
+      sign = static_cast<char>(*current);
+      ++current;
+      if (current == end) {
+        if (allow_trailing_junk) {
+          goto parsing_done;
+        } else {
+          return JUNK_STRING_VALUE;
+        }
+      }
+    }
+
+    if (current == end || *current < '0' || *current > '9') {
+      if (allow_trailing_junk) {
+        goto parsing_done;
+      } else {
+        return JUNK_STRING_VALUE;
+      }
+    }
+
+    const int max_exponent = INT_MAX / 2;
+    ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
+    int num = 0;
+    do {
+      // Check overflow.
+      int digit = *current - '0';
+      if (num >= max_exponent / 10
+          && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
+        num = max_exponent;
+      } else {
+        num = num * 10 + digit;
+      }
+      ++current;
+    } while (current != end && *current >= '0' && *current <= '9');
+
+    exponent += (sign == '-' ? -num : num);
+  }
+
+  if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
+    return JUNK_STRING_VALUE;
+  }
+
+  parsing_done:
+  exponent += insignificant_digits;
+
+  if (octal) {
+    bool sign = buffer[0] == '-';
+    int start_pos = (sign ? 1 : 0);
+
+    return InternalStringToIntDouble<3>(buffer + start_pos,
+                                        buffer + buffer_pos,
+                                        sign,
+                                        allow_trailing_junk);
+  }
+
+  if (nonzero_digit_dropped) {
+    if (insignificant_digits) buffer[buffer_pos++] = '.';
+    buffer[buffer_pos++] = '1';
+  }
+
+  // If the number has no more than kMaxDigitsInInt digits and no
+  // fractional part, it can be parsed faster (without checks for
+  // spaces, overflow, etc.).
+  const int kMaxDigitsInInt = 9 * sizeof(int) / 4;  // NOLINT
+
+  if (exponent != 0) {
+    ASSERT(buffer_pos < kBufferSize);
+    buffer[buffer_pos++] = 'e';
+    if (exponent < 0) {
+      ASSERT(buffer_pos < kBufferSize);
+      buffer[buffer_pos++] = '-';
+      exponent = -exponent;
+    }
+    if (exponent > 999) exponent = 999;  // Result will be Infinity or 0 or -0.
+
+    const int exp_digits = 3;
+    for (int i = 0; i < exp_digits; i++) {
+      buffer[buffer_pos + exp_digits - 1 - i] = '0' + exponent % 10;
+      exponent /= 10;
+    }
+    ASSERT(exponent == 0);
+    buffer_pos += exp_digits;
+  } else if (!fractional_part && significant_digits <= kMaxDigitsInInt) {
+    if (significant_digits == 0) return SignedZero(sign);
+    ASSERT(buffer_pos > 0);
+    int num = 0;
+    int start_pos = (buffer[0] == '-' ? 1 : 0);
+    for (int i = start_pos; i < buffer_pos; i++) {
+      ASSERT(buffer[i] >= '0' && buffer[i] <= '9');
+      num = 10 * num + (buffer[i] - '0');
+    }
+    return static_cast<double>(start_pos == 0 ? num : -num);
+  }
+
+  ASSERT(buffer_pos < kBufferSize);
+  buffer[buffer_pos] = '\0';
+
+  return gay_strtod(buffer, NULL);
+}
+
 double StringToDouble(String* str, int flags, double empty_string_val) {
-  return InternalStringToDouble(str, flags, empty_string_val);
+  StringShape shape(str);
+  if (shape.IsSequentialAscii()) {
+    const char* begin = SeqAsciiString::cast(str)->GetChars();
+    const char* end = begin + str->length();
+    return InternalStringToDouble(begin, end, flags, empty_string_val);
+  } else if (shape.IsSequentialTwoByte()) {
+    const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
+    const uc16* end = begin + str->length();
+    return InternalStringToDouble(begin, end, flags, empty_string_val);
+  } else {
+    StringInputBuffer buffer(str);
+    return InternalStringToDouble(StringInputBufferIterator(&buffer),
+                                  StringInputBufferIterator::EndMarker(),
+                                  flags,
+                                  empty_string_val);
+  }
+}
+
+
+double StringToInt(String* str, int radix) {
+  StringShape shape(str);
+  if (shape.IsSequentialAscii()) {
+    const char* begin = SeqAsciiString::cast(str)->GetChars();
+    const char* end = begin + str->length();
+    return InternalStringToInt(begin, end, radix);
+  } else if (shape.IsSequentialTwoByte()) {
+    const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
+    const uc16* end = begin + str->length();
+    return InternalStringToInt(begin, end, radix);
+  } else {
+    StringInputBuffer buffer(str);
+    return InternalStringToInt(StringInputBufferIterator(&buffer),
+                               StringInputBufferIterator::EndMarker(),
+                               radix);
+  }
 }
 
 
 double StringToDouble(const char* str, int flags, double empty_string_val) {
-  return InternalStringToDouble(str, flags, empty_string_val);
+  const char* end = str + StrLength(str);
+
+  return InternalStringToDouble(str, end, flags, empty_string_val);
 }
 
 
@@ -382,8 +767,19 @@
       int decimal_point;
       int sign;
 
-      char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
-      int length = StrLength(decimal_rep);
+      char* decimal_rep;
+      bool used_gay_dtoa = false;
+      const int kFastDtoaBufferCapacity = kFastDtoaMaximalLength + 1;
+      char fast_dtoa_buffer[kFastDtoaBufferCapacity];
+      int length;
+      if (FastDtoa(v, Vector<char>(fast_dtoa_buffer, kFastDtoaBufferCapacity),
+                   &sign, &length, &decimal_point)) {
+        decimal_rep = fast_dtoa_buffer;
+      } else {
+        decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
+        used_gay_dtoa = true;
+        length = StrLength(decimal_rep);
+      }
 
       if (sign) builder.AddCharacter('-');
 
@@ -418,7 +814,7 @@
         builder.AddFormatted("%d", exponent);
       }
 
-      freedtoa(decimal_rep);
+      if (used_gay_dtoa) freedtoa(decimal_rep);
     }
   }
   return builder.Finalize();
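
Two things are worth noting about the rewritten file as a whole. First, the parser is now templated over an (Iterator, EndMark) pair instead of a string type, which is why StringToDouble and StringToInt above can feed it a raw char* range, a uc16* range, or a streaming StringInputBufferIterator whose end marker is a distinct type. A minimal sketch of that shape (CountDigits is an illustrative stand-in):

    #include <stdio.h>
    #include <string.h>

    // The (Iterator, EndMark) convention used by the InternalStringTo*
    // templates: one body serves pointer ranges and streaming iterators.
    template <class Iterator, class EndMark>
    static int CountDigits(Iterator current, EndMark end) {
      int digits = 0;
      while (current != end) {
        if (*current >= '0' && *current <= '9') digits++;
        ++current;
      }
      return digits;
    }

    int main() {
      const char* s = "-1.25e3";
      printf("%d\n", CountDigits(s, s + strlen(s)));  // Prints 4.
      return 0;
    }

Second, DoubleToCString now tries the fixed-size FastDtoa buffer first and falls back to Gay's heap-allocating dtoa only when the fast path fails, which is why freedtoa is called only when used_gay_dtoa is set.
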
diff --git a/src/conversions.h b/src/conversions.h
index 67f7d53..c4ceea6 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -32,11 +32,17 @@
 namespace internal {
 
 
-// The fast double-to-int conversion routine does not guarantee
+// The fast double-to-(unsigned-)int conversion routine does not guarantee
 // rounding towards zero.
 // The result is unspecified if x is infinite or NaN, or if the rounded
 // integer value is outside the range of type int.
-static inline int FastD2I(double x);
+static inline int FastD2I(double x) {
+  // The static_cast conversion from double to int used to be slow, but
+  // new benchmarks show it is now much faster than lrint().
+  return static_cast<int>(x);
+}
+
+static inline unsigned int FastD2UI(double x);
 
 
 static inline double FastI2D(int x) {
@@ -94,8 +100,7 @@
 double StringToDouble(String* str, int flags, double empty_string_val = 0);
 
 // Converts a string into an integer.
-int StringToInt(String* str, int index, int radix, double* value);
-int StringToInt(const char* str, int index, int radix, double* value);
+double StringToInt(String* str, int radix);
 
 // Converts a double to a string value according to ECMA-262 9.8.1.
 // The buffer should be large enough for any floating point number.
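
FastD2I's new body leans on the fact that static_cast<int>(double) truncates toward zero under the C++ conversion rules. A two-line check of that behavior:

    #include <stdio.h>

    // static_cast from double to int truncates toward zero, the behaviour
    // FastD2I now relies on instead of lrint().
    int main() {
      printf("%d %d\n", static_cast<int>(2.9), static_cast<int>(-2.9));
      // Prints: 2 -2
      return 0;
    }
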
diff --git a/src/counters.h b/src/counters.h
index 5f4dca9..aed46cf 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -65,7 +65,7 @@
   // may receive a different location to store it's counter.
   // The return value must not be cached and re-used across
   // threads, although a single thread is free to cache it.
-  static int *FindLocation(const char* name) {
+  static int* FindLocation(const char* name) {
     if (!lookup_function_) return NULL;
     return lookup_function_(name);
   }
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
new file mode 100644
index 0000000..e454a9a
--- /dev/null
+++ b/src/cpu-profiler-inl.h
@@ -0,0 +1,99 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CPU_PROFILER_INL_H_
+#define V8_CPU_PROFILER_INL_H_
+
+#include "cpu-profiler.h"
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#include "circular-queue-inl.h"
+#include "profile-generator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
+  code_map->AddCode(start, entry, size);
+}
+
+
+void CodeMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
+  code_map->MoveCode(from, to);
+}
+
+
+void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
+  code_map->DeleteCode(start);
+}
+
+
+void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) {
+  code_map->AddAlias(alias, start);
+}
+
+
+TickSampleEventRecord* TickSampleEventRecord::init(void* value) {
+  TickSampleEventRecord* result =
+      reinterpret_cast<TickSampleEventRecord*>(value);
+  result->filler = 1;
+  ASSERT(result->filler != SamplingCircularQueue::kClear);
+  // Init the required fields only.
+  result->sample.pc = NULL;
+  result->sample.frames_count = 0;
+  return result;
+}
+
+
+TickSample* ProfilerEventsProcessor::TickSampleEvent() {
+  generator_->Tick();
+  TickSampleEventRecord* evt =
+      TickSampleEventRecord::init(ticks_buffer_.Enqueue());
+  evt->order = enqueue_order_;  // No increment!
+  return &evt->sample;
+}
+
+
+bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
+    Logger::LogEventsAndTags tag) {
+  // In browser mode, leave only callbacks and non-native JS entries.
+  // We filter out regular expressions as currently we can't tell
+  // whether they originate from native scripts, so let's not confuse
+  // people by showing them weird regexes they didn't write.
+  return FLAG_prof_browser_mode
+      && (tag != Logger::CALLBACK_TAG
+          && tag != Logger::FUNCTION_TAG
+          && tag != Logger::LAZY_COMPILE_TAG
+          && tag != Logger::SCRIPT_TAG);
+}
+
+} }  // namespace v8::internal
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+#endif  // V8_CPU_PROFILER_INL_H_
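
The cpu-profiler.cc diff below dispatches dequeued records through a CODE_EVENTS_TYPE_LIST macro: the usual X-macro pattern, where a single list generates both the type tags and the switch arms so the two cannot drift apart. A self-contained sketch with made-up event handling (the real list also carries the record class names):

    #include <stdio.h>

    // X-macro sketch mirroring the CODE_EVENTS_TYPE_LIST dispatch in
    // ProcessCodeEvent below. Simplified: one parameter instead of two.
    #define EVENT_TYPE_LIST(V) \
      V(CODE_CREATION)         \
      V(CODE_MOVE)             \
      V(CODE_DELETE)

    enum EventType {
    #define DECLARE_TYPE(type) type,
      EVENT_TYPE_LIST(DECLARE_TYPE)
    #undef DECLARE_TYPE
    };

    static void Dispatch(EventType type) {
      switch (type) {
    #define TYPE_CASE(type)               \
        case type:                        \
          printf("handled %s\n", #type);  \
          break;
        EVENT_TYPE_LIST(TYPE_CASE)
    #undef TYPE_CASE
      }
    }

    int main() {
      Dispatch(CODE_MOVE);  // Prints: handled CODE_MOVE
      return 0;
    }
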
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
new file mode 100644
index 0000000..ed3f692
--- /dev/null
+++ b/src/cpu-profiler.cc
@@ -0,0 +1,494 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cpu-profiler-inl.h"
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#include "log-inl.h"
+
+#include "../include/v8-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kEventsBufferSize = 256*KB;
+static const int kTickSamplesBufferChunkSize = 64*KB;
+static const int kTickSamplesBufferChunksCount = 16;
+
+
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+    : generator_(generator),
+      running_(false),
+      events_buffer_(kEventsBufferSize),
+      ticks_buffer_(sizeof(TickSampleEventRecord),
+                    kTickSamplesBufferChunkSize,
+                    kTickSamplesBufferChunksCount),
+      enqueue_order_(0) { }
+
+
+void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
+                                                  const char* prefix,
+                                                  String* name,
+                                                  Address start) {
+  if (FilterOutCodeCreateEvent(tag)) return;
+  CodeEventsContainer evt_rec;
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->type = CodeEventRecord::CODE_CREATION;
+  rec->order = ++enqueue_order_;
+  rec->start = start;
+  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
+  rec->size = 1;
+  events_buffer_.Enqueue(evt_rec);
+}
+
+
+void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                                              String* name,
+                                              String* resource_name,
+                                              int line_number,
+                                              Address start,
+                                              unsigned size) {
+  if (FilterOutCodeCreateEvent(tag)) return;
+  CodeEventsContainer evt_rec;
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->type = CodeEventRecord::CODE_CREATION;
+  rec->order = ++enqueue_order_;
+  rec->start = start;
+  rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
+  rec->size = size;
+  events_buffer_.Enqueue(evt_rec);
+}
+
+
+void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                                              const char* name,
+                                              Address start,
+                                              unsigned size) {
+  if (FilterOutCodeCreateEvent(tag)) return;
+  CodeEventsContainer evt_rec;
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->type = CodeEventRecord::CODE_CREATION;
+  rec->order = ++enqueue_order_;
+  rec->start = start;
+  rec->entry = generator_->NewCodeEntry(tag, name);
+  rec->size = size;
+  events_buffer_.Enqueue(evt_rec);
+}
+
+
+void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                                              int args_count,
+                                              Address start,
+                                              unsigned size) {
+  if (FilterOutCodeCreateEvent(tag)) return;
+  CodeEventsContainer evt_rec;
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->type = CodeEventRecord::CODE_CREATION;
+  rec->order = ++enqueue_order_;
+  rec->start = start;
+  rec->entry = generator_->NewCodeEntry(tag, args_count);
+  rec->size = size;
+  events_buffer_.Enqueue(evt_rec);
+}
+
+
+void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
+  CodeEventsContainer evt_rec;
+  CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
+  rec->type = CodeEventRecord::CODE_MOVE;
+  rec->order = ++enqueue_order_;
+  rec->from = from;
+  rec->to = to;
+  events_buffer_.Enqueue(evt_rec);
+}
+
+
+void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
+  CodeEventsContainer evt_rec;
+  CodeDeleteEventRecord* rec = &evt_rec.CodeDeleteEventRecord_;
+  rec->type = CodeEventRecord::CODE_DELETE;
+  rec->order = ++enqueue_order_;
+  rec->start = from;
+  events_buffer_.Enqueue(evt_rec);
+}
+
+
+void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
+                                                  Address start) {
+  CodeEventsContainer evt_rec;
+  CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_;
+  rec->type = CodeEventRecord::CODE_ALIAS;
+  rec->order = ++enqueue_order_;
+  rec->alias = alias;
+  rec->start = start;
+  events_buffer_.Enqueue(evt_rec);
+}
+
+
+void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) {
+  CodeMoveEvent(from, to);
+}
+
+
+void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
+  CodeDeleteEvent(from);
+}
+
+
+void ProfilerEventsProcessor::RegExpCodeCreateEvent(
+    Logger::LogEventsAndTags tag,
+    const char* prefix,
+    String* name,
+    Address start,
+    unsigned size) {
+  if (FilterOutCodeCreateEvent(tag)) return;
+  CodeEventsContainer evt_rec;
+  CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+  rec->type = CodeEventRecord::CODE_CREATION;
+  rec->order = ++enqueue_order_;
+  rec->start = start;
+  rec->entry = generator_->NewCodeEntry(tag, prefix, name);
+  rec->size = size;
+  events_buffer_.Enqueue(evt_rec);
+}
+
+
+bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
+  if (!events_buffer_.IsEmpty()) {
+    CodeEventsContainer record;
+    events_buffer_.Dequeue(&record);
+    switch (record.generic.type) {
+#define PROFILER_TYPE_CASE(type, clss)                          \
+      case CodeEventRecord::type:                               \
+        record.clss##_.UpdateCodeMap(generator_->code_map());   \
+        break;
+
+      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)
+
+#undef PROFILER_TYPE_CASE
+      default: return true;  // Skip record.
+    }
+    *dequeue_order = record.generic.order;
+    return true;
+  }
+  return false;
+}
+
+
+bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
+  while (true) {
+    const TickSampleEventRecord* rec =
+        TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
+    if (rec == NULL) return false;
+    if (rec->order == dequeue_order) {
+      generator_->RecordTickSample(rec->sample);
+      ticks_buffer_.FinishDequeue();
+    } else {
+      return true;
+    }
+  }
+}
+
+
+void ProfilerEventsProcessor::Run() {
+  unsigned dequeue_order = 0;
+  running_ = true;
+
+  while (running_) {
+    // Process ticks while we have any.
+    if (ProcessTicks(dequeue_order)) {
+      // All ticks of the current dequeue_order are processed,
+      // proceed to the next code event.
+      ProcessCodeEvent(&dequeue_order);
+    }
+    YieldCPU();
+  }
+
+  // Process remaining tick events.
+  ticks_buffer_.FlushResidualRecords();
+  // Keep processing while tick events remain, stepping through the
+  // code events as needed; any leftover code events are skipped.
+  while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
+}
+
+
+CpuProfiler* CpuProfiler::singleton_ = NULL;
+
+void CpuProfiler::StartProfiling(const char* title) {
+  ASSERT(singleton_ != NULL);
+  singleton_->StartCollectingProfile(title);
+}
+
+
+void CpuProfiler::StartProfiling(String* title) {
+  ASSERT(singleton_ != NULL);
+  singleton_->StartCollectingProfile(title);
+}
+
+
+CpuProfile* CpuProfiler::StopProfiling(const char* title) {
+  return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
+}
+
+
+CpuProfile* CpuProfiler::StopProfiling(String* title) {
+  return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
+}
+
+
+int CpuProfiler::GetProfilesCount() {
+  ASSERT(singleton_ != NULL);
+  return singleton_->profiles_->profiles()->length();
+}
+
+
+CpuProfile* CpuProfiler::GetProfile(int index) {
+  ASSERT(singleton_ != NULL);
+  return singleton_->profiles_->profiles()->at(index);
+}
+
+
+CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
+  ASSERT(singleton_ != NULL);
+  return singleton_->profiles_->GetProfile(uid);
+}
+
+
+TickSample* CpuProfiler::TickSampleEvent() {
+  if (CpuProfiler::is_profiling()) {
+    return singleton_->processor_->TickSampleEvent();
+  } else {
+    return NULL;
+  }
+}
+
+
+void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
+  singleton_->processor_->CallbackCreateEvent(
+      Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                           Code* code, const char* comment) {
+  singleton_->processor_->CodeCreateEvent(
+      tag, comment, code->address(), code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                           Code* code, String* name) {
+  singleton_->processor_->CodeCreateEvent(
+      tag,
+      name,
+      Heap::empty_string(),
+      v8::CpuProfileNode::kNoLineNumberInfo,
+      code->address(),
+      code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                           Code* code, String* name,
+                           String* source, int line) {
+  singleton_->processor_->CodeCreateEvent(
+      tag,
+      name,
+      source,
+      line,
+      code->address(),
+      code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
+                           Code* code, int args_count) {
+  singleton_->processor_->CodeCreateEvent(
+      tag,
+      args_count,
+      code->address(),
+      code->ExecutableSize());
+}
+
+
+void CpuProfiler::CodeMoveEvent(Address from, Address to) {
+  singleton_->processor_->CodeMoveEvent(from, to);
+}
+
+
+void CpuProfiler::CodeDeleteEvent(Address from) {
+  singleton_->processor_->CodeDeleteEvent(from);
+}
+
+
+void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
+  singleton_->processor_->FunctionCreateEvent(
+      function->address(), function->code()->address());
+}
+
+
+void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
+  singleton_->processor_->FunctionMoveEvent(from, to);
+}
+
+
+void CpuProfiler::FunctionDeleteEvent(Address from) {
+  singleton_->processor_->FunctionDeleteEvent(from);
+}
+
+
+void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
+  singleton_->processor_->CallbackCreateEvent(
+      Logger::CALLBACK_TAG, "get ", name, entry_point);
+}
+
+
+void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
+  singleton_->processor_->RegExpCodeCreateEvent(
+      Logger::REG_EXP_TAG,
+      "RegExp: ",
+      source,
+      code->address(),
+      code->ExecutableSize());
+}
+
+
+void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
+  singleton_->processor_->CallbackCreateEvent(
+      Logger::CALLBACK_TAG, "set ", name, entry_point);
+}
+
+
+CpuProfiler::CpuProfiler()
+    : profiles_(new CpuProfilesCollection()),
+      next_profile_uid_(1),
+      generator_(NULL),
+      processor_(NULL) {
+}
+
+
+CpuProfiler::~CpuProfiler() {
+  delete profiles_;
+}
+
+
+void CpuProfiler::StartCollectingProfile(const char* title) {
+  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
+    StartProcessorIfNotStarted();
+  }
+}
+
+
+void CpuProfiler::StartCollectingProfile(String* title) {
+  if (profiles_->StartProfiling(title, next_profile_uid_++)) {
+    StartProcessorIfNotStarted();
+  }
+}
+
+
+void CpuProfiler::StartProcessorIfNotStarted() {
+  if (processor_ == NULL) {
+    // Disable logging when using the new implementation.
+    saved_logging_nesting_ = Logger::logging_nesting_;
+    Logger::logging_nesting_ = 0;
+    generator_ = new ProfileGenerator(profiles_);
+    processor_ = new ProfilerEventsProcessor(generator_);
+    processor_->Start();
+    // Enable stack sampling.
+    // It is important to have it started prior to logging, see issue 683:
+    // http://code.google.com/p/v8/issues/detail?id=683
+    reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
+    // Enumerate stuff we already have in the heap.
+    if (Heap::HasBeenSetup()) {
+      Logger::LogCodeObjects();
+      Logger::LogCompiledFunctions();
+      Logger::LogFunctionObjects();
+      Logger::LogAccessorCallbacks();
+    }
+  }
+}
+
+
+CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
+  const double actual_sampling_rate = generator_->actual_sampling_rate();
+  StopProcessorIfLastProfile();
+  CpuProfile* result = profiles_->StopProfiling(title, actual_sampling_rate);
+  if (result != NULL) {
+    result->Print();
+  }
+  return result;
+}
+
+
+CpuProfile* CpuProfiler::StopCollectingProfile(String* title) {
+  const double actual_sampling_rate = generator_->actual_sampling_rate();
+  StopProcessorIfLastProfile();
+  return profiles_->StopProfiling(title, actual_sampling_rate);
+}
+
+
+void CpuProfiler::StopProcessorIfLastProfile() {
+  if (profiles_->is_last_profile()) {
+    reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
+    processor_->Stop();
+    processor_->Join();
+    delete processor_;
+    delete generator_;
+    processor_ = NULL;
+    generator_ = NULL;
+    Logger::logging_nesting_ = saved_logging_nesting_;
+  }
+}
+
+} }  // namespace v8::internal
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+namespace v8 {
+namespace internal {
+
+void CpuProfiler::Setup() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (singleton_ == NULL) {
+    singleton_ = new CpuProfiler();
+  }
+#endif
+}
+
+
+void CpuProfiler::TearDown() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (singleton_ != NULL) {
+    delete singleton_;
+  }
+  singleton_ = NULL;
+#endif
+}
+
+} }  // namespace v8::internal
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
new file mode 100644
index 0000000..35d8d5e
--- /dev/null
+++ b/src/cpu-profiler.h
@@ -0,0 +1,277 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CPU_PROFILER_H_
+#define V8_CPU_PROFILER_H_
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#include "circular-queue.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class CodeEntry;
+class CodeMap;
+class CpuProfile;
+class CpuProfilesCollection;
+class ProfileGenerator;
+
+
+#define CODE_EVENTS_TYPE_LIST(V)                \
+  V(CODE_CREATION, CodeCreateEventRecord)       \
+  V(CODE_MOVE,     CodeMoveEventRecord)         \
+  V(CODE_DELETE,   CodeDeleteEventRecord)       \
+  V(CODE_ALIAS,    CodeAliasEventRecord)
+
+
+class CodeEventRecord {
+ public:
+#define DECLARE_TYPE(type, ignore) type,
+  enum Type {
+    NONE = 0,
+    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
+    NUMBER_OF_TYPES
+  };
+#undef DECLARE_TYPE
+
+  Type type;
+  unsigned order;
+};
+
+
+class CodeCreateEventRecord : public CodeEventRecord {
+ public:
+  Address start;
+  CodeEntry* entry;
+  unsigned size;
+
+  INLINE(void UpdateCodeMap(CodeMap* code_map));
+};
+
+
+class CodeMoveEventRecord : public CodeEventRecord {
+ public:
+  Address from;
+  Address to;
+
+  INLINE(void UpdateCodeMap(CodeMap* code_map));
+};
+
+
+class CodeDeleteEventRecord : public CodeEventRecord {
+ public:
+  Address start;
+
+  INLINE(void UpdateCodeMap(CodeMap* code_map));
+};
+
+
+class CodeAliasEventRecord : public CodeEventRecord {
+ public:
+  Address alias;
+  Address start;
+
+  INLINE(void UpdateCodeMap(CodeMap* code_map));
+};
+
+
+class TickSampleEventRecord BASE_EMBEDDED {
+ public:
+  // The first machine word of a TickSampleEventRecord must never
+  // become equal to SamplingCircularQueue::kClear.  As both order and
+  // TickSample's first field are not reliable in this sense (order
+  // can overflow, TickSample can have all fields reset), we are
+  // forced to use an artificial filler field.
+  int filler;
+  unsigned order;
+  TickSample sample;
+
+  static TickSampleEventRecord* cast(void* value) {
+    return reinterpret_cast<TickSampleEventRecord*>(value);
+  }
+
+  INLINE(static TickSampleEventRecord* init(void* value));
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(TickSampleEventRecord);
+};
+
+
+// This class implements both the profile events processor thread and
+// methods called by event producers: VM and stack sampler threads.
+class ProfilerEventsProcessor : public Thread {
+ public:
+  explicit ProfilerEventsProcessor(ProfileGenerator* generator);
+  virtual ~ProfilerEventsProcessor() { }
+
+  // Thread control.
+  virtual void Run();
+  inline void Stop() { running_ = false; }
+  INLINE(bool running()) { return running_; }
+
+  // Events adding methods. Called by VM threads.
+  void CallbackCreateEvent(Logger::LogEventsAndTags tag,
+                           const char* prefix, String* name,
+                           Address start);
+  void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                       String* name,
+                       String* resource_name, int line_number,
+                       Address start, unsigned size);
+  void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                       const char* name,
+                       Address start, unsigned size);
+  void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                       int args_count,
+                       Address start, unsigned size);
+  void CodeMoveEvent(Address from, Address to);
+  void CodeDeleteEvent(Address from);
+  void FunctionCreateEvent(Address alias, Address start);
+  void FunctionMoveEvent(Address from, Address to);
+  void FunctionDeleteEvent(Address from);
+  void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
+                             const char* prefix, String* name,
+                             Address start, unsigned size);
+
+  // Tick sample events are filled directly in the buffer of the circular
+  // queue (because the structure is of fixed width, but usually not all
+  // stack frame entries are filled.) This method returns a pointer to the
+  // next record of the buffer.
+  INLINE(TickSample* TickSampleEvent());
+
+ private:
+  union CodeEventsContainer {
+    CodeEventRecord generic;
+#define DECLARE_CLASS(ignore, type) type type##_;
+    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
+#undef DECLARE_CLASS
+  };
+
+  // Called from events processing thread (Run() method.)
+  bool ProcessCodeEvent(unsigned* dequeue_order);
+  bool ProcessTicks(unsigned dequeue_order);
+
+  INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
+
+  ProfileGenerator* generator_;
+  bool running_;
+  CircularQueue<CodeEventsContainer> events_buffer_;
+  SamplingCircularQueue ticks_buffer_;
+  unsigned enqueue_order_;
+};
+
+} }  // namespace v8::internal
+
+
+#define PROFILE(Call)                                  \
+  LOG(Call);                                           \
+  do {                                                 \
+    if (v8::internal::CpuProfiler::is_profiling()) {   \
+      v8::internal::CpuProfiler::Call;                 \
+    }                                                  \
+  } while (false)
+#else
+#define PROFILE(Call) LOG(Call)
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+
+namespace v8 {
+namespace internal {
+
+class CpuProfiler {
+ public:
+  static void Setup();
+  static void TearDown();
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static void StartProfiling(const char* title);
+  static void StartProfiling(String* title);
+  static CpuProfile* StopProfiling(const char* title);
+  static CpuProfile* StopProfiling(String* title);
+  static int GetProfilesCount();
+  static CpuProfile* GetProfile(int index);
+  static CpuProfile* FindProfile(unsigned uid);
+
+  // Invoked from stack sampler (thread or signal handler.)
+  static TickSample* TickSampleEvent();
+
+  // Must be called via PROFILE macro, otherwise will crash when
+  // profiling is not enabled.
+  static void CallbackEvent(String* name, Address entry_point);
+  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                              Code* code, const char* comment);
+  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                              Code* code, String* name);
+  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                              Code* code, String* name,
+                              String* source, int line);
+  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
+                              Code* code, int args_count);
+  static void CodeMoveEvent(Address from, Address to);
+  static void CodeDeleteEvent(Address from);
+  static void FunctionCreateEvent(JSFunction* function);
+  static void FunctionMoveEvent(Address from, Address to);
+  static void FunctionDeleteEvent(Address from);
+  static void GetterCallbackEvent(String* name, Address entry_point);
+  static void RegExpCodeCreateEvent(Code* code, String* source);
+  static void SetterCallbackEvent(String* name, Address entry_point);
+
+  static INLINE(bool is_profiling()) {
+    return singleton_ != NULL && singleton_->processor_ != NULL;
+  }
+
+ private:
+  CpuProfiler();
+  ~CpuProfiler();
+  void StartCollectingProfile(const char* title);
+  void StartCollectingProfile(String* title);
+  void StartProcessorIfNotStarted();
+  CpuProfile* StopCollectingProfile(const char* title);
+  CpuProfile* StopCollectingProfile(String* title);
+  void StopProcessorIfLastProfile();
+
+  CpuProfilesCollection* profiles_;
+  unsigned next_profile_uid_;
+  ProfileGenerator* generator_;
+  ProfilerEventsProcessor* processor_;
+  int saved_logging_nesting_;
+
+  static CpuProfiler* singleton_;
+
+#else
+  static INLINE(bool is_profiling()) { return false; }
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
+};
+
+} }  // namespace v8::internal
+
+
+#endif  // V8_CPU_PROFILER_H_
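
For orientation, a minimal sketch of how this internal API is driven
(assuming a build with ENABLE_LOGGING_AND_PROFILING defined; this is not
part of the patch):

  #include "v8.h"
  #include "cpu-profiler.h"

  namespace i = v8::internal;

  static void ProfileSomething() {
    i::CpuProfiler::Setup();                       // installs the singleton
    i::CpuProfiler::StartProfiling("my-profile");  // starts sampler/processor
    // ... run the code to be profiled ...
    i::CpuProfile* profile = i::CpuProfiler::StopProfiling("my-profile");
    // StopProfiling() returns NULL when profiling was not active.
    i::CpuProfiler::TearDown();
  }
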
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index 4e0243a..5f3ed76 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -34,6 +34,11 @@
 
 namespace v8 {
 
+void PrintPrompt() {
+  printf("dbg> ");
+  fflush(stdout);
+}
+
 
 void HandleDebugEvent(DebugEvent event,
                       Handle<Object> exec_state,
@@ -86,7 +91,7 @@
   bool running = false;
   while (!running) {
     char command[kBufferSize];
-    printf("dbg> ");
+    PrintPrompt();
     char* str = fgets(command, kBufferSize, stdin);
     if (str == NULL) break;
 
@@ -178,6 +183,7 @@
   // Start the keyboard thread.
   KeyboardThread keyboard(this);
   keyboard.Start();
+  PrintPrompt();
 
   // Process events received from debugged VM and from the keyboard.
   bool terminate = false;
@@ -264,7 +270,8 @@
   Handle<Object> details =
       Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message)));
   if (try_catch.HasCaught()) {
-      Shell::ReportException(&try_catch);
+    Shell::ReportException(&try_catch);
+    PrintPrompt();
     return;
   }
   String::Utf8Value str(details->Get(String::New("text")));
@@ -277,7 +284,7 @@
   } else {
     printf("???\n");
   }
-  printf("dbg> ");
+  PrintPrompt();
 }
 
 
@@ -289,13 +296,17 @@
   Handle<Value> request =
       Shell::DebugCommandToJSONRequest(String::New(command));
   if (try_catch.HasCaught()) {
-    Shell::ReportException(&try_catch);
+    v8::String::Utf8Value exception(try_catch.Exception());
+    const char* exception_string = Shell::ToCString(exception);
+    printf("%s\n", exception_string);
+    PrintPrompt();
     return;
   }
 
   // If undefined is returned the command was handled internally and there is
   // no JSON to send.
   if (request->IsUndefined()) {
+    PrintPrompt();
     return;
   }
 
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index 2535ce0..335bd2b 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -663,10 +663,28 @@
 }
 
 
+Handle<Value> Shell::UnsetEnvironment(const Arguments& args) {
+  if (args.Length() != 1) {
+    const char* message = "unsetenv() takes one argument";
+    return ThrowException(String::New(message));
+  }
+  String::Utf8Value var(args[0]);
+  if (*var == NULL) {
+    const char* message =
+        "os.unsetenv(): String conversion of variable name failed.";
+    return ThrowException(String::New(message));
+  }
+  unsetenv(*var);
+  return v8::Undefined();
+}
+
+
 void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
   os_templ->Set(String::New("system"), FunctionTemplate::New(System));
   os_templ->Set(String::New("chdir"), FunctionTemplate::New(ChangeDirectory));
   os_templ->Set(String::New("setenv"), FunctionTemplate::New(SetEnvironment));
+  os_templ->Set(String::New("unsetenv"),
+                FunctionTemplate::New(UnsetEnvironment));
   os_templ->Set(String::New("umask"), FunctionTemplate::New(SetUMask));
   os_templ->Set(String::New("mkdirp"), FunctionTemplate::New(MakeDirectory));
   os_templ->Set(String::New("rmdir"), FunctionTemplate::New(RemoveDirectory));
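
The new os.unsetenv() binding simply forwards to POSIX unsetenv(3); in the
shell it pairs with os.setenv(). A minimal round trip of the underlying
calls (a sketch, independent of d8):

  #include <assert.h>
  #include <stdlib.h>

  int main() {
    setenv("D8_DEMO", "1", 1 /* overwrite */);  // what os.setenv() wraps
    assert(getenv("D8_DEMO") != NULL);
    unsetenv("D8_DEMO");                        // what os.unsetenv() wraps
    assert(getenv("D8_DEMO") == NULL);
    return 0;
  }
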
diff --git a/src/d8.cc b/src/d8.cc
index dedbd55..a69320a 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -102,7 +102,7 @@
 
 
 // Converts a V8 value to a C string.
-const char* ToCString(const v8::String::Utf8Value& value) {
+const char* Shell::ToCString(const v8::String::Utf8Value& value) {
   return *value ? *value : "<string conversion failed>";
 }
 
@@ -447,9 +447,10 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Install the debugger object in the utility scope
   i::Debug::Load();
-  i::JSObject* debug = i::Debug::debug_context()->global();
+  i::Handle<i::JSObject> debug
+      = i::Handle<i::JSObject>(i::Debug::debug_context()->global());
   utility_context_->Global()->Set(String::New("$debug"),
-                                  Utils::ToLocal(&debug));
+                                  Utils::ToLocal(debug));
 #endif
 
   // Run the d8 shell utility script in the utility context
@@ -467,9 +468,12 @@
 
   // Mark the d8 shell script as native to avoid it showing up as normal source
   // in the debugger.
-  i::Handle<i::JSFunction> script_fun = Utils::OpenHandle(*script);
-  i::Handle<i::Script> script_object =
-      i::Handle<i::Script>(i::Script::cast(script_fun->shared()->script()));
+  i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
+  i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
+      ? i::Handle<i::Script>(i::Script::cast(
+          i::JSFunction::cast(*compiled_script)->shared()->script()))
+      : i::Handle<i::Script>(i::Script::cast(
+          i::SharedFunctionInfo::cast(*compiled_script)->script()));
   script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
 
   // Create the evaluation context
diff --git a/src/d8.h b/src/d8.h
index c93ea46..30f04c7 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -117,6 +117,7 @@
                             Handle<Value> name,
                             bool print_result,
                             bool report_exceptions);
+  static const char* ToCString(const v8::String::Utf8Value& value);
   static void ReportException(TryCatch* try_catch);
   static void Initialize();
   static void OnExit();
@@ -175,6 +176,7 @@
   static Handle<Value> System(const Arguments& args);
   static Handle<Value> ChangeDirectory(const Arguments& args);
   static Handle<Value> SetEnvironment(const Arguments& args);
+  static Handle<Value> UnsetEnvironment(const Arguments& args);
   static Handle<Value> SetUMask(const Arguments& args);
   static Handle<Value> MakeDirectory(const Arguments& args);
   static Handle<Value> RemoveDirectory(const Arguments& args);
diff --git a/src/d8.js b/src/d8.js
index be4a051..b9ff09c 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -164,7 +164,7 @@
       Debug.State.currentFrame = 0;
       details.text = result;
       break;
-      
+
     case 'exception':
       if (body.uncaught) {
         result += 'Uncaught: ';
@@ -212,7 +212,7 @@
 
 function SourceInfo(body) {
   var result = '';
-  
+
   if (body.script) {
     if (body.script.name) {
       result += body.script.name;
@@ -224,7 +224,7 @@
   result += body.sourceLine + 1;
   result += ' column ';
   result += body.sourceColumn + 1;
-  
+
   return result;
 }
 
@@ -297,20 +297,20 @@
     case 'bt':
       this.request_ = this.backtraceCommandToJSONRequest_(args);
       break;
-      
+
     case 'frame':
     case 'f':
       this.request_ = this.frameCommandToJSONRequest_(args);
       break;
-      
+
     case 'scopes':
       this.request_ = this.scopesCommandToJSONRequest_(args);
       break;
-      
+
     case 'scope':
       this.request_ = this.scopeCommandToJSONRequest_(args);
       break;
-      
+
     case 'print':
     case 'p':
       this.request_ = this.printCommandToJSONRequest_(args);
@@ -331,16 +331,16 @@
     case 'source':
       this.request_ = this.sourceCommandToJSONRequest_(args);
       break;
-      
+
     case 'scripts':
       this.request_ = this.scriptsCommandToJSONRequest_(args);
       break;
-      
+
     case 'break':
     case 'b':
       this.request_ = this.breakCommandToJSONRequest_(args);
       break;
-      
+
     case 'clear':
       this.request_ = this.clearCommandToJSONRequest_(args);
       break;
@@ -365,7 +365,7 @@
     default:
       throw new Error('Unknown command "' + cmd + '"');
   }
-  
+
   last_cmd = cmd;
 }
 
@@ -490,22 +490,22 @@
         case 'i':
           request.arguments.stepaction = 'in';
           break;
-          
+
         case 'min':
         case 'm':
           request.arguments.stepaction = 'min';
           break;
-          
+
         case 'next':
         case 'n':
           request.arguments.stepaction = 'next';
           break;
-          
+
         case 'out':
         case 'o':
           request.arguments.stepaction = 'out';
           break;
-          
+
         default:
           throw new Error('Invalid step argument "' + args[0] + '".');
       }
@@ -523,7 +523,7 @@
 DebugRequest.prototype.backtraceCommandToJSONRequest_ = function(args) {
   // Build a backtrace request from the text command.
   var request = this.createRequest('backtrace');
-  
+
   // Default is to show top 10 frames.
   request.arguments = {};
   request.arguments.fromFrame = 0;
@@ -626,7 +626,7 @@
   if (args.length == 0) {
     throw new Error('Missing object id.');
   }
-  
+
   return this.makeReferencesJSONRequest_(args, 'referencedBy');
 };
 
@@ -637,7 +637,7 @@
   if (args.length == 0) {
     throw new Error('Missing object id.');
   }
-  
+
   // Build a references request.
   return this.makeReferencesJSONRequest_(args, 'constructedBy');
 };
@@ -691,18 +691,18 @@
       case 'natives':
         request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Native);
         break;
-        
+
       case 'extensions':
         request.arguments.types = ScriptTypeFlag(Debug.ScriptType.Extension);
         break;
-        
+
       case 'all':
         request.arguments.types =
             ScriptTypeFlag(Debug.ScriptType.Normal) |
             ScriptTypeFlag(Debug.ScriptType.Native) |
             ScriptTypeFlag(Debug.ScriptType.Extension);
         break;
-        
+
       default:
         throw new Error('Invalid argument "' + args[0] + '".');
     }
@@ -715,8 +715,6 @@
 // Create a JSON request for the break command.
 DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
   // Build a evaluate request from the text command.
-  var request = this.createRequest('setbreakpoint');
-
   // Process arguments if any.
   if (args && args.length > 0) {
     var target = args;
@@ -726,6 +724,8 @@
     var condition;
     var pos;
 
+    var request = this.createRequest('setbreakpoint');
+
     // Check for breakpoint condition.
     pos = args.indexOf(' ');
     if (pos > 0) {
@@ -740,7 +740,7 @@
       type = 'script';
       var tmp = target.substring(pos + 1, target.length);
       target = target.substring(0, pos);
-      
+
       // Check for both line and column.
       pos = tmp.indexOf(':');
       if (pos > 0) {
@@ -755,7 +755,7 @@
     } else {
       type = 'function';
     }
-  
+
     request.arguments = {};
     request.arguments.type = type;
     request.arguments.target = target;
@@ -763,7 +763,7 @@
     request.arguments.column = column;
     request.arguments.condition = condition;
   } else {
-    throw new Error('Invalid break arguments.');
+    var request = this.createRequest('suspend');
   }
 
   return request.toJSONProtocol();
@@ -817,6 +817,7 @@
     print('warning: arguments to \'help\' are ignored');
   }
 
+  print('break');
   print('break location [condition]');
   print('  break on named function: location is a function name');
   print('  break on function: location is #<id>#');
@@ -931,18 +932,22 @@
     var body = response.body();
     var result = '';
     switch (response.command()) {
+      case 'suspend':
+        details.text = 'stopped';
+        break;
+
       case 'setbreakpoint':
         result = 'set breakpoint #';
         result += body.breakpoint;
         details.text = result;
         break;
-        
+
       case 'clearbreakpoint':
         result = 'cleared breakpoint #';
         result += body.breakpoint;
         details.text = result;
         break;
-        
+
       case 'backtrace':
         if (body.totalFrames == 0) {
           result = '(empty stack)';
@@ -956,14 +961,14 @@
         }
         details.text = result;
         break;
-        
+
       case 'frame':
         details.text = SourceUnderline(body.sourceLineText,
                                        body.column);
         Debug.State.currentSourceLine = body.line;
         Debug.State.currentFrame = body.index;
         break;
-        
+
       case 'scopes':
         if (body.totalScopes == 0) {
           result = '(no scopes)';
@@ -987,7 +992,7 @@
         result += formatObject_(scope_object_value, true);
         details.text = result;
         break;
-      
+
       case 'evaluate':
       case 'lookup':
         if (last_cmd == 'p' || last_cmd == 'print') {
@@ -1031,7 +1036,7 @@
         }
         details.text = result;
         break;
-        
+
       case 'source':
         // Get the source from the response.
         var source = body.source;
@@ -1066,7 +1071,7 @@
         }
         details.text = result;
         break;
-        
+
       case 'scripts':
         var result = '';
         for (i = 0; i < body.length; i++) {
@@ -1128,7 +1133,7 @@
       case 'continue':
         details.text = "(running)";
         break;
-        
+
       default:
         details.text =
             'Response for unknown command \'' + response.command + '\'' +
@@ -1137,7 +1142,7 @@
   } catch (e) {
     details.text = 'Error: "' + e + '" formatting response';
   }
-  
+
   return details;
 };
 
@@ -1254,7 +1259,7 @@
 
 
 /**
- * Get a metadata field from a protocol value. 
+ * Get a metadata field from a protocol value.
  * @return {Object} the metadata field value
  */
 ProtocolValue.prototype.field = function(name) {
@@ -1435,12 +1440,12 @@
 
 
 function BooleanToJSON_(value) {
-  return String(value); 
+  return String(value);
 }
 
 
 function NumberToJSON_(value) {
-  return String(value); 
+  return String(value);
 }
 
 
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 5e9d217..4e7620a 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -28,11 +28,28 @@
 #include "v8.h"
 
 #include "data-flow.h"
+#include "scopes.h"
 
 namespace v8 {
 namespace internal {
 
 
+#ifdef DEBUG
+void BitVector::Print() {
+  bool first = true;
+  PrintF("{");
+  for (int i = 0; i < length(); i++) {
+    if (Contains(i)) {
+      if (!first) PrintF(",");
+      first = false;
+      PrintF("%d", i);
+    }
+  }
+  PrintF("}");
+}
+#endif
+
+
 void AstLabeler::Label(CompilationInfo* info) {
   info_ = info;
   VisitStatements(info_->function()->body());
@@ -145,8 +162,8 @@
 }
 
 
-void AstLabeler::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void AstLabeler::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }
 
@@ -204,6 +221,9 @@
   USE(proxy);
   ASSERT(proxy != NULL && proxy->var()->is_this());
   info()->set_has_this_properties(true);
+
+  prop->obj()->set_num(AstNode::kNoNumber);
+  prop->key()->set_num(AstNode::kNoNumber);
   Visit(expr->value());
   expr->set_num(next_number_++);
 }
@@ -220,6 +240,9 @@
   USE(proxy);
   ASSERT(proxy != NULL && proxy->var()->is_this());
   info()->set_has_this_properties(true);
+
+  expr->obj()->set_num(AstNode::kNoNumber);
+  expr->key()->set_num(AstNode::kNoNumber);
   expr->set_num(next_number_++);
 }
 
@@ -271,289 +294,460 @@
 }
 
 
-ZoneList<Expression*>* VarUseMap::Lookup(Variable* var) {
-  HashMap::Entry* entry = HashMap::Lookup(var, var->name()->Hash(), true);
-  if (entry->value == NULL) {
-    entry->value = new ZoneList<Expression*>(1);
-  }
-  return reinterpret_cast<ZoneList<Expression*>*>(entry->value);
+AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(FunctionLiteral* fun)
+    : fun_(fun),
+      av_(fun->scope()->num_parameters() + fun->scope()->num_stack_slots()) {}
+
+
+void AssignedVariablesAnalyzer::Analyze() {
+  ASSERT(av_.length() > 0);
+  VisitStatements(fun_->body());
 }
 
 
-void LivenessAnalyzer::Analyze(FunctionLiteral* fun) {
-  // Process the function body.
-  VisitStatements(fun->body());
+Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
+  // The loop must have all necessary parts.
+  if (stmt->init() == NULL || stmt->cond() == NULL || stmt->next() == NULL) {
+    return NULL;
+  }
+  // The initialization statement has to be a simple assignment.
+  Assignment* init = stmt->init()->StatementAsSimpleAssignment();
+  if (init == NULL) return NULL;
 
-  // All variables are implicitly defined at the function start.
-  // Record a definition of all variables live at function entry.
-  for (HashMap::Entry* p = live_vars_.Start();
-       p != NULL;
-       p = live_vars_.Next(p)) {
-    Variable* var = reinterpret_cast<Variable*>(p->key);
-    RecordDef(var, fun);
+  // We only deal with local variables.
+  Variable* loop_var = init->target()->AsVariableProxy()->AsVariable();
+  if (loop_var == NULL || !loop_var->IsStackAllocated()) return NULL;
+
+  // The initial value has to be a smi.
+  Literal* init_lit = init->value()->AsLiteral();
+  if (init_lit == NULL || !init_lit->handle()->IsSmi()) return NULL;
+  int init_value = Smi::cast(*init_lit->handle())->value();
+
+  // The condition must be a compare of variable with <, <=, >, or >=.
+  CompareOperation* cond = stmt->cond()->AsCompareOperation();
+  if (cond == NULL) return NULL;
+  if (cond->op() != Token::LT
+      && cond->op() != Token::LTE
+      && cond->op() != Token::GT
+      && cond->op() != Token::GTE) return NULL;
+
+  // The lhs must be the same variable as in the init expression.
+  if (cond->left()->AsVariableProxy()->AsVariable() != loop_var) return NULL;
+
+  // The rhs must be a smi.
+  Literal* term_lit = cond->right()->AsLiteral();
+  if (term_lit == NULL || !term_lit->handle()->IsSmi()) return NULL;
+  int term_value = Smi::cast(*term_lit->handle())->value();
+
+  // The count operation updates the same variable as in the init expression.
+  CountOperation* update = stmt->next()->StatementAsCountOperation();
+  if (update == NULL) return NULL;
+  if (update->expression()->AsVariableProxy()->AsVariable() != loop_var) {
+    return NULL;
+  }
+
+  // The direction of the count operation must agree with the start and the end
+  // value. We currently do not allow the initial value to be the same as the
+  // terminal value. This _would_ be ok as long as the loop body never executes
+  // or executes exactly one time.
+  if (init_value == term_value) return NULL;
+  if (init_value < term_value && update->op() != Token::INC) return NULL;
+  if (init_value > term_value && update->op() != Token::DEC) return NULL;
+
+  // Check that the update operation cannot overflow the smi range. This can
+  // occur in the two cases where the loop bound is equal to the largest or
+  // smallest smi.
+  if (update->op() == Token::INC && term_value == Smi::kMaxValue) return NULL;
+  if (update->op() == Token::DEC && term_value == Smi::kMinValue) return NULL;
+
+  // Found a smi loop variable.
+  return loop_var;
+}
+
+int AssignedVariablesAnalyzer::BitIndex(Variable* var) {
+  ASSERT(var != NULL);
+  ASSERT(var->IsStackAllocated());
+  Slot* slot = var->slot();
+  if (slot->type() == Slot::PARAMETER) {
+    return slot->index();
+  } else {
+    return fun_->scope()->num_parameters() + slot->index();
   }
 }
 
 
-void LivenessAnalyzer::VisitStatements(ZoneList<Statement*>* stmts) {
-  // Visit statements right-to-left.
-  for (int i = stmts->length() - 1; i >= 0; i--) {
-    Visit(stmts->at(i));
+void AssignedVariablesAnalyzer::RecordAssignedVar(Variable* var) {
+  ASSERT(var != NULL);
+  if (var->IsStackAllocated()) {
+    av_.Add(BitIndex(var));
   }
 }
 
 
-void LivenessAnalyzer::RecordUse(Variable* var, Expression* expr) {
-  ASSERT(var->is_global() || var->is_this());
-  ZoneList<Expression*>* uses = live_vars_.Lookup(var);
-  uses->Add(expr);
-}
-
-
-void LivenessAnalyzer::RecordDef(Variable* var, Expression* expr) {
-  ASSERT(var->is_global() || var->is_this());
-
-  // We do not support other expressions that can define variables.
-  ASSERT(expr->AsFunctionLiteral() != NULL);
-
-  // Add the variable to the list of defined variables.
-  if (expr->defined_vars() == NULL) {
-    expr->set_defined_vars(new ZoneList<DefinitionInfo*>(1));
-  }
-  DefinitionInfo* def = new DefinitionInfo();
-  expr->AsFunctionLiteral()->defined_vars()->Add(def);
-
-  // Compute the last use of the definition. The variable uses are
-  // inserted in reversed evaluation order. The first element
-  // in the list of live uses is the last use.
-  ZoneList<Expression*>* uses = live_vars_.Lookup(var);
-  while (uses->length() > 0) {
-    Expression* use_site = uses->RemoveLast();
-    use_site->set_var_def(def);
-    if (uses->length() == 0) {
-      def->set_last_use(use_site);
-    }
+void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) {
+  Variable* var = expr->AsVariableProxy()->AsVariable();
+  if (var != NULL &&
+      var->IsStackAllocated() &&
+      !var->is_arguments() &&
+      var->mode() != Variable::CONST &&
+      (var->is_this() || !av_.Contains(BitIndex(var)))) {
+    expr->AsVariableProxy()->set_is_trivial(true);
   }
 }
 
 
-// Visitor functions for live variable analysis.
-void LivenessAnalyzer::VisitDeclaration(Declaration* decl) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::ProcessExpression(Expression* expr) {
+  BitVector saved_av(av_);
+  av_.Clear();
+  Visit(expr);
+  av_.Union(saved_av);
 }
 
-
-void LivenessAnalyzer::VisitBlock(Block* stmt) {
+void AssignedVariablesAnalyzer::VisitBlock(Block* stmt) {
   VisitStatements(stmt->statements());
 }
 
 
-void LivenessAnalyzer::VisitExpressionStatement(
+void AssignedVariablesAnalyzer::VisitExpressionStatement(
     ExpressionStatement* stmt) {
-  Visit(stmt->expression());
+  ProcessExpression(stmt->expression());
 }
 
 
-void LivenessAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
+void AssignedVariablesAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
   // Do nothing.
 }
 
 
-void LivenessAnalyzer::VisitIfStatement(IfStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitIfStatement(IfStatement* stmt) {
+  ProcessExpression(stmt->condition());
+  Visit(stmt->then_statement());
+  Visit(stmt->else_statement());
 }
 
 
-void LivenessAnalyzer::VisitContinueStatement(ContinueStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitContinueStatement(
+    ContinueStatement* stmt) {
+  // Nothing to do.
 }
 
 
-void LivenessAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
+  // Nothing to do.
 }
 
 
-void LivenessAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
+  ProcessExpression(stmt->expression());
 }
 
 
-void LivenessAnalyzer::VisitWithEnterStatement(
+void AssignedVariablesAnalyzer::VisitWithEnterStatement(
     WithEnterStatement* stmt) {
-  UNREACHABLE();
+  ProcessExpression(stmt->expression());
 }
 
 
-void LivenessAnalyzer::VisitWithExitStatement(WithExitStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitWithExitStatement(
+    WithExitStatement* stmt) {
+  // Nothing to do.
 }
 
 
-void LivenessAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
+  BitVector result(av_);
+  av_.Clear();
+  Visit(stmt->tag());
+  result.Union(av_);
+  for (int i = 0; i < stmt->cases()->length(); i++) {
+    CaseClause* clause = stmt->cases()->at(i);
+    if (!clause->is_default()) {
+      av_.Clear();
+      Visit(clause->label());
+      result.Union(av_);
+    }
+    VisitStatements(clause->statements());
+  }
+  av_.Union(result);
 }
 
 
-void LivenessAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  ProcessExpression(stmt->cond());
+  Visit(stmt->body());
 }
 
 
-void LivenessAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
+  ProcessExpression(stmt->cond());
+  Visit(stmt->body());
 }
 
 
-void LivenessAnalyzer::VisitForStatement(ForStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
+  if (stmt->init() != NULL) Visit(stmt->init());
+
+  if (stmt->cond() != NULL) ProcessExpression(stmt->cond());
+
+  if (stmt->next() != NULL) Visit(stmt->next());
+
+  // Process loop body. After visiting the loop body av_ contains
+  // the assigned variables of the loop body.
+  BitVector saved_av(av_);
+  av_.Clear();
+  Visit(stmt->body());
+
+  Variable* var = FindSmiLoopVariable(stmt);
+  if (var != NULL && !av_.Contains(BitIndex(var))) {
+    stmt->set_loop_variable(var);
+  }
+
+  av_.Union(saved_av);
 }
 
 
-void LivenessAnalyzer::VisitForInStatement(ForInStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitForInStatement(ForInStatement* stmt) {
+  ProcessExpression(stmt->each());
+  ProcessExpression(stmt->enumerable());
+  Visit(stmt->body());
 }
 
 
-void LivenessAnalyzer::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitTryCatchStatement(
+    TryCatchStatement* stmt) {
+  Visit(stmt->try_block());
+  Visit(stmt->catch_block());
 }
 
 
-void LivenessAnalyzer::VisitTryFinallyStatement(
+void AssignedVariablesAnalyzer::VisitTryFinallyStatement(
     TryFinallyStatement* stmt) {
-  UNREACHABLE();
+  Visit(stmt->try_block());
+  Visit(stmt->finally_block());
 }
 
 
-void LivenessAnalyzer::VisitDebuggerStatement(
+void AssignedVariablesAnalyzer::VisitDebuggerStatement(
     DebuggerStatement* stmt) {
+  // Nothing to do.
+}
+
+
+void AssignedVariablesAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
+  // Nothing to do.
+  ASSERT(av_.IsEmpty());
+}
+
+
+void AssignedVariablesAnalyzer::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  // Nothing to do.
+  ASSERT(av_.IsEmpty());
+}
+
+
+void AssignedVariablesAnalyzer::VisitConditional(Conditional* expr) {
+  ASSERT(av_.IsEmpty());
+
+  Visit(expr->condition());
+
+  BitVector result(av_);
+  av_.Clear();
+  Visit(expr->then_expression());
+  result.Union(av_);
+
+  av_.Clear();
+  Visit(expr->else_expression());
+  av_.Union(result);
+}
+
+
+void AssignedVariablesAnalyzer::VisitSlot(Slot* expr) {
   UNREACHABLE();
 }
 
 
-void LivenessAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitVariableProxy(VariableProxy* expr) {
+  // Nothing to do.
+  ASSERT(av_.IsEmpty());
 }
 
 
-void LivenessAnalyzer::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitLiteral(Literal* expr) {
+  // Nothing to do.
+  ASSERT(av_.IsEmpty());
 }
 
 
-void LivenessAnalyzer::VisitConditional(Conditional* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
+  // Nothing to do.
+  ASSERT(av_.IsEmpty());
 }
 
 
-void LivenessAnalyzer::VisitSlot(Slot* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
+  ASSERT(av_.IsEmpty());
+  BitVector result(av_.length());
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    Visit(expr->properties()->at(i)->value());
+    result.Union(av_);
+    av_.Clear();
+  }
+  av_ = result;
 }
 
 
-void LivenessAnalyzer::VisitVariableProxy(VariableProxy* expr) {
-  Variable* var = expr->var();
-  ASSERT(var->is_global());
-  ASSERT(!var->is_this());
-  RecordUse(var, expr);
+void AssignedVariablesAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
+  ASSERT(av_.IsEmpty());
+  BitVector result(av_.length());
+  for (int i = 0; i < expr->values()->length(); i++) {
+    Visit(expr->values()->at(i));
+    result.Union(av_);
+    av_.Clear();
+  }
+  av_ = result;
 }
 
 
-void LivenessAnalyzer::VisitLiteral(Literal* expr) {
-  UNREACHABLE();
-}
-
-
-void LivenessAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
-  UNREACHABLE();
-}
-
-
-void LivenessAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
-  UNREACHABLE();
-}
-
-
-void LivenessAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
-  UNREACHABLE();
-}
-
-
-void LivenessAnalyzer::VisitCatchExtensionObject(
+void AssignedVariablesAnalyzer::VisitCatchExtensionObject(
     CatchExtensionObject* expr) {
-  UNREACHABLE();
+  ASSERT(av_.IsEmpty());
+  Visit(expr->key());
+  ProcessExpression(expr->value());
 }
 
 
-void LivenessAnalyzer::VisitAssignment(Assignment* expr) {
+void AssignedVariablesAnalyzer::VisitAssignment(Assignment* expr) {
+  ASSERT(av_.IsEmpty());
+
+  // There are three kinds of assignments: variable assignments, property
+  // assignments, and reference errors (invalid left-hand sides).
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
   Property* prop = expr->target()->AsProperty();
-  ASSERT(prop != NULL);
-  ASSERT(prop->key()->IsPropertyName());
-  VariableProxy* proxy = prop->obj()->AsVariableProxy();
-  ASSERT(proxy != NULL && proxy->var()->is_this());
+  ASSERT(var == NULL || prop == NULL);
 
-  // Record use of this at the assignment node. Assignments to
-  // this-properties are treated like unary operations.
-  RecordUse(proxy->var(), expr);
+  if (var != NULL) {
+    MarkIfTrivial(expr->value());
+    Visit(expr->value());
+    if (expr->is_compound()) {
+      // Left-hand side occurs also as an rvalue.
+      MarkIfTrivial(expr->target());
+      ProcessExpression(expr->target());
+    }
+    RecordAssignedVar(var);
 
-  // Visit right-hand side.
-  Visit(expr->value());
+  } else if (prop != NULL) {
+    MarkIfTrivial(expr->value());
+    Visit(expr->value());
+    if (!prop->key()->IsPropertyName()) {
+      MarkIfTrivial(prop->key());
+      ProcessExpression(prop->key());
+    }
+    MarkIfTrivial(prop->obj());
+    ProcessExpression(prop->obj());
+
+  } else {
+    Visit(expr->target());
+  }
 }
 
 
-void LivenessAnalyzer::VisitThrow(Throw* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitThrow(Throw* expr) {
+  ASSERT(av_.IsEmpty());
+  Visit(expr->exception());
 }
 
 
-void LivenessAnalyzer::VisitProperty(Property* expr) {
-  ASSERT(expr->key()->IsPropertyName());
-  VariableProxy* proxy = expr->obj()->AsVariableProxy();
-  ASSERT(proxy != NULL && proxy->var()->is_this());
-  RecordUse(proxy->var(), expr);
+void AssignedVariablesAnalyzer::VisitProperty(Property* expr) {
+  ASSERT(av_.IsEmpty());
+  if (!expr->key()->IsPropertyName()) {
+    MarkIfTrivial(expr->key());
+    Visit(expr->key());
+  }
+  MarkIfTrivial(expr->obj());
+  ProcessExpression(expr->obj());
 }
 
 
-void LivenessAnalyzer::VisitCall(Call* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitCall(Call* expr) {
+  ASSERT(av_.IsEmpty());
+  Visit(expr->expression());
+  BitVector result(av_);
+  for (int i = 0; i < expr->arguments()->length(); i++) {
+    av_.Clear();
+    Visit(expr->arguments()->at(i));
+    result.Union(av_);
+  }
+  av_ = result;
 }
 
 
-void LivenessAnalyzer::VisitCallNew(CallNew* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitCallNew(CallNew* expr) {
+  ASSERT(av_.IsEmpty());
+  Visit(expr->expression());
+  BitVector result(av_);
+  for (int i = 0; i < expr->arguments()->length(); i++) {
+    av_.Clear();
+    Visit(expr->arguments()->at(i));
+    result.Union(av_);
+  }
+  av_ = result;
 }
 
 
-void LivenessAnalyzer::VisitCallRuntime(CallRuntime* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
+  ASSERT(av_.IsEmpty());
+  BitVector result(av_);
+  for (int i = 0; i < expr->arguments()->length(); i++) {
+    av_.Clear();
+    Visit(expr->arguments()->at(i));
+    result.Union(av_);
+  }
+  av_ = result;
 }
 
 
-void LivenessAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
+  ASSERT(av_.IsEmpty());
+  Visit(expr->expression());
 }
 
 
-void LivenessAnalyzer::VisitCountOperation(CountOperation* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
+  ASSERT(av_.IsEmpty());
+
+  Visit(expr->expression());
+
+  Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+  if (var != NULL) RecordAssignedVar(var);
 }
 
 
-void LivenessAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
-  // Visit child nodes in reverse evaluation order.
+void AssignedVariablesAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
+  ASSERT(av_.IsEmpty());
+  MarkIfTrivial(expr->right());
   Visit(expr->right());
-  Visit(expr->left());
+  MarkIfTrivial(expr->left());
+  ProcessExpression(expr->left());
 }
 
 
-void LivenessAnalyzer::VisitCompareOperation(CompareOperation* expr) {
-  UNREACHABLE();
+void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
+  ASSERT(av_.IsEmpty());
+  MarkIfTrivial(expr->right());
+  Visit(expr->right());
+  MarkIfTrivial(expr->left());
+  ProcessExpression(expr->left());
 }
 
 
-void LivenessAnalyzer::VisitThisFunction(ThisFunction* expr) {
+void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) {
+  // Nothing to do.
+  ASSERT(av_.IsEmpty());
+}
+
+
+void AssignedVariablesAnalyzer::VisitDeclaration(Declaration* decl) {
   UNREACHABLE();
 }
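
All of the visitors above follow the same accumulator discipline:
ProcessExpression() saves av_, clears it so the sub-expression is analyzed
against an empty set, and unions the saved bits back in afterwards. A
standalone sketch of the pattern, with std::bitset standing in for the
BitVector defined in data-flow.h (illustrative only):

  #include <bitset>
  #include <cstdio>
  #include <string>

  typedef std::bitset<32> Bits;  // stand-in for BitVector

  static Bits av;  // accumulator of assigned-variable bits

  static void VisitAssignsVar(int bit) { av.set(bit); }  // a leaf visit

  // Mirrors AssignedVariablesAnalyzer::ProcessExpression().
  static void ProcessExpression(void (*visit)(int), int bit) {
    Bits saved = av;
    av.reset();     // the sub-expression starts with an empty accumulator
    visit(bit);
    av |= saved;    // union the outer set back in
  }

  int main() {
    VisitAssignsVar(0);
    ProcessExpression(VisitAssignsVar, 3);
    std::printf("%s\n", av.to_string().c_str());  // bits 0 and 3 are set
    return 0;
  }
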
 
diff --git a/src/data-flow.h b/src/data-flow.h
index 2331944..079da65 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -28,12 +28,186 @@
 #ifndef V8_DATAFLOW_H_
 #define V8_DATAFLOW_H_
 
+#include "v8.h"
+
 #include "ast.h"
 #include "compiler.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
 
+// Forward declarations.
+class Node;
+
+class BitVector: public ZoneObject {
+ public:
+  explicit BitVector(int length)
+      : length_(length),
+        data_length_(SizeFor(length)),
+        data_(Zone::NewArray<uint32_t>(data_length_)) {
+    ASSERT(length > 0);
+    Clear();
+  }
+
+  BitVector(const BitVector& other)
+      : length_(other.length()),
+        data_length_(SizeFor(length_)),
+        data_(Zone::NewArray<uint32_t>(data_length_)) {
+    CopyFrom(other);
+  }
+
+  static int SizeFor(int length) {
+    return 1 + ((length - 1) / 32);
+  }
+
+  BitVector& operator=(const BitVector& rhs) {
+    if (this != &rhs) CopyFrom(rhs);
+    return *this;
+  }
+
+  void CopyFrom(const BitVector& other) {
+    ASSERT(other.length() == length());
+    for (int i = 0; i < data_length_; i++) {
+      data_[i] = other.data_[i];
+    }
+  }
+
+  bool Contains(int i) {
+    ASSERT(i >= 0 && i < length());
+    uint32_t block = data_[i / 32];
+    return (block & (1U << (i % 32))) != 0;
+  }
+
+  void Add(int i) {
+    ASSERT(i >= 0 && i < length());
+    data_[i / 32] |= (1U << (i % 32));
+  }
+
+  void Remove(int i) {
+    ASSERT(i >= 0 && i < length());
+    data_[i / 32] &= ~(1U << (i % 32));
+  }
+
+  void Union(const BitVector& other) {
+    ASSERT(other.length() == length());
+    for (int i = 0; i < data_length_; i++) {
+      data_[i] |= other.data_[i];
+    }
+  }
+
+  void Intersect(const BitVector& other) {
+    ASSERT(other.length() == length());
+    for (int i = 0; i < data_length_; i++) {
+      data_[i] &= other.data_[i];
+    }
+  }
+
+  void Subtract(const BitVector& other) {
+    ASSERT(other.length() == length());
+    for (int i = 0; i < data_length_; i++) {
+      data_[i] &= ~other.data_[i];
+    }
+  }
+
+  void Clear() {
+    for (int i = 0; i < data_length_; i++) {
+      data_[i] = 0;
+    }
+  }
+
+  bool IsEmpty() const {
+    for (int i = 0; i < data_length_; i++) {
+      if (data_[i] != 0) return false;
+    }
+    return true;
+  }
+
+  bool Equals(const BitVector& other) {
+    for (int i = 0; i < data_length_; i++) {
+      if (data_[i] != other.data_[i]) return false;
+    }
+    return true;
+  }
+
+  int length() const { return length_; }
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+ private:
+  int length_;
+  int data_length_;
+  uint32_t* data_;
+};
+
+
+// Simple fixed-capacity list-based worklist (managed as a queue) of
+// pointers to T.
+template<typename T>
+class WorkList BASE_EMBEDDED {
+ public:
+  // The worklist cannot grow bigger than size.  We keep one slot empty to
+  // distinguish between empty and full.
+  explicit WorkList(int size)
+      : capacity_(size + 1), head_(0), tail_(0), queue_(capacity_) {
+    for (int i = 0; i < capacity_; i++) queue_.Add(NULL);
+  }
+
+  bool is_empty() { return head_ == tail_; }
+
+  bool is_full() {
+    // The worklist is full if head is at 0 and tail is at capacity - 1:
+    //   head == 0 && tail == capacity-1 ==> tail - head == capacity - 1
+    // or if tail is immediately to the left of head:
+    //   tail+1 == head  ==> tail - head == -1
+    int diff = tail_ - head_;
+    return (diff == -1 || diff == capacity_ - 1);
+  }
+
+  void Insert(T* item) {
+    ASSERT(!is_full());
+    queue_[tail_++] = item;
+    if (tail_ == capacity_) tail_ = 0;
+  }
+
+  T* Remove() {
+    ASSERT(!is_empty());
+    T* item = queue_[head_++];
+    if (head_ == capacity_) head_ = 0;
+    return item;
+  }
+
+ private:
+  int capacity_;  // Including one empty slot.
+  int head_;      // Where the first item is.
+  int tail_;      // Where the next inserted item will go.
+  List<T*> queue_;
+};
+
+
+struct ReachingDefinitionsData BASE_EMBEDDED {
+ public:
+  ReachingDefinitionsData() : rd_in_(NULL), kill_(NULL), gen_(NULL) {}
+
+  void Initialize(int definition_count) {
+    rd_in_ = new BitVector(definition_count);
+    kill_ = new BitVector(definition_count);
+    gen_ = new BitVector(definition_count);
+  }
+
+  BitVector* rd_in() { return rd_in_; }
+  BitVector* kill() { return kill_; }
+  BitVector* gen() { return gen_; }
+
+ private:
+  BitVector* rd_in_;
+  BitVector* kill_;
+  BitVector* gen_;
+};
+
+
 // This class is used to number all expressions in the AST according to
 // their evaluation order (post-order left-to-right traversal).
 class AstLabeler: public AstVisitor {
@@ -62,52 +236,39 @@
 };
 
 
-class VarUseMap : public HashMap {
+// Computes the set of assigned variables and annotates variable proxies
+// that are trivial sub-expressions and for-loops where the loop variable
+// is guaranteed to be a smi.
+class AssignedVariablesAnalyzer : public AstVisitor {
  public:
-  VarUseMap() : HashMap(VarMatch) {}
+  explicit AssignedVariablesAnalyzer(FunctionLiteral* fun);
 
-  ZoneList<Expression*>* Lookup(Variable* var);
+  void Analyze();
 
  private:
-  static bool VarMatch(void* key1, void* key2) { return key1 == key2; }
-};
+  Variable* FindSmiLoopVariable(ForStatement* stmt);
 
+  int BitIndex(Variable* var);
 
-class DefinitionInfo : public ZoneObject {
- public:
-  explicit DefinitionInfo() : last_use_(NULL) {}
+  void RecordAssignedVar(Variable* var);
 
-  Expression* last_use() { return last_use_; }
-  void set_last_use(Expression* expr) { last_use_ = expr; }
+  void MarkIfTrivial(Expression* expr);
 
- private:
-  Expression* last_use_;
-  Register location_;
-};
-
-
-class LivenessAnalyzer : public AstVisitor {
- public:
-  LivenessAnalyzer() {}
-
-  void Analyze(FunctionLiteral* fun);
-
- private:
-  void VisitStatements(ZoneList<Statement*>* stmts);
-
-  void RecordUse(Variable* var, Expression* expr);
-  void RecordDef(Variable* var, Expression* expr);
-
+  // Visits an expression, saving the accumulator before, clearing
+  // it before visiting, and restoring it after visiting.
+  void ProcessExpression(Expression* expr);
 
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
-  // Map for tracking the live variables.
-  VarUseMap live_vars_;
+  FunctionLiteral* fun_;
 
-  DISALLOW_COPY_AND_ASSIGN(LivenessAnalyzer);
+  // Accumulator for assigned variables set.
+  BitVector av_;
+
+  DISALLOW_COPY_AND_ASSIGN(AssignedVariablesAnalyzer);
 };
 
 
diff --git a/src/date.js b/src/date.js
index 6b6ed42..b9e19d6 100644
--- a/src/date.js
+++ b/src/date.js
@@ -113,8 +113,11 @@
   // we must do this, but for compatibility with other browsers, we use
   // the actual year if it is in the range 1970..2037
   if (t >= 0 && t <= 2.1e12) return t;
-  var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)), MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
-  return TimeClip(MakeDate(day, TimeWithinDay(t)));
+
+  var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)),
+                    MONTH_FROM_TIME(t),
+                    DATE_FROM_TIME(t));
+  return MakeDate(day, TimeWithinDay(t));
 }
 
 
@@ -236,6 +239,10 @@
 }
 
 function LocalTimeNoCheck(time) {
+  if (time < -MAX_TIME_MS || time > MAX_TIME_MS) {
+    return $NaN;
+  }
+
   // Inline the DST offset cache checks for speed.
   // The cache is hit, or DaylightSavingsOffset is called,
   // before local_time_offset is used.
@@ -280,96 +287,45 @@
 }
 
 
-// Compute modified Julian day from year, month, date.
-function ToJulianDay(year, month, date) {
-  var jy = (month > 1) ? year : year - 1;
-  var jm = (month > 1) ? month + 2 : month + 14;
-  var ja = FLOOR(jy / 100);
-  return FLOOR(FLOOR(365.25*jy) + FLOOR(30.6001*jm) + date + 1720995) + 2 - ja + FLOOR(0.25*ja);
-}
+var ymd_from_time_cache = [$NaN, $NaN, $NaN];
+var ymd_from_time_cached_time = $NaN;
 
-var four_year_cycle_table = CalculateDateTable();
-
-
-function CalculateDateTable() {
-  var month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
-  var four_year_cycle_table = new $Array(1461);
-
-  var cumulative = 0;
-  var position = 0;
-  var leap_position = 0;
-  for (var month = 0; month < 12; month++) {
-    var month_bits = month << kMonthShift;
-    var length = month_lengths[month];
-    for (var day = 1; day <= length; day++) {
-      four_year_cycle_table[leap_position] =
-        month_bits + day;
-      four_year_cycle_table[366 + position] =
-        (1 << kYearShift) + month_bits + day;
-      four_year_cycle_table[731 + position] =
-        (2 << kYearShift) + month_bits + day;
-      four_year_cycle_table[1096 + position] =
-        (3 << kYearShift) + month_bits + day;
-      leap_position++;
-      position++;
+function YearFromTime(t) {
+  if (t !== ymd_from_time_cached_time) {
+    if (!$isFinite(t)) {
+      return $NaN;
     }
-    if (month == 1) {
-      four_year_cycle_table[leap_position++] = month_bits + 29;
+
+    %DateYMDFromTime(t, ymd_from_time_cache);
+    ymd_from_time_cached_time = t;
+  }
+
+  return ymd_from_time_cache[0];
+}
+
+function MonthFromTime(t) {
+  if (t !== ymd_from_time_cached_time) {
+    if (!$isFinite(t)) {
+      return $NaN;
     }
+    %DateYMDFromTime(t, ymd_from_time_cache);
+    ymd_from_time_cached_time = t;
   }
-  return four_year_cycle_table;
+
+  return ymd_from_time_cache[1];
 }
 
+function DateFromTime(t) {
+  if (t !== ymd_from_time_cached_time) {
+    if (!$isFinite(t)) {
+      return $NaN;
+    }
 
-// Constructor for creating objects holding year, month, and date.
-// Introduced to ensure the two return points in FromJulianDay match same map.
-function DayTriplet(year, month, date) {
-  this.year = year;
-  this.month = month;
-  this.date = date;
-}
-
-var julian_day_cache_triplet;
-var julian_day_cache_day = $NaN;
-
-// Compute year, month, and day from modified Julian day.
-// The missing days in 1582 are ignored for JavaScript compatibility.
-function FromJulianDay(julian) {
-  if (julian_day_cache_day == julian) {
-    return julian_day_cache_triplet;
+    %DateYMDFromTime(t, ymd_from_time_cache);
+    ymd_from_time_cached_time = t;
   }
-  var result;
-  // Avoid floating point and non-Smi maths in common case.  This is also a period of
-  // time where leap years are very regular.  The range is not too large to avoid overflow
-  // when doing the multiply-to-divide trick.
-  if (julian > kDayZeroInJulianDay &&
-      (julian - kDayZeroInJulianDay) < 40177) { // 1970 - 2080
-    var jsimple = (julian - kDayZeroInJulianDay) + 731; // Day 0 is 1st January 1968
-    var y = 1968;
-    // Divide by 1461 by multiplying with 22967 and shifting down by 25!
-    var after_1968 = (jsimple * 22967) >> 25;
-    y += after_1968 << 2;
-    jsimple -= 1461 * after_1968;
-    var four_year_cycle = four_year_cycle_table[jsimple];
-    result = new DayTriplet(y + (four_year_cycle >> kYearShift),
-                            (four_year_cycle & kMonthMask) >> kMonthShift,
-                            four_year_cycle & kDayMask);
-  } else {
-    var jalpha = FLOOR((julian - 1867216.25) / 36524.25);
-    var jb = julian + 1 + jalpha - FLOOR(0.25 * jalpha) + 1524;
-    var jc = FLOOR(6680.0 + ((jb-2439870) - 122.1)/365.25);
-    var jd = FLOOR(365 * jc + (0.25 * jc));
-    var je = FLOOR((jb - jd)/30.6001);
-    var m = je - 1;
-    if (m > 12) m -= 13;
-    var y = jc - 4715;
-    if (m > 2) { --y; --m; }
-    var d = jb - jd - FLOOR(30.6001 * je);
-    result = new DayTriplet(y, m, d);
-  }
-  julian_day_cache_day = julian;
-  julian_day_cache_triplet = result;
-  return result;
+
+  return ymd_from_time_cache[2];
 }
 
 
@@ -382,20 +338,18 @@
 function MakeDay(year, month, date) {
   if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
 
-  // Conversion to integers.
   year = TO_INTEGER(year);
   month = TO_INTEGER(month);
   date = TO_INTEGER(date);
 
-  // Overflow months into year.
-  year = year + FLOOR(month/12);
-  month = month % 12;
-  if (month < 0) {
-    month += 12;
+  if (year < kMinYear || year > kMaxYear ||
+      month < kMinMonth || month > kMaxMonth ||
+      date < kMinDate || date > kMaxDate) {
+    return $NaN;
   }
 
-  // Return days relative to Jan 1 1970.
-  return ToJulianDay(year, month, date) - kDayZeroInJulianDay;
+  // Now we rely on year, month and date being SMIs.
+  return %DateMakeDay(year, month, date);
 }
 
 
@@ -607,11 +561,10 @@
 
 
 function DateString(time) {
-  var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
   return WeekDays[WeekDay(time)] + ' '
-      + Months[YMD.month] + ' '
-      + TwoDigitString(YMD.date) + ' '
-      + YMD.year;
+      + Months[MonthFromTime(time)] + ' '
+      + TwoDigitString(DateFromTime(time)) + ' '
+      + YearFromTime(time);
 }
 
 
@@ -620,11 +573,10 @@
 
 
 function LongDateString(time) {
-  var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
   return LongWeekDays[WeekDay(time)] + ', '
-      + LongMonths[YMD.month] + ' '
-      + TwoDigitString(YMD.date) + ', '
-      + YMD.year;
+      + LongMonths[MonthFromTime(time)] + ' '
+      + TwoDigitString(DateFromTime(time)) + ', '
+      + YearFromTime(time);
 }
 
 
@@ -668,7 +620,7 @@
 // -------------------------------------------------------------------
 
 // Reused output buffer. Used when parsing date strings.
-var parse_buffer = $Array(7);
+var parse_buffer = $Array(8);
 
 // ECMA 262 - 15.9.4.2
 function DateParse(string) {
@@ -676,13 +628,13 @@
   if (IS_NULL(arr)) return $NaN;
 
   var day = MakeDay(arr[0], arr[1], arr[2]);
-  var time = MakeTime(arr[3], arr[4], arr[5], 0);
+  var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
   var date = MakeDate(day, time);
 
-  if (IS_NULL(arr[6])) {
+  if (IS_NULL(arr[7])) {
     return TimeClip(UTC(date));
   } else {
-    return TimeClip(date - arr[6] * 1000);
+    return TimeClip(date - arr[7] * 1000);
   }
 }
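
The rewritten date.js drops the precomputed Julian-day tables in favor of a
runtime call (%DateYMDFromTime) fronted by a one-entry cache keyed on the
last converted time value. A C++ sketch of the same memoization shape, with a
hypothetical ExpensiveYmdFromTime standing in for the runtime call (the
non-finite guard from the JavaScript version is omitted for brevity):

    #include <cmath>
    #include <cstdio>

    static double cached_time = std::nan("");  // NaN: first call always misses.
    static int cached_ymd[3];                  // year, month, day

    // Stand-in for the real conversion; the details are irrelevant here.
    static void ExpensiveYmdFromTime(double t, int out[3]) {
      out[0] = 1970 + static_cast<int>(t / (86400000.0 * 365.25));
      out[1] = 0;
      out[2] = 1;
    }

    static int YearFromTime(double t) {
      if (t != cached_time) {  // NaN never compares equal, so it never hits.
        ExpensiveYmdFromTime(t, cached_ymd);
        cached_time = t;
      }
      return cached_ymd[0];
    }

    int main() {
      // Second call is served from the cache.
      std::printf("%d %d\n", YearFromTime(0.0), YearFromTime(0.0));  // 1970 1970
      return 0;
    }
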
 
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index d5921d5..be353a3 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -54,16 +54,25 @@
         } else {
           // n + ":"
           if (!time.Add(n)) return false;
+          in.Skip('.');
         }
+      } else if (in.Skip('.') && time.IsExpecting(n)) {
+        time.Add(n);
+        if (!in.IsAsciiDigit()) return false;
+        int n = in.ReadUnsignedNumber();
+        time.AddFinal(n);
       } else if (tz.IsExpecting(n)) {
         tz.SetAbsoluteMinute(n);
       } else if (time.IsExpecting(n)) {
         time.AddFinal(n);
-        // Require end or white space immediately after finalizing time.
-        if (!in.IsEnd() && !in.SkipWhiteSpace()) return false;
+        // Require end, white space or Z immediately after finalizing time.
+        if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z')) return false;
       } else {
         if (!day.Add(n)) return false;
         in.Skip('-');  // Ignore suffix '-' for year, month, or day.
+        // Skip trailing 'T' for ECMAScript 5 date string format but make
+        // sure that it is followed by a digit (for the time).
+        if (in.Skip('T') && !in.IsAsciiDigit()) return false;
       }
     } else if (in.IsAsciiAlphaOrAbove()) {
       // Parse a "word" (sequence of chars. >= 'A').
diff --git a/src/dateparser.cc b/src/dateparser.cc
index 51a63e1..e68532f 100644
--- a/src/dateparser.cc
+++ b/src/dateparser.cc
@@ -33,6 +33,16 @@
 namespace internal {
 
 bool DateParser::DayComposer::Write(FixedArray* output) {
+  // Set year to 0 by default.
+  if (index_ < 1) {
+    comp_[index_++] = 1;
+  }
+
+  // Day and month default to 1.
+  while (index_ < kSize) {
+    comp_[index_++] = 1;
+  }
+
   int year = 0;  // Default year is 0 (=> 2000) for KJS compatibility.
   int month = kNone;
   int day = kNone;
@@ -88,6 +98,7 @@
   int& hour = comp_[0];
   int& minute = comp_[1];
   int& second = comp_[2];
+  int& millisecond = comp_[3];
 
   if (hour_offset_ != kNone) {
     if (!IsHour12(hour)) return false;
@@ -95,11 +106,13 @@
     hour += hour_offset_;
   }
 
-  if (!IsHour(hour) || !IsMinute(minute) || !IsSecond(second)) return false;
+  if (!IsHour(hour) || !IsMinute(minute) ||
+      !IsSecond(second) || !IsMillisecond(millisecond)) return false;
 
   output->set(HOUR, Smi::FromInt(hour));
   output->set(MINUTE, Smi::FromInt(minute));
   output->set(SECOND, Smi::FromInt(second));
+  output->set(MILLISECOND, Smi::FromInt(millisecond));
   return true;
 }
 
@@ -134,6 +147,7 @@
   {'p', 'm', '\0', DateParser::AM_PM, 12},
   {'u', 't', '\0', DateParser::TIME_ZONE_NAME, 0},
   {'u', 't', 'c', DateParser::TIME_ZONE_NAME, 0},
+  {'z', '\0', '\0', DateParser::TIME_ZONE_NAME, 0},
   {'g', 'm', 't', DateParser::TIME_ZONE_NAME, 0},
   {'c', 'd', 't', DateParser::TIME_ZONE_NAME, -5},
   {'c', 's', 't', DateParser::TIME_ZONE_NAME, -6},
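
The parser changes above teach DateParser about fractional seconds
("hh:mm:ss.sss", plus a trailing 'Z' and the ES5 'T' separator), carried
through as a fourth time component with its own range check. A standalone
sketch of the widened validation; ParseHms is a hypothetical helper for
illustration, not patch code:

    #include <cstdio>

    static bool Between(int x, int lo, int hi) { return lo <= x && x <= hi; }

    // Mirrors TimeComposer's IsHour/IsMinute/IsSecond and the new
    // IsMillisecond range checks, then assembles the time of day in ms.
    static bool ParseHms(int h, int m, int s, int ms, double* out_ms_of_day) {
      if (!Between(h, 0, 23) || !Between(m, 0, 59) ||
          !Between(s, 0, 59) || !Between(ms, 0, 999)) return false;
      *out_ms_of_day = ((h * 60.0 + m) * 60.0 + s) * 1000.0 + ms;
      return true;
    }

    int main() {
      double t;
      if (ParseHms(10, 30, 15, 250, &t)) std::printf("%.0f\n", t);  // 37815250
      return 0;
    }
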
diff --git a/src/dateparser.h b/src/dateparser.h
index d339a4f..d999d9c 100644
--- a/src/dateparser.h
+++ b/src/dateparser.h
@@ -44,13 +44,14 @@
   // [3]: hour
   // [4]: minute
   // [5]: second
-  // [6]: UTC offset in seconds, or null value if no timezone specified
+  // [6]: millisecond
+  // [7]: UTC offset in seconds, or null value if no timezone specified
   // If parsing fails, return false (content of output array is not defined).
   template <typename Char>
   static bool Parse(Vector<Char> str, FixedArray* output);
 
   enum {
-    YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, UTC_OFFSET, OUTPUT_SIZE
+    YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, UTC_OFFSET, OUTPUT_SIZE
   };
 
  private:
@@ -189,7 +190,9 @@
     TimeComposer() : index_(0), hour_offset_(kNone) {}
     bool IsEmpty() const { return index_ == 0; }
     bool IsExpecting(int n) const {
-      return (index_ == 1 && IsMinute(n)) || (index_ == 2 && IsSecond(n));
+      return (index_ == 1 && IsMinute(n)) ||
+             (index_ == 2 && IsSecond(n)) ||
+             (index_ == 3 && IsMillisecond(n));
     }
     bool Add(int n) {
       return index_ < kSize ? (comp_[index_++] = n, true) : false;
@@ -207,8 +210,9 @@
     static bool IsHour(int x) { return Between(x, 0, 23); }
     static bool IsHour12(int x) { return Between(x, 0, 12); }
     static bool IsSecond(int x) { return Between(x, 0, 59); }
+    static bool IsMillisecond(int x) { return Between(x, 0, 999); }
 
-    static const int kSize = 3;
+    static const int kSize = 4;
     int comp_[kSize];
     int index_;
     int hour_offset_;
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 55c25a9..e94cee4 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -239,6 +239,21 @@
 }
 
 
+// Creates a clone of the script break point that is linked to another script.
+ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
+  var copy = new ScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
+      other_script.id, this.line_, this.column_, this.groupId_);
+  copy.number_ = next_break_point_number++;
+  script_break_points.push(copy);
+
+  copy.hit_count_ = this.hit_count_;
+  copy.active_ = this.active_;
+  copy.condition_ = this.condition_;
+  copy.ignoreCount_ = this.ignoreCount_;
+  return copy;
+};
+
+
 ScriptBreakPoint.prototype.number = function() {
   return this.number_;
 };
@@ -274,6 +289,13 @@
 };
 
 
+ScriptBreakPoint.prototype.update_positions = function(line, column) {
+  this.line_ = line;
+  this.column_ = column;
+};
+
+
+
 ScriptBreakPoint.prototype.hit_count = function() {
   return this.hit_count_;
 };
@@ -327,7 +349,7 @@
   if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
     return this.script_id_ == script.id;
   } else {  // this.type_ == Debug.ScriptBreakPointType.ScriptName
-    return this.script_name_ == script.name &&
+    return this.script_name_ == script.nameOrSourceURL() &&
            script.line_offset <= this.line_  &&
            this.line_ < script.line_offset + script.lineCount();
   }
@@ -400,6 +422,17 @@
 }
 
 
+function GetScriptBreakPoints(script) {
+  var result = [];
+  for (var i = 0; i < script_break_points.length; i++) {
+    if (script_break_points[i].matchesScript(script)) {
+      result.push(script_break_points[i]);
+    }
+  }
+  return result;
+}
+
+
 Debug.setListener = function(listener, opt_data) {
   if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
     throw new Error('Parameters have wrong types.');
@@ -474,6 +507,11 @@
   return %DebugDisassembleConstructor(f);
 };
 
+Debug.ExecuteInDebugContext = function(f, without_debugger) {
+  if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+  return %ExecuteInDebugContext(f, !!without_debugger);
+};
+
 Debug.sourcePosition = function(f) {
   if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
   return %FunctionGetScriptSourcePosition(f);
@@ -778,6 +816,8 @@
 ExecutionState.prototype.frame = function(opt_index) {
   // If no index supplied return the selected frame.
   if (opt_index == null) opt_index = this.selected_frame;
+  if (opt_index < 0 || opt_index >= this.frameCount())
+    throw new Error('Illegal frame index.');
   return new FrameMirror(this.break_id, opt_index);
 };
 
@@ -1251,7 +1291,9 @@
       } else if (request.command == 'version') {
         this.versionRequest_(request, response);
       } else if (request.command == 'profile') {
-        this.profileRequest_(request, response);
+          this.profileRequest_(request, response);
+      } else if (request.command == 'changelive') {
+          this.changeLiveRequest_(request, response);
       } else {
         throw new Error('Unknown command "' + request.command + '" in request');
       }
@@ -1270,7 +1312,7 @@
         // Response controls running state.
         this.running_ = response.running;
       }
-      response.running = this.running_; 
+      response.running = this.running_;
       return response.toJSONProtocol();
     } catch (e) {
       // Failed to generate response - return generic error.
@@ -1866,12 +1908,12 @@
         return response.failed('Invalid types "' + request.arguments.types + '"');
       }
     }
-    
+
     if (!IS_UNDEFINED(request.arguments.includeSource)) {
       includeSource = %ToBoolean(request.arguments.includeSource);
       response.setOption('includeSource', includeSource);
     }
-    
+
     if (IS_ARRAY(request.arguments.ids)) {
       idsToInclude = {};
       var ids = request.arguments.ids;
@@ -1954,6 +1996,51 @@
 };
 
 
+DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) {
+  if (!Debug.LiveEdit) {
+    return response.failed('LiveEdit feature is not supported');
+  }
+  if (!request.arguments) {
+    return response.failed('Missing arguments');
+  }
+  var script_id = request.arguments.script_id;
+
+  var scripts = %DebugGetLoadedScripts();
+
+  var the_script = null;
+  for (var i = 0; i < scripts.length; i++) {
+    if (scripts[i].id == script_id) {
+      the_script = scripts[i];
+    }
+  }
+  if (!the_script) {
+    response.failed('Script not found');
+    return;
+  }
+
+  var change_log = new Array();
+
+  if (!IS_STRING(request.arguments.new_source)) {
+    throw new Error('new_source argument expected');
+  }
+
+  var new_source = request.arguments.new_source;
+
+  try {
+    Debug.LiveEdit.SetScriptSource(the_script, new_source, change_log);
+  } catch (e) {
+    if (e instanceof Debug.LiveEdit.Failure) {
+      // Treat it as a "success" so that the body with the change_log is
+      // sent back; the change_log will have its "failure" field set.
+      change_log.push({ failure: true, message: e.toString() });
+    } else {
+      throw e;
+    }
+  }
+  response.body = {change_log: change_log};
+};
+
+
 // Check whether the previously processed command caused the VM to become
 // running.
 DebugCommandProcessor.prototype.isRunning = function() {
@@ -2026,7 +2113,7 @@
       }
     }
   }
-  
+
   return content;
 }
 
@@ -2049,7 +2136,7 @@
 
 
 /**
- * Convert a value to its debugger protocol representation. 
+ * Convert a value to its debugger protocol representation.
  * @param {*} value The value to format as protocol value.
  * @param {MirrorSerializer} mirror_serializer The serializer to use if any
  *     mirror objects are encountered.
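
The new 'changelive' command follows the same JSON protocol shape as the
other commands dispatched above (seq/type/command/arguments). A hypothetical
exchange; only script_id, new_source, and change_log are taken from the code,
the remaining fields follow the general protocol and are illustrative:

    {"seq": 117, "type": "request", "command": "changelive",
     "arguments": {"script_id": 42,
                   "new_source": "function f() { return 1; }"}}

    {"seq": 118, "request_seq": 117, "type": "response",
     "command": "changelive", "success": true,
     "body": {"change_log": []}}

Note that a LiveEdit failure is still reported as success at the protocol
level; the handler records it as a {failure: true, message: ...} entry inside
change_log instead.
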
diff --git a/src/debug.cc b/src/debug.cc
index c71a98f..729f0ab 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -39,6 +39,7 @@
 #include "global-handles.h"
 #include "ic.h"
 #include "ic-inl.h"
+#include "messages.h"
 #include "natives.h"
 #include "stub-cache.h"
 #include "log.h"
@@ -123,7 +124,9 @@
     if (RelocInfo::IsCodeTarget(rmode())) {
       Address target = original_rinfo()->target_address();
       Code* code = Code::GetCodeFromTargetAddress(target);
-      if (code->is_inline_cache_stub() || RelocInfo::IsConstructCall(rmode())) {
+      if ((code->is_inline_cache_stub() &&
+           code->kind() != Code::BINARY_OP_IC) ||
+          RelocInfo::IsConstructCall(rmode())) {
         break_point_++;
         return;
       }
@@ -682,29 +685,26 @@
   // Compile the script.
   bool allow_natives_syntax = FLAG_allow_natives_syntax;
   FLAG_allow_natives_syntax = true;
-  Handle<JSFunction> boilerplate;
-  boilerplate = Compiler::Compile(source_code,
-                                  script_name,
-                                  0,
-                                  0,
-                                  NULL,
-                                  NULL,
-                                  Handle<String>::null(),
-                                  NATIVES_CODE);
+  Handle<SharedFunctionInfo> function_info;
+  function_info = Compiler::Compile(source_code,
+                                    script_name,
+                                    0, 0, NULL, NULL,
+                                    Handle<String>::null(),
+                                    NATIVES_CODE);
   FLAG_allow_natives_syntax = allow_natives_syntax;
 
   // Silently ignore stack overflows during compilation.
-  if (boilerplate.is_null()) {
+  if (function_info.is_null()) {
     ASSERT(Top::has_pending_exception());
     Top::clear_pending_exception();
     return false;
   }
 
-  // Execute the boilerplate function in the debugger context.
+  // Execute the shared function in the debugger context.
   Handle<Context> context = Top::global_context();
   bool caught_exception = false;
   Handle<JSFunction> function =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromSharedFunctionInfo(function_info, context);
   Handle<Object> result =
       Execution::TryCall(function, Handle<Object>(context->global()),
                          0, NULL, &caught_exception);
@@ -720,6 +720,7 @@
 
   // Mark this script as native and return successfully.
   Handle<Script> script(Script::cast(function->shared()->script()));
+  script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
   return true;
 }
 
@@ -760,6 +761,12 @@
   bool caught_exception =
       !CompileDebuggerScript(Natives::GetIndex("mirror")) ||
       !CompileDebuggerScript(Natives::GetIndex("debug"));
+
+  if (FLAG_enable_liveedit) {
+    caught_exception = caught_exception ||
+        !CompileDebuggerScript(Natives::GetIndex("liveedit"));
+  }
+
   Debugger::set_compiling_natives(false);
 
   // Make sure we mark the debugger as not loading before we might
@@ -799,7 +806,7 @@
 
 
 void Debug::Iterate(ObjectVisitor* v) {
-  v->VisitPointer(bit_cast<Object**, Code**>(&(debug_break_return_)));
+  v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_return_)));
 }
 
 
@@ -807,6 +814,8 @@
   HandleScope scope;
   ASSERT(args.length() == 0);
 
+  thread_local_.frames_are_dropped_ = false;
+
   // Get the top-most JavaScript frame.
   JavaScriptFrameIterator it;
   JavaScriptFrame* frame = it.frame();
@@ -883,8 +892,13 @@
     PrepareStep(step_action, step_count);
   }
 
-  // Install jump to the call address which was overwritten.
-  SetAfterBreakTarget(frame);
+  if (thread_local_.frames_are_dropped_) {
+    // We must have been calling IC stub. Do not return there anymore.
+    Code* plain_return = Builtins::builtin(Builtins::PlainReturn_LiveEdit);
+    thread_local_.after_break_target_ = plain_return->entry();
+  } else {
+    SetAfterBreakTarget(frame);
+  }
 
   return Heap::undefined_value();
 }
@@ -1342,24 +1356,26 @@
   // Find the builtin debug break function matching the calling convention
   // used by the call site.
   if (code->is_inline_cache_stub()) {
-    if (code->is_call_stub()) {
-      return ComputeCallDebugBreak(code->arguments_count());
-    }
-    if (code->is_load_stub()) {
-      return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
-    }
-    if (code->is_store_stub()) {
-      return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
-    }
-    if (code->is_keyed_load_stub()) {
-      Handle<Code> result =
-          Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
-      return result;
-    }
-    if (code->is_keyed_store_stub()) {
-      Handle<Code> result =
-          Handle<Code>(Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
-      return result;
+    switch (code->kind()) {
+      case Code::CALL_IC:
+        return ComputeCallDebugBreak(code->arguments_count());
+
+      case Code::LOAD_IC:
+        return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
+
+      case Code::STORE_IC:
+        return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
+
+      case Code::KEYED_LOAD_IC:
+        return Handle<Code>(
+            Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
+
+      case Code::KEYED_STORE_IC:
+        return Handle<Code>(
+            Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
+
+      default:
+        UNREACHABLE();
     }
   }
   if (RelocInfo::IsConstructCall(mode)) {
@@ -1646,6 +1662,12 @@
 }
 
 
+void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id) {
+  thread_local_.frames_are_dropped_ = true;
+  thread_local_.break_frame_id_ = new_break_frame_id;
+}
+
+
 bool Debug::IsDebugGlobal(GlobalObject* global) {
   return IsLoaded() && global == Debug::debug_context()->global();
 }
@@ -1964,7 +1986,8 @@
 
 
 // Handle debugger actions when a new script is compiled.
-void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
+void Debugger::OnAfterCompile(Handle<Script> script,
+                              AfterCompileFlags after_compile_flags) {
   HandleScope scope;
 
   // Add the newly compiled script to the script cache.
@@ -2011,7 +2034,7 @@
     return;
   }
   // Bail out based on state or if there is no listener for this event
-  if (in_debugger) return;
+  if (in_debugger && (after_compile_flags & SEND_WHEN_DEBUGGING) == 0) return;
   if (!Debugger::EventActive(v8::AfterCompile)) return;
 
   // Create the compile state object.
@@ -2029,31 +2052,6 @@
 }
 
 
-void Debugger::OnNewFunction(Handle<JSFunction> function) {
-  return;
-  HandleScope scope;
-
-  // Bail out based on state or if there is no listener for this event
-  if (Debug::InDebugger()) return;
-  if (compiling_natives()) return;
-  if (!Debugger::EventActive(v8::NewFunction)) return;
-
-  // Enter the debugger.
-  EnterDebugger debugger;
-  if (debugger.FailedToEnter()) return;
-
-  // Create the event object.
-  bool caught_exception = false;
-  Handle<Object> event_data = MakeNewFunctionEvent(function, &caught_exception);
-  // Bail out and don't call debugger if exception.
-  if (caught_exception) {
-    return;
-  }
-  // Process debug event.
-  ProcessDebugEvent(v8::NewFunction, Handle<JSObject>::cast(event_data), true);
-}
-
-
 void Debugger::OnScriptCollected(int id) {
   HandleScope scope;
 
@@ -2135,6 +2133,13 @@
 }
 
 
+Handle<Context> Debugger::GetDebugContext() {
+  never_unload_debugger_ = true;
+  EnterDebugger debugger;
+  return Debug::debug_context();
+}
+
+
 void Debugger::UnloadDebugger() {
   // Make sure that there are no breakpoints left.
   Debug::ClearAllBreakPoints();
@@ -2463,7 +2468,7 @@
 
   // Enter the debugger.
   EnterDebugger debugger;
-  if (debugger.FailedToEnter() || !debugger.HasJavaScriptFrames()) {
+  if (debugger.FailedToEnter()) {
     return Factory::undefined_value();
   }
 
@@ -2476,8 +2481,12 @@
 
   static const int kArgc = 2;
   Object** argv[kArgc] = { exec_state.location(), data.location() };
-  Handle<Object> result = Execution::Call(fun, Factory::undefined_value(),
-                                          kArgc, argv, pending_exception);
+  Handle<Object> result = Execution::Call(
+      fun,
+      Handle<Object>(Debug::debug_context_->global_proxy()),
+      kArgc,
+      argv,
+      pending_exception);
   return result;
 }
 
diff --git a/src/debug.h b/src/debug.h
index cab9e8e..e7ac94e 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -377,10 +377,18 @@
   static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
   static void GenerateReturnDebugBreak(MacroAssembler* masm);
   static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
+  static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
+  static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
 
   // Called from stub-cache.cc.
   static void GenerateCallICDebugBreak(MacroAssembler* masm);
 
+  static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id);
+
+  static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
+                                     Handle<Code> code);
+  static const int kFrameDropperFrameSize;
+
  private:
   static bool CompileDebuggerScript(int index);
   static void ClearOneShot();
@@ -446,6 +454,9 @@
     // Storage location for jump when exiting debug break calls.
     Address after_break_target_;
 
+    // Indicates that LiveEdit has patched the stack.
+    bool frames_are_dropped_;
+
     // Top debugger entry.
     EnterDebugger* debugger_entry_;
 
@@ -604,8 +615,13 @@
   static void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
   static void OnException(Handle<Object> exception, bool uncaught);
   static void OnBeforeCompile(Handle<Script> script);
+
+  enum AfterCompileFlags {
+    NO_AFTER_COMPILE_FLAGS,
+    SEND_WHEN_DEBUGGING
+  };
   static void OnAfterCompile(Handle<Script> script,
-                           Handle<JSFunction> fun);
+                             AfterCompileFlags after_compile_flags);
   static void OnNewFunction(Handle<JSFunction> fun);
   static void OnScriptCollected(int id);
   static void ProcessDebugEvent(v8::DebugEvent event,
@@ -649,9 +665,12 @@
 
   static void CallMessageDispatchHandler();
 
+  static Handle<Context> GetDebugContext();
+
   // Unload the debugger if possible. Only called when no debugger is currently
   // active.
   static void UnloadDebugger();
+  friend void ForceUnloadDebugger();  // In test-debug.cc
 
   inline static bool EventActive(v8::DebugEvent event) {
     ScopedLock with(debugger_access_);
diff --git a/src/number-info.h b/src/diy-fp.cc
similarity index 64%
copy from src/number-info.h
copy to src/diy-fp.cc
index c6f32e4..c54bd1d 100644
--- a/src/number-info.h
+++ b/src/diy-fp.cc
@@ -25,48 +25,34 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_NUMBER_INFO_H_
-#define V8_NUMBER_INFO_H_
+#include "v8.h"
+
+#include "diy-fp.h"
 
 namespace v8 {
 namespace internal {
 
-class NumberInfo : public AllStatic {
- public:
-  enum Type {
-    kUnknown = 0,
-    kNumber = 1,
-    kSmi = 3,
-    kHeapNumber = 5,
-    kUninitialized = 7
-  };
-
-  // Return the weakest (least precise) common type.
-  static Type Combine(Type a, Type b) {
-    // Make use of the order of enum values.
-    return static_cast<Type>(a & b);
-  }
-
-  static bool IsNumber(Type a) {
-    ASSERT(a != kUninitialized);
-    return ((a & kNumber) != 0);
-  }
-
-  static const char* ToString(Type a) {
-    switch (a) {
-      case kUnknown: return "UnknownType";
-      case kNumber: return "NumberType";
-      case kSmi: return "SmiType";
-      case kHeapNumber: return "HeapNumberType";
-      case kUninitialized:
-        UNREACHABLE();
-        return "UninitializedType";
-    }
-    UNREACHABLE();
-    return "Unreachable code";
-  }
-};
+void DiyFp::Multiply(const DiyFp& other) {
+  // Emulates a 128-bit multiplication.
+  // However, the resulting number only contains 64 bits: the least
+  // significant 64 bits are used only for rounding the most
+  // significant 64 bits.
+  const uint64_t kM32 = 0xFFFFFFFFu;
+  uint64_t a = f_ >> 32;
+  uint64_t b = f_ & kM32;
+  uint64_t c = other.f_ >> 32;
+  uint64_t d = other.f_ & kM32;
+  uint64_t ac = a * c;
+  uint64_t bc = b * c;
+  uint64_t ad = a * d;
+  uint64_t bd = b * d;
+  uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
+  // By adding 1U << 31 to tmp we round the final result.
+  // Halfway cases will be round up.
+  tmp += 1U << 31;
+  uint64_t result_f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+  e_ += other.e_ + 64;
+  f_ = result_f;
+}
 
 } }  // namespace v8::internal
-
-#endif  // V8_NUMBER_INFO_H_
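
To see the half-word decomposition in DiyFp::Multiply at work: 1.0 as a
normalized DiyFp is (f = 2^63, e = -63), and squaring it should yield
(f = 2^62, e = -62), which is again 1.0. A standalone replica of the
arithmetic, for illustration only:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t f1 = uint64_t{1} << 63, f2 = uint64_t{1} << 63;  // 1.0 * 1.0
      int e1 = -63, e2 = -63;

      // Split each 64-bit significand into 32-bit halves and accumulate
      // the four partial products, exactly as DiyFp::Multiply does.
      const uint64_t kM32 = 0xFFFFFFFFu;
      uint64_t a = f1 >> 32, b = f1 & kM32;
      uint64_t c = f2 >> 32, d = f2 & kM32;
      uint64_t tmp = ((b * d) >> 32) + ((a * d) & kM32) + ((b * c) & kM32);
      tmp += uint64_t{1} << 31;  // Round the discarded low half to nearest.
      uint64_t f = a * c + ((a * d) >> 32) + ((b * c) >> 32) + (tmp >> 32);
      int e = e1 + e2 + 64;

      // Prints f = 2^62, e = -62, i.e. the value 2^62 * 2^-62 == 1.0.
      std::printf("f = %llu, e = %d\n", (unsigned long long)f, e);
      return 0;
    }
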
diff --git a/src/diy-fp.h b/src/diy-fp.h
new file mode 100644
index 0000000..cfe05ef
--- /dev/null
+++ b/src/diy-fp.h
@@ -0,0 +1,117 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DIY_FP_H_
+#define V8_DIY_FP_H_
+
+namespace v8 {
+namespace internal {
+
+// This "Do It Yourself Floating Point" class implements a floating-point number
+// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
+// have the most significant bit of the significand set.
+// Multiplication and Subtraction do not normalize their results.
+// DiyFp is not designed to contain special doubles (NaN and Infinity).
+class DiyFp {
+ public:
+  static const int kSignificandSize = 64;
+
+  DiyFp() : f_(0), e_(0) {}
+  DiyFp(uint64_t f, int e) : f_(f), e_(e) {}
+
+  // this = this - other.
+  // The exponents of both numbers must be the same, and the significand of
+  // this must not be smaller than the significand of other.
+  // The result will not be normalized.
+  void Subtract(const DiyFp& other) {
+    ASSERT(e_ == other.e_);
+    ASSERT(f_ >= other.f_);
+    f_ -= other.f_;
+  }
+
+  // Returns a - b.
+  // The exponents of both numbers must be the same, and the significand of
+  // a must not be smaller than that of b. The result will not be normalized.
+  static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
+    DiyFp result = a;
+    result.Subtract(b);
+    return result;
+  }
+
+
+  // this = this * other.
+  void Multiply(const DiyFp& other);
+
+  // Returns a * b.
+  static DiyFp Times(const DiyFp& a, const DiyFp& b) {
+    DiyFp result = a;
+    result.Multiply(b);
+    return result;
+  }
+
+  void Normalize() {
+    ASSERT(f_ != 0);
+    uint64_t f = f_;
+    int e = e_;
+
+    // This method is mainly called for normalizing boundaries. In general
+    // boundaries need to be shifted by 10 bits. We thus optimize for this case.
+    const uint64_t k10MSBits = V8_2PART_UINT64_C(0xFFC00000, 00000000);
+    while ((f & k10MSBits) == 0) {
+      f <<= 10;
+      e -= 10;
+    }
+    while ((f & kUint64MSB) == 0) {
+      f <<= 1;
+      e--;
+    }
+    f_ = f;
+    e_ = e;
+  }
+
+  static DiyFp Normalize(const DiyFp& a) {
+    DiyFp result = a;
+    result.Normalize();
+    return result;
+  }
+
+  uint64_t f() const { return f_; }
+  int e() const { return e_; }
+
+  void set_f(uint64_t new_value) { f_ = new_value; }
+  void set_e(int new_value) { e_ = new_value; }
+
+ private:
+  static const uint64_t kUint64MSB = V8_2PART_UINT64_C(0x80000000, 00000000);
+
+  uint64_t f_;
+  int e_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_DIY_FP_H_
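
A quick usage check for Normalize(): the value 1.0 stored unnormalized as
f = 1, e = 0 must come out as f = 2^63, e = -63, since 2^63 * 2^-63 == 1.
The sketch below replays the second loop of Normalize (the 10-bit fast path
is only an optimization and does not change the result):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t f = 1;  // The value f * 2^e == 1.0, unnormalized.
      int e = 0;
      const uint64_t kMSB = uint64_t{1} << 63;
      while ((f & kMSB) == 0) {  // Shift until the top significand bit is set.
        f <<= 1;
        e--;
      }
      std::printf("f == 2^63: %d, e = %d\n", f == kMSB, e);  // 1, -63
      return 0;
    }
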
diff --git a/src/double.h b/src/double.h
new file mode 100644
index 0000000..65f8c94
--- /dev/null
+++ b/src/double.h
@@ -0,0 +1,169 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DOUBLE_H_
+#define V8_DOUBLE_H_
+
+#include "diy-fp.h"
+
+namespace v8 {
+namespace internal {
+
+// We assume that doubles and uint64_t have the same endianness.
+static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
+static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+
+// Helper functions for doubles.
+class Double {
+ public:
+  static const uint64_t kSignMask = V8_2PART_UINT64_C(0x80000000, 00000000);
+  static const uint64_t kExponentMask = V8_2PART_UINT64_C(0x7FF00000, 00000000);
+  static const uint64_t kSignificandMask =
+      V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
+  static const uint64_t kHiddenBit = V8_2PART_UINT64_C(0x00100000, 00000000);
+
+  Double() : d64_(0) {}
+  explicit Double(double d) : d64_(double_to_uint64(d)) {}
+  explicit Double(uint64_t d64) : d64_(d64) {}
+
+  DiyFp AsDiyFp() const {
+    ASSERT(!IsSpecial());
+    return DiyFp(Significand(), Exponent());
+  }
+
+  // this->Significand() must not be 0.
+  DiyFp AsNormalizedDiyFp() const {
+    uint64_t f = Significand();
+    int e = Exponent();
+
+    ASSERT(f != 0);
+
+    // The current double could be a denormal.
+    while ((f & kHiddenBit) == 0) {
+      f <<= 1;
+      e--;
+    }
+    // Do the final shifts in one go. Don't forget the hidden bit (the '-1').
+    f <<= DiyFp::kSignificandSize - kSignificandSize - 1;
+    e -= DiyFp::kSignificandSize - kSignificandSize - 1;
+    return DiyFp(f, e);
+  }
+
+  // Returns the double's bits as uint64.
+  uint64_t AsUint64() const {
+    return d64_;
+  }
+
+  int Exponent() const {
+    if (IsDenormal()) return kDenormalExponent;
+
+    uint64_t d64 = AsUint64();
+    int biased_e = static_cast<int>((d64 & kExponentMask) >> kSignificandSize);
+    return biased_e - kExponentBias;
+  }
+
+  uint64_t Significand() const {
+    uint64_t d64 = AsUint64();
+    uint64_t significand = d64 & kSignificandMask;
+    if (!IsDenormal()) {
+      return significand + kHiddenBit;
+    } else {
+      return significand;
+    }
+  }
+
+  // Returns true if the double is a denormal.
+  bool IsDenormal() const {
+    uint64_t d64 = AsUint64();
+    return (d64 & kExponentMask) == 0;
+  }
+
+  // We consider denormals not to be special.
+  // Hence only Infinity and NaN are special.
+  bool IsSpecial() const {
+    uint64_t d64 = AsUint64();
+    return (d64 & kExponentMask) == kExponentMask;
+  }
+
+  bool IsNan() const {
+    uint64_t d64 = AsUint64();
+    return ((d64 & kExponentMask) == kExponentMask) &&
+        ((d64 & kSignificandMask) != 0);
+  }
+
+
+  bool IsInfinite() const {
+    uint64_t d64 = AsUint64();
+    return ((d64 & kExponentMask) == kExponentMask) &&
+        ((d64 & kSignificandMask) == 0);
+  }
+
+
+  int Sign() const {
+    uint64_t d64 = AsUint64();
+    return (d64 & kSignMask) == 0 ? 1 : -1;
+  }
+
+
+  // Returns the two boundaries of this.
+  // The bigger boundary (m_plus) is normalized. The lower boundary has the same
+  // exponent as m_plus.
+  void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
+    DiyFp v = this->AsDiyFp();
+    bool significand_is_zero = (v.f() == kHiddenBit);
+    DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
+    DiyFp m_minus;
+    if (significand_is_zero && v.e() != kDenormalExponent) {
+      // The boundary is closer. Think of v = 1000e10 and v- = 9999e9.
+      // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
+      // at a distance of 1e8.
+      // The only exception is for the smallest normal: the largest denormal is
+      // at the same distance as its successor.
+      // Note: denormals have the same exponent as the smallest normals.
+      m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
+    } else {
+      m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
+    }
+    m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
+    m_minus.set_e(m_plus.e());
+    *out_m_plus = m_plus;
+    *out_m_minus = m_minus;
+  }
+
+  double value() const { return uint64_to_double(d64_); }
+
+ private:
+  static const int kSignificandSize = 52;  // Excludes the hidden bit.
+  static const int kExponentBias = 0x3FF + kSignificandSize;
+  static const int kDenormalExponent = -kExponentBias + 1;
+
+  uint64_t d64_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_DOUBLE_H_
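
Tracing class Double on the value 1.0: its bits are 0x3FF0000000000000, so
the biased exponent is 0x3FF and the stored significand is 0; adding the
hidden bit gives AsDiyFp() == (2^52, -52), and 2^52 * 2^-52 == 1.0. A
self-contained check of that decomposition, with the masks copied from the
header above:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      double d = 1.0;
      uint64_t d64;
      std::memcpy(&d64, &d, sizeof d64);  // Same-endianness bit cast.

      const uint64_t kExponentMask    = 0x7FF0000000000000ULL;
      const uint64_t kSignificandMask = 0x000FFFFFFFFFFFFFULL;
      const uint64_t kHiddenBit       = 0x0010000000000000ULL;
      const int kSignificandSize = 52;
      const int kExponentBias = 0x3FF + kSignificandSize;

      int e = static_cast<int>((d64 & kExponentMask) >> kSignificandSize)
              - kExponentBias;                             // -52
      uint64_t f = (d64 & kSignificandMask) + kHiddenBit;  // 2^52
      std::printf("f == 2^52: %d, e = %d\n", f == kHiddenBit, e);  // 1, -52
      return 0;
    }
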
diff --git a/src/execution.cc b/src/execution.cc
index 2068413..006d358 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -46,9 +46,6 @@
                              int argc,
                              Object*** args,
                              bool* has_pending_exception) {
-  // Make sure we have a real function, not a boilerplate function.
-  ASSERT(!func->IsBoilerplate());
-
   // Entering JavaScript.
   VMState state(JS);
 
@@ -221,8 +218,8 @@
 
 void StackGuard::EnableInterrupts() {
   ExecutionAccess access;
-  if (IsSet(access)) {
-    set_limits(kInterruptLimit, access);
+  if (has_pending_interrupts(access)) {
+    set_interrupt_limits(access);
   }
 }
 
@@ -249,11 +246,6 @@
 }
 
 
-bool StackGuard::IsSet(const ExecutionAccess& lock) {
-  return thread_local_.interrupt_flags_ != 0;
-}
-
-
 bool StackGuard::IsInterrupted() {
   ExecutionAccess access;
   return thread_local_.interrupt_flags_ & INTERRUPT;
@@ -263,7 +255,7 @@
 void StackGuard::Interrupt() {
   ExecutionAccess access;
   thread_local_.interrupt_flags_ |= INTERRUPT;
-  set_limits(kInterruptLimit, access);
+  set_interrupt_limits(access);
 }
 
 
@@ -276,7 +268,7 @@
 void StackGuard::Preempt() {
   ExecutionAccess access;
   thread_local_.interrupt_flags_ |= PREEMPT;
-  set_limits(kInterruptLimit, access);
+  set_interrupt_limits(access);
 }
 
 
@@ -289,7 +281,7 @@
 void StackGuard::TerminateExecution() {
   ExecutionAccess access;
   thread_local_.interrupt_flags_ |= TERMINATE;
-  set_limits(kInterruptLimit, access);
+  set_interrupt_limits(access);
 }
 
 
@@ -303,7 +295,7 @@
 void StackGuard::DebugBreak() {
   ExecutionAccess access;
   thread_local_.interrupt_flags_ |= DEBUGBREAK;
-  set_limits(kInterruptLimit, access);
+  set_interrupt_limits(access);
 }
 
 
@@ -317,7 +309,7 @@
   if (FLAG_debugger_auto_break) {
     ExecutionAccess access;
     thread_local_.interrupt_flags_ |= DEBUGCOMMAND;
-    set_limits(kInterruptLimit, access);
+    set_interrupt_limits(access);
   }
 }
 #endif
@@ -325,7 +317,7 @@
 void StackGuard::Continue(InterruptFlag after_what) {
   ExecutionAccess access;
   thread_local_.interrupt_flags_ &= ~static_cast<int>(after_what);
-  if (thread_local_.interrupt_flags_ == 0) {
+  if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) {
     reset_limits(access);
   }
 }
diff --git a/src/execution.h b/src/execution.h
index 10683d6..e683e12 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -199,12 +199,24 @@
 
  private:
   // You should hold the ExecutionAccess lock when calling this method.
-  static bool IsSet(const ExecutionAccess& lock);
+  static bool has_pending_interrupts(const ExecutionAccess& lock) {
+    // Sanity check: we should only be asking about pending interrupts
+    // when we are no longer postponing them.
+    ASSERT(!should_postpone_interrupts(lock));
+    return thread_local_.interrupt_flags_ != 0;
+  }
 
   // You should hold the ExecutionAccess lock when calling this method.
-  static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
-    thread_local_.jslimit_ = value;
-    thread_local_.climit_ = value;
+  static bool should_postpone_interrupts(const ExecutionAccess& lock) {
+    return thread_local_.postpone_interrupts_nesting_ > 0;
+  }
+
+  // You should hold the ExecutionAccess lock when calling this method.
+  static void set_interrupt_limits(const ExecutionAccess& lock) {
+    // Ignore attempts to interrupt when interrupts are postponed.
+    if (should_postpone_interrupts(lock)) return;
+    thread_local_.jslimit_ = kInterruptLimit;
+    thread_local_.climit_ = kInterruptLimit;
     Heap::SetStackLimits();
   }
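
The net effect of the StackGuard changes is that an interrupt request
arriving while interrupts are postponed only records its flag; the stack
limits are left alone, and Continue() restores the real limits only once
nothing is pending. A toy model of that invariant, not V8's StackGuard:

    #include <cstdint>
    #include <cstdio>

    struct GuardModel {
      static constexpr uintptr_t kRealLimit = 0x1000;  // Stand-in value.
      static constexpr uintptr_t kInterruptLimit = ~uintptr_t{0};

      uintptr_t jslimit = kRealLimit;
      int interrupt_flags = 0;
      int postpone_nesting = 0;

      void Interrupt(int flag) {
        interrupt_flags |= flag;
        if (postpone_nesting > 0) return;  // Postponed: record the flag only.
        jslimit = kInterruptLimit;         // Make the next stack check fail.
      }
      void Continue(int flag) {
        interrupt_flags &= ~flag;
        if (postpone_nesting == 0 && interrupt_flags == 0) jslimit = kRealLimit;
      }
    };

    int main() {
      GuardModel g;
      g.postpone_nesting = 1;
      g.Interrupt(1);  // Recorded, but the limit stays put while postponing.
      std::printf("limit unchanged: %d\n", g.jslimit == GuardModel::kRealLimit);
      g.postpone_nesting = 0;
      g.Continue(1);   // Nothing pending anymore; the real limit holds.
      std::printf("limit restored: %d\n", g.jslimit == GuardModel::kRealLimit);
      return 0;
    }
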
 
diff --git a/src/factory.cc b/src/factory.cc
index 8d20749..35d3c54 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -43,9 +43,11 @@
 }
 
 
-Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size) {
+Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
+                                                   PretenureFlag pretenure) {
   ASSERT(0 <= size);
-  CALL_HEAP_FUNCTION(Heap::AllocateFixedArrayWithHoles(size), FixedArray);
+  CALL_HEAP_FUNCTION(Heap::AllocateFixedArrayWithHoles(size, pretenure),
+                     FixedArray);
 }
 
 
@@ -282,31 +284,26 @@
 }
 
 
-Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
-    Handle<JSFunction> boilerplate,
+Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
+    Handle<SharedFunctionInfo> function_info,
     Handle<Map> function_map,
     PretenureFlag pretenure) {
-  ASSERT(boilerplate->IsBoilerplate());
-  ASSERT(!boilerplate->has_initial_map());
-  ASSERT(!boilerplate->has_prototype());
-  ASSERT(boilerplate->properties() == Heap::empty_fixed_array());
-  ASSERT(boilerplate->elements() == Heap::empty_fixed_array());
   CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
-                                            boilerplate->shared(),
+                                            *function_info,
                                             Heap::the_hole_value(),
                                             pretenure),
                      JSFunction);
 }
 
 
-Handle<JSFunction> Factory::NewFunctionFromBoilerplate(
-    Handle<JSFunction> boilerplate,
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+    Handle<SharedFunctionInfo> function_info,
     Handle<Context> context,
     PretenureFlag pretenure) {
-  Handle<JSFunction> result = BaseNewFunctionFromBoilerplate(
-      boilerplate, Top::function_map(), pretenure);
+  Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
+      function_info, Top::function_map(), pretenure);
   result->set_context(*context);
-  int number_of_literals = boilerplate->NumberOfLiterals();
+  int number_of_literals = function_info->num_literals();
   Handle<FixedArray> literals =
       Factory::NewFixedArray(number_of_literals, pretenure);
   if (number_of_literals > 0) {
@@ -317,7 +314,6 @@
                   context->global_context());
   }
   result->set_literals(*literals);
-  ASSERT(!result->IsBoilerplate());
   return result;
 }
 
@@ -490,36 +486,6 @@
 }
 
 
-Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name,
-                                                   int number_of_literals,
-                                                   Handle<Code> code) {
-  Handle<JSFunction> function = NewFunctionBoilerplate(name);
-  function->set_code(*code);
-  int literals_array_size = number_of_literals;
-  // If the function contains object, regexp or array literals,
-  // allocate extra space for a literals array prefix containing the
-  // object, regexp and array constructor functions.
-  if (number_of_literals > 0) {
-    literals_array_size += JSFunction::kLiteralsPrefixSize;
-  }
-  Handle<FixedArray> literals =
-      Factory::NewFixedArray(literals_array_size, TENURED);
-  function->set_literals(*literals);
-  ASSERT(!function->has_initial_map());
-  ASSERT(!function->has_prototype());
-  return function;
-}
-
-
-Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name) {
-  Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
-  CALL_HEAP_FUNCTION(Heap::AllocateFunction(Heap::boilerplate_function_map(),
-                                            *shared,
-                                            Heap::the_hole_value()),
-                     JSFunction);
-}
-
-
 Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
                                                      InstanceType type,
                                                      int instance_size,
@@ -547,6 +513,16 @@
 }
 
 
+Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
+                                                        Handle<Code> code) {
+  Handle<JSFunction> function = NewFunctionWithoutPrototype(name);
+  function->set_code(*code);
+  ASSERT(!function->has_initial_map());
+  ASSERT(!function->has_prototype());
+  return function;
+}
+
+
 Handle<Code> Factory::NewCode(const CodeDesc& desc,
                               ZoneScopeInfo* sinfo,
                               Code::Flags flags,
@@ -560,6 +536,11 @@
 }
 
 
+Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
+  CALL_HEAP_FUNCTION(Heap::CopyCode(*code, reloc_info), Code);
+}
+
+
 static inline Object* DoCopyInsert(DescriptorArray* array,
                                    String* key,
                                    Object* value,
@@ -681,6 +662,22 @@
 }
 
 
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
+    Handle<String> name, int number_of_literals, Handle<Code> code) {
+  Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
+  shared->set_code(*code);
+  int literals_array_size = number_of_literals;
+  // If the function contains object, regexp or array literals,
+  // allocate extra space for a literals array prefix containing the
+  // context.
+  if (number_of_literals > 0) {
+    literals_array_size += JSFunction::kLiteralsPrefixSize;
+  }
+  shared->set_num_literals(literals_array_size);
+  return shared;
+}
+
+
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
   CALL_HEAP_FUNCTION(Heap::AllocateSharedFunctionInfo(*name),
                      SharedFunctionInfo);
@@ -718,6 +715,24 @@
 }
 
 
+Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
+    Handle<String> name) {
+  Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
+  CALL_HEAP_FUNCTION(Heap::AllocateFunction(
+                         *Top::function_without_prototype_map(),
+                         *function_share,
+                         *the_hole_value()),
+                     JSFunction);
+}
+
+
+Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name) {
+  Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name);
+  fun->set_context(Top::context()->global_context());
+  return fun;
+}
+
+
 Handle<Object> Factory::ToObject(Handle<Object> object) {
   CALL_HEAP_FUNCTION(object->ToObject(), Object);
 }
@@ -866,6 +881,7 @@
     map->set_instance_descriptors(*array);
   }
 
+  ASSERT(result->shared()->IsApiFunction());
   return result;
 }
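
NewSharedFunctionInfo above sizes the literals array exactly as the removed
NewFunctionBoilerplate did: a function with at least one object, regexp, or
array literal reserves JSFunction::kLiteralsPrefixSize extra slots at the
front of the array, while a literal-free function reserves none. As a sketch
(the prefix size of 1 is illustrative, not necessarily V8's constant):

    #include <cstdio>

    // Returns the total literals-array size for a function with the given
    // number of literals; the prefix is only paid for when literals exist.
    static int LiteralsArraySize(int number_of_literals, int prefix_size) {
      return number_of_literals > 0 ? number_of_literals + prefix_size
                                    : number_of_literals;
    }

    int main() {
      std::printf("%d %d\n", LiteralsArraySize(0, 1), LiteralsArraySize(3, 1));
      // Prints "0 4": no prefix without literals, prefix + 3 slots otherwise.
      return 0;
    }
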
 
diff --git a/src/factory.h b/src/factory.h
index 2a347cd..8a190fa 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -30,11 +30,12 @@
 
 #include "globals.h"
 #include "heap.h"
-#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
 
+// Forward declarations.
+class ZoneScopeInfo;
 
 // Interface for handle based allocation.
 
@@ -46,7 +47,9 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   // Allocate a new fixed array with non-existing entries (the hole).
-  static Handle<FixedArray> NewFixedArrayWithHoles(int size);
+  static Handle<FixedArray> NewFixedArrayWithHoles(
+      int size,
+      PretenureFlag pretenure = NOT_TENURED);
 
   static Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
 
@@ -215,10 +218,17 @@
   static Handle<JSFunction> NewFunction(Handle<String> name,
                                         Handle<Object> prototype);
 
+  static Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name);
+
   static Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
 
-  static Handle<JSFunction> NewFunctionFromBoilerplate(
-      Handle<JSFunction> boilerplate,
+  static Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
+      Handle<SharedFunctionInfo> function_info,
+      Handle<Map> function_map,
+      PretenureFlag pretenure);
+
+  static Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+      Handle<SharedFunctionInfo> function_info,
       Handle<Context> context,
       PretenureFlag pretenure = TENURED);
 
@@ -229,6 +239,8 @@
 
   static Handle<Code> CopyCode(Handle<Code> code);
 
+  static Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
+
   static Handle<Object> ToObject(Handle<Object> object);
   static Handle<Object> ToObject(Handle<Object> object,
                                  Handle<Context> global_context);
@@ -270,12 +282,6 @@
                                         Handle<Code> code,
                                         bool force_initial_map);
 
-  static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name,
-                                                   int number_of_literals,
-                                                   Handle<Code> code);
-
-  static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name);
-
   static Handle<JSFunction> NewFunction(Handle<Map> function_map,
       Handle<SharedFunctionInfo> shared, Handle<Object> prototype);
 
@@ -287,6 +293,9 @@
                                                      Handle<Code> code,
                                                      bool force_initial_map);
 
+  static Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
+                                                        Handle<Code> code);
+
   static Handle<DescriptorArray> CopyAppendProxyDescriptor(
       Handle<DescriptorArray> array,
       Handle<String> key,
@@ -316,7 +325,7 @@
 
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
   static inline Handle<type> name() {                                          \
-    return Handle<type>(bit_cast<type**, Object**>(                            \
+    return Handle<type>(BitCast<type**, Object**>(                             \
         &Heap::roots_[Heap::k##camel_name##RootIndex]));                       \
   }
   ROOT_LIST(ROOT_ACCESSOR)
@@ -324,7 +333,7 @@
 
 #define SYMBOL_ACCESSOR(name, str) \
   static inline Handle<String> name() {                                        \
-    return Handle<String>(bit_cast<String**, Object**>(                        \
+    return Handle<String>(BitCast<String**, Object**>(                         \
         &Heap::roots_[Heap::k##name##RootIndex]));                             \
   }
   SYMBOL_LIST(SYMBOL_ACCESSOR)
@@ -334,6 +343,8 @@
     return Handle<String>(&Heap::hidden_symbol_);
   }
 
+  static Handle<SharedFunctionInfo> NewSharedFunctionInfo(
+      Handle<String> name, int number_of_literals, Handle<Code> code);
   static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
 
   static Handle<NumberDictionary> DictionaryAtNumberPut(
@@ -370,15 +381,13 @@
   static Handle<JSFunction> NewFunctionHelper(Handle<String> name,
                                               Handle<Object> prototype);
 
+  static Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
+      Handle<String> name);
+
   static Handle<DescriptorArray> CopyAppendCallbackDescriptors(
       Handle<DescriptorArray> array,
       Handle<Object> descriptors);
 
-  static Handle<JSFunction> BaseNewFunctionFromBoilerplate(
-      Handle<JSFunction> boilerplate,
-      Handle<Map> function_map,
-      PretenureFlag pretenure);
-
   // Create a new map cache.
   static Handle<MapCache> NewMapCache(int at_least_space_for);
 
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index 602d6b8..832cf74 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -195,9 +195,9 @@
 }
 
 
-void FastCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
+void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
 }
 
 
@@ -436,9 +436,6 @@
   AstLabeler labeler;
   labeler.Label(info);
 
-  LivenessAnalyzer analyzer;
-  analyzer.Analyze(info->function());
-
   CodeGenerator::MakeCodePrologue(info);
 
   const int kInitialBufferSize = 4 * KB;
@@ -563,8 +560,8 @@
 }
 
 
-void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FastCodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }
 
@@ -598,8 +595,8 @@
     Comment cmnt(masm(), ";; Global");
     if (FLAG_print_ir) {
       SmartPointer<char> name = expr->name()->ToCString();
-      PrintF("%d: t%d = Global(%s)  // last_use = %d\n", expr->num(),
-             expr->num(), *name, expr->var_def()->last_use()->num());
+      PrintF("%d: t%d = Global(%s)\n", expr->num(),
+             expr->num(), *name);
     }
     EmitGlobalVariableLoad(cell);
   }
@@ -653,9 +650,8 @@
     SmartPointer<char> name_string = name->ToCString();
     PrintF("%d: ", expr->num());
     if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
-    PrintF("Store(this, \"%s\", t%d)  // last_use(this) = %d\n", *name_string,
-           expr->value()->num(),
-           expr->var_def()->last_use()->num());
+    PrintF("Store(this, \"%s\", t%d)\n", *name_string,
+           expr->value()->num());
   }
 
   EmitThisPropertyStore(name);
@@ -678,9 +674,8 @@
     Comment cmnt(masm(), ";; Load from this");
     if (FLAG_print_ir) {
       SmartPointer<char> name_string = name->ToCString();
-      PrintF("%d: t%d = Load(this, \"%s\")  // last_use(this) = %d\n",
-             expr->num(), expr->num(), *name_string,
-             expr->var_def()->last_use()->num());
+      PrintF("%d: t%d = Load(this, \"%s\")\n",
+             expr->num(), expr->num(), *name_string);
     }
     EmitThisPropertyLoad(name);
   }
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
index e96daf6..a0282bb 100644
--- a/src/fast-codegen.h
+++ b/src/fast-codegen.h
@@ -93,6 +93,7 @@
   Register accumulator1();
   Register scratch0();
   Register scratch1();
+  Register scratch2();
   Register receiver_reg();
   Register context_reg();
 
diff --git a/src/fast-dtoa.cc b/src/fast-dtoa.cc
new file mode 100644
index 0000000..4c0d15d
--- /dev/null
+++ b/src/fast-dtoa.cc
@@ -0,0 +1,512 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "fast-dtoa.h"
+
+#include "cached-powers.h"
+#include "diy-fp.h"
+#include "double.h"
+
+namespace v8 {
+namespace internal {
+
+// The minimal and maximal target exponent define the range of w's binary
+// exponent, where 'w' is the result of multiplying the input by a cached power
+// of ten.
+//
+// A different range might be chosen on a different platform, to optimize digit
+// generation, but a smaller range requires more powers of ten to be cached.
+static const int minimal_target_exponent = -60;
+static const int maximal_target_exponent = -32;
+
+
+// Adjusts the last digit of the generated number, and screens out generated
+// solutions that may be inaccurate. A solution may be inaccurate if it is
+// outside the safe interval, or if we cannot prove that it is closer to the
+// input than a neighboring representation of the same length.
+//
+// Input: * buffer containing the digits of too_high / 10^kappa
+//        * the buffer's length
+//        * distance_too_high_w == (too_high - w).f() * unit
+//        * unsafe_interval == (too_high - too_low).f() * unit
+//        * rest = (too_high - buffer * 10^kappa).f() * unit
+//        * ten_kappa = 10^kappa * unit
+//        * unit = the common multiplier
+// Output: returns true if the buffer is guaranteed to contain the closest
+//    representable number to the input.
+//  Modifies the generated digits in the buffer to approach (round towards) w.
+bool RoundWeed(Vector<char> buffer,
+               int length,
+               uint64_t distance_too_high_w,
+               uint64_t unsafe_interval,
+               uint64_t rest,
+               uint64_t ten_kappa,
+               uint64_t unit) {
+  uint64_t small_distance = distance_too_high_w - unit;
+  uint64_t big_distance = distance_too_high_w + unit;
+  // Let w_low  = too_high - big_distance, and
+  //     w_high = too_high - small_distance.
+  // Note: w_low < w < w_high
+  //
+  // The real w (* unit) must lie somewhere inside the interval
+  // ]w_low; w_high[ (often written as "(w_low, w_high)")
+
+  // Basically the buffer currently contains a number in the unsafe interval
+  // ]too_low; too_high[ with too_low < w < too_high
+  //
+  //  too_high - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+  //                     ^v 1 unit            ^      ^                 ^      ^
+  //  boundary_high ---------------------     .      .                 .      .
+  //                     ^v 1 unit            .      .                 .      .
+  //   - - - - - - - - - - - - - - - - - - -  +  - - + - - - - - -     .      .
+  //                                          .      .         ^       .      .
+  //                                          .  big_distance  .       .      .
+  //                                          .      .         .       .    rest
+  //                              small_distance     .         .       .      .
+  //                                          v      .         .       .      .
+  //  w_high - - - - - - - - - - - - - - - - - -     .         .       .      .
+  //                     ^v 1 unit                   .         .       .      .
+  //  w ----------------------------------------     .         .       .      .
+  //                     ^v 1 unit                   v         .       .      .
+  //  w_low  - - - - - - - - - - - - - - - - - - - - -         .       .      .
+  //                                                           .       .      v
+  //  buffer --------------------------------------------------+-------+--------
+  //                                                           .       .
+  //                                                  safe_interval    .
+  //                                                           v       .
+  //   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -     .
+  //                     ^v 1 unit                                     .
+  //  boundary_low -------------------------                     unsafe_interval
+  //                     ^v 1 unit                                     v
+  //  too_low  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+  //
+  //
+  // Note that the value of buffer could lie anywhere inside the range too_low
+  // to too_high.
+  //
+  // boundary_low, boundary_high and w are approximations of the real boundaries
+  // and v (the input number). They are guaranteed to be precise up to one unit.
+  // In fact the error is guaranteed to be strictly less than one unit.
+  //
+  // Anything that lies outside the unsafe interval is guaranteed not to round
+  // to v when read again.
+  // Anything that lies inside the safe interval is guaranteed to round to v
+  // when read again.
+  // If the number inside the buffer lies inside the unsafe interval but not
+  // inside the safe interval then we simply do not know and bail out (returning
+  // false).
+  //
+  // Similarly we have to take into account the imprecision of 'w' when rounding
+  // the buffer. If we have two potential representations we need to make sure
+  // that the chosen one is closer to w_low and w_high since v can be anywhere
+  // between them.
+  //
+  // By generating the digits of too_high we got the largest (closest to
+  // too_high) buffer that is still in the unsafe interval. In the case where
+  // w_high < buffer < too_high we try to decrement the buffer.
+  // This way the buffer approaches (rounds towards) w.
+  // There are 3 conditions that stop the decrementation process:
+  //   1) the buffer is already below w_high
+  //   2) decrementing the buffer would make it leave the unsafe interval
+  //   3) decrementing the buffer would yield a number below w_high and farther
+  //      away than the current number. In other words:
+  //              (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
+  // Instead of using the buffer directly we use its distance to too_high.
+  // Conceptually rest ~= too_high - buffer
+  while (rest < small_distance &&  // Negated condition 1
+         unsafe_interval - rest >= ten_kappa &&  // Negated condition 2
+         (rest + ten_kappa < small_distance ||  // buffer{-1} > w_high
+          small_distance - rest >= rest + ten_kappa - small_distance)) {
+    buffer[length - 1]--;
+    rest += ten_kappa;
+  }
+
+  // We have approached w+ as much as possible. We now test if approaching w-
+  // would require changing the buffer. If yes, then we have two possible
+  // representations close to w, but we cannot decide which one is closer.
+  if (rest < big_distance &&
+      unsafe_interval - rest >= ten_kappa &&
+      (rest + ten_kappa < big_distance ||
+       big_distance - rest > rest + ten_kappa - big_distance)) {
+    return false;
+  }
+
+  // Weeding test.
+  //   The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
+  //   Since too_low = too_high - unsafe_interval this is equivalent to
+  //      [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
+  //   Conceptually we have: rest ~= too_high - buffer
+  return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
+}
+
+
+
+static const uint32_t kTen4 = 10000;
+static const uint32_t kTen5 = 100000;
+static const uint32_t kTen6 = 1000000;
+static const uint32_t kTen7 = 10000000;
+static const uint32_t kTen8 = 100000000;
+static const uint32_t kTen9 = 1000000000;
+
+// Returns the biggest power of ten that is less than or equal to the given
+// number. We furthermore receive the maximum number of bits 'number' has.
+// If number_bits == 0 then power 0 and exponent -1 are returned.
+// The number of bits must be <= 32.
+// Precondition: number < (1 << number_bits).
+static void BiggestPowerTen(uint32_t number,
+                            int number_bits,
+                            uint32_t* power,
+                            int* exponent) {
+  switch (number_bits) {
+    case 32:
+    case 31:
+    case 30:
+      if (kTen9 <= number) {
+        *power = kTen9;
+        *exponent = 9;
+        break;
+      }  // else fallthrough
+    case 29:
+    case 28:
+    case 27:
+      if (kTen8 <= number) {
+        *power = kTen8;
+        *exponent = 8;
+        break;
+      }  // else fallthrough
+    case 26:
+    case 25:
+    case 24:
+      if (kTen7 <= number) {
+        *power = kTen7;
+        *exponent = 7;
+        break;
+      }  // else fallthrough
+    case 23:
+    case 22:
+    case 21:
+    case 20:
+      if (kTen6 <= number) {
+        *power = kTen6;
+        *exponent = 6;
+        break;
+      }  // else fallthrough
+    case 19:
+    case 18:
+    case 17:
+      if (kTen5 <= number) {
+        *power = kTen5;
+        *exponent = 5;
+        break;
+      }  // else fallthrough
+    case 16:
+    case 15:
+    case 14:
+      if (kTen4 <= number) {
+        *power = kTen4;
+        *exponent = 4;
+        break;
+      }  // else fallthrough
+    case 13:
+    case 12:
+    case 11:
+    case 10:
+      if (1000 <= number) {
+        *power = 1000;
+        *exponent = 3;
+        break;
+      }  // else fallthrough
+    case 9:
+    case 8:
+    case 7:
+      if (100 <= number) {
+        *power = 100;
+        *exponent = 2;
+        break;
+      }  // else fallthrough
+    case 6:
+    case 5:
+    case 4:
+      if (10 <= number) {
+        *power = 10;
+        *exponent = 1;
+        break;
+      }  // else fallthrough
+    case 3:
+    case 2:
+    case 1:
+      if (1 <= number) {
+        *power = 1;
+        *exponent = 0;
+        break;
+      }  // else fallthrough
+    case 0:
+      *power = 0;
+      *exponent = -1;
+      break;
+    default:
+      // Following assignments are here to silence compiler warnings.
+      *power = 0;
+      *exponent = 0;
+      UNREACHABLE();
+  }
+}
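As an illustration (editorial sketch, not part of the patch): 123456 uses
17 bits, since 2^16 <= 123456 < 2^17, so a caller passes number_bits == 17
and the case 17 arm finds 10^5:

    uint32_t power;
    int exponent;
    BiggestPowerTen(123456, 17, &power, &exponent);
    // power == 100000 (kTen5) and exponent == 5, the biggest power of ten
    // that is <= 123456.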
+
+
+// Generates the digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by minimal_target_exponent and
+// maximal_target_exponent.
+//       Hence -60 <= w.e() <= -32.
+//
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+//  * low, w and high are correct up to 1 ulp (unit in the last place). That
+//    is, their error must be less than a unit of their last digits.
+//  * low.e() == w.e() == high.e()
+//  * low < w < high, and taking into account their error: low~ <= high~
+//  * minimal_target_exponent <= w.e() <= maximal_target_exponent
+// Postconditions: returns false if procedure fails.
+//   otherwise:
+//     * buffer is not null-terminated, but len contains the number of digits.
+//     * buffer contains the shortest possible decimal digit-sequence
+//       such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
+//       correct values of low and high (without their error).
+//     * if more than one decimal representation gives the minimal number of
+//       decimal digits then the one closest to W (where W is the correct value
+//       of w) is chosen.
+// Remark: this procedure takes into account the imprecision of its input
+//   numbers. If the precision is not enough to guarantee all the postconditions
+//   then false is returned. This happens rarely (~0.5% of the time).
+//
+// Say, for the sake of example, that
+//   w.e() == -48, and w.f() == 0x1234567890abcdef
+// w's value can be computed by w.f() * 2^w.e()
+// We can obtain w's integral digits by simply shifting w.f() by -w.e().
+//  -> w's integral part is 0x1234
+//  w's fractional part is therefore 0x567890abcdef.
+// Printing w's integral part is easy (simply print 0x1234 in decimal).
+// In order to print its fraction we repeatedly multiply the fraction by 10 and
+// get each digit. For example, the first digit after the decimal point is
+//   (0x567890abcdef * 10) >> 48. -> 3
+// The whole thing becomes slightly more complicated because we want to stop
+// once we have enough digits. That is, once the digits inside the buffer
+// represent 'w' we can stop. Everything inside the interval low - high
+// represents w. However we have to pay attention to low, high and w's
+// imprecision.
+bool DigitGen(DiyFp low,
+              DiyFp w,
+              DiyFp high,
+              Vector<char> buffer,
+              int* length,
+              int* kappa) {
+  ASSERT(low.e() == w.e() && w.e() == high.e());
+  ASSERT(low.f() + 1 <= high.f() - 1);
+  ASSERT(minimal_target_exponent <= w.e() && w.e() <= maximal_target_exponent);
+  // low, w and high are imprecise, but by less than one ulp (unit in the last
+  // place).
+  // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
+  // the new numbers are outside of the interval we want the final
+  // representation to lie in.
+  // Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
+  // numbers that are certain to lie in the interval. We will use this fact
+  // later on.
+  // We will now start by generating the digits within the uncertain
+  // interval. Later we will weed out representations that lie outside the safe
+  // interval and thus _might_ lie outside the correct interval.
+  uint64_t unit = 1;
+  DiyFp too_low = DiyFp(low.f() - unit, low.e());
+  DiyFp too_high = DiyFp(high.f() + unit, high.e());
+  // too_low and too_high are guaranteed to lie outside the interval we want the
+  // generated number in.
+  DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
+  // We now cut the input number into two parts: the integral digits and the
+  // fractionals. We will not write any decimal separator though, but adapt
+  // kappa instead.
+  // Reminder: we are currently computing the digits (stored inside the buffer)
+  // such that:   too_low < buffer * 10^kappa < too_high
+  // We use too_high for the digit_generation and stop as soon as possible.
+  // If we stop early we effectively round down.
+  DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+  // Division by one is a shift.
+  uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
+  // Modulo by one is an and.
+  uint64_t fractionals = too_high.f() & (one.f() - 1);
+  uint32_t divider;
+  int divider_exponent;
+  BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+                  &divider, &divider_exponent);
+  *kappa = divider_exponent + 1;
+  *length = 0;
+  // Loop invariant: buffer = too_high / 10^kappa  (integer division)
+  // The invariant holds for the first iteration: kappa has been initialized
+  // with the divider exponent + 1. And the divider is the biggest power of ten
+  // that is smaller than integrals.
+  while (*kappa > 0) {
+    int digit = integrals / divider;
+    buffer[*length] = '0' + digit;
+    (*length)++;
+    integrals %= divider;
+    (*kappa)--;
+    // Note that kappa now equals the exponent of the divider and that the
+    // invariant thus holds again.
+    uint64_t rest =
+        (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+    // Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
+    // Reminder: unsafe_interval.e() == one.e()
+    if (rest < unsafe_interval.f()) {
+      // Rounding down (by not emitting the remaining digits) yields a number
+      // that lies within the unsafe interval.
+      return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
+                       unsafe_interval.f(), rest,
+                       static_cast<uint64_t>(divider) << -one.e(), unit);
+    }
+    divider /= 10;
+  }
+
+  // The integrals have been generated. We are at the point of the decimal
+  // separator. In the following loop we simply multiply the remaining digits by
+  // 10 and divide by one. We just need to pay attention to multiply associated
+  // data (like the interval or 'unit'), too.
+  // Instead of multiplying by 10 we multiply by 5 (cheaper operation) and
+  // increase its (imaginary) exponent. At the same time we decrease the
+  // divider's (one's) exponent and shift its significand.
+  // Basically, if fractionals was a DiyFp (with fractionals.e == one.e):
+  //      fractionals.f *= 10;
+  //      fractionals.f >>= 1; fractionals.e++; // value remains unchanged.
+  //      one.f >>= 1; one.e++;                 // value remains unchanged.
+  //      and we have again fractionals.e == one.e which allows us to divide
+  //           fractionals.f() by one.f()
+  // We simply combine the *= 10 and the >>= 1.
+  while (true) {
+    fractionals *= 5;
+    unit *= 5;
+    unsafe_interval.set_f(unsafe_interval.f() * 5);
+    unsafe_interval.set_e(unsafe_interval.e() + 1);  // Will be optimized out.
+    one.set_f(one.f() >> 1);
+    one.set_e(one.e() + 1);
+    // Integer division by one.
+    int digit = static_cast<int>(fractionals >> -one.e());
+    buffer[*length] = '0' + digit;
+    (*length)++;
+    fractionals &= one.f() - 1;  // Modulo by one.
+    (*kappa)--;
+    if (fractionals < unsafe_interval.f()) {
+      return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
+                       unsafe_interval.f(), fractionals, one.f(), unit);
+    }
+  }
+}
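The shift-and-mask split described in the comment above can be tried in
isolation; a self-contained sketch in plain C++ using the w.e() == -48
example (editorial, not part of the patch):

    uint64_t f = 0x1234567890abcdefULL;                    // w.f()
    int e = -48;                                           // w.e()
    uint32_t integrals = static_cast<uint32_t>(f >> -e);   // 0x1234
    uint64_t fractionals = f & ((1ULL << -e) - 1);         // 0x567890abcdef
    // First digit after the decimal separator, as computed above: 3.
    int digit = static_cast<int>((fractionals * 10) >> -e);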
+
+
+// Provides a decimal representation of v.
+// Returns true if it succeeds, otherwise the result cannot be trusted.
+// There will be *length digits inside the buffer (not null-terminated).
+// If the function returns true then
+//        v == (double) (buffer * 10^decimal_exponent).
+// The digits in the buffer are the shortest representation possible: no
+// 0.09999999999999999 instead of 0.1. The shorter representation will be
+// chosen even if the longer one would be closer to v.
+// The last digit will be closest to the actual v. That is, even if several
+// digit sequences might correctly yield 'v' when read again, the one closest
+// to v will be generated.
+bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
+  DiyFp w = Double(v).AsNormalizedDiyFp();
+  // boundary_minus and boundary_plus are the boundaries between v and its
+  // closest floating-point neighbors. Any number strictly between
+// boundary_minus and boundary_plus will round to v when converted to a double.
+  // Grisu3 will never output representations that lie exactly on a boundary.
+  DiyFp boundary_minus, boundary_plus;
+  Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
+  ASSERT(boundary_plus.e() == w.e());
+  DiyFp ten_mk;  // Cached power of ten: 10^-k
+  int mk;        // -k
+  GetCachedPower(w.e() + DiyFp::kSignificandSize, minimal_target_exponent,
+                 maximal_target_exponent, &mk, &ten_mk);
+  ASSERT(minimal_target_exponent <= w.e() + ten_mk.e() +
+         DiyFp::kSignificandSize &&
+         maximal_target_exponent >= w.e() + ten_mk.e() +
+         DiyFp::kSignificandSize);
+  // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+  // 64 bit significand and ten_mk is thus only precise up to 64 bits.
+
+  // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+  // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
+  // off by a small amount.
+  // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+  // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+  //           (f-1) * 2^e < w*10^k < (f+1) * 2^e
+  DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+  ASSERT(scaled_w.e() ==
+         boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
+  // In theory it would be possible to avoid some recomputations by computing
+  // the difference between w and boundary_minus/plus (a power of 2) and to
+  // compute scaled_boundary_minus/plus by subtracting/adding from
+  // scaled_w. However the code becomes much less readable and the speed
+// enhancements are not terrific.
+  DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
+  DiyFp scaled_boundary_plus  = DiyFp::Times(boundary_plus,  ten_mk);
+
+  // DigitGen will generate the digits of scaled_w. Therefore we have
+  // v == (double) (scaled_w * 10^-mk).
+  // Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
+  // integer then it will be updated. For instance if scaled_w == 1.23 then
+  // the buffer will be filled with "123" and the decimal_exponent will be
+  // decreased by 2.
+  int kappa;
+  bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
+                         buffer, length, &kappa);
+  *decimal_exponent = -mk + kappa;
+  return result;
+}
+
+
+bool FastDtoa(double v,
+              Vector<char> buffer,
+              int* sign,
+              int* length,
+              int* point) {
+  ASSERT(v != 0);
+  ASSERT(!Double(v).IsSpecial());
+
+  if (v < 0) {
+    v = -v;
+    *sign = 1;
+  } else {
+    *sign = 0;
+  }
+  int decimal_exponent;
+  bool result = grisu3(v, buffer, length, &decimal_exponent);
+  *point = *length + decimal_exponent;
+  buffer[*length] = '\0';
+  return result;
+}
+
+} }  // namespace v8::internal
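A usage sketch for the new entry point (editorial, not from the patch; the
buffer size follows from kFastDtoaMaximalLength in fast-dtoa.h):

    char digits[kFastDtoaMaximalLength + 1];
    int sign, length, point;
    if (FastDtoa(0.1, Vector<char>(digits, kFastDtoaMaximalLength + 1),
                 &sign, &length, &point)) {
      // digits == "1", sign == 0, length == 1, point == 0,
      // i.e. 0.1 == 1 * 10^(point - length).
    } else {
      // Rare failure (~0.5%): fall back to a slower, always-correct dtoa.
    }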
diff --git a/src/fast-dtoa.h b/src/fast-dtoa.h
new file mode 100644
index 0000000..9f1f76a
--- /dev/null
+++ b/src/fast-dtoa.h
@@ -0,0 +1,59 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FAST_DTOA_H_
+#define V8_FAST_DTOA_H_
+
+namespace v8 {
+namespace internal {
+
+// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
+// include the terminating '\0' character.
+static const int kFastDtoaMaximalLength = 17;
+
+// Provides a decimal representation of v.
+// v must not be (positive or negative) zero and it must not be Infinity or NaN.
+// Returns true if it succeeds, otherwise the result cannot be trusted.
+// There will be *length digits inside the buffer followed by a null terminator.
+// If the function returns true then
+//   v == (double) (buffer * 10^(point - length)).
+// The digits in the buffer are the shortest representation possible: no
+// 0.099999999999 instead of 0.1.
+// The last digit will be closest to the actual v. That is, even if several
+// digit sequences might correctly yield 'v' when read again, the buffer will
+// contain the one closest to v.
+// The variable 'sign' will be 0 if the given number is positive, and 1
+// otherwise.
+bool FastDtoa(double d,
+              Vector<char> buffer,
+              int* sign,
+              int* length,
+              int* point);
+
+} }  // namespace v8::internal
+
+#endif  // V8_FAST_DTOA_H_
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index dbb9ce7..490a2c5 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -124,6 +124,7 @@
 DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
 DEFINE_bool(expose_gc, false, "expose gc extension")
 DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
+DEFINE_bool(disable_native_files, false, "disable builtin natives files")
 
 // builtins-ia32.cc
 DEFINE_bool(inline_new, true, "use fast inline allocation")
@@ -152,16 +153,23 @@
             "try to use the speculative optimizing backend for all code")
 DEFINE_bool(trace_bailout, false,
             "print reasons for falling back to using the classic V8 backend")
+DEFINE_bool(safe_int32_compiler, true,
+            "enable optimized side-effect-free int32 expressions.")
+DEFINE_bool(use_flow_graph, false, "perform flow-graph based optimizations")
 
 // compilation-cache.cc
 DEFINE_bool(compilation_cache, true, "enable compilation cache")
 
+// data-flow.cc
+DEFINE_bool(loop_peeling, false, "Peel off the first iteration of loops.")
+
 // debug.cc
 DEFINE_bool(remote_debugging, false, "enable remote debugging")
 DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
 DEFINE_bool(debugger_auto_break, true,
             "automatically set the debug break flag when debugger commands are "
             "in the queue")
+DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
 
 // frames.cc
 DEFINE_int(max_stack_trace_source_length, 300,
@@ -197,6 +205,9 @@
             "Flush inline caches prior to mark compact collection.")
 DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
             "Flush code caches in maps during mark compact cycle.")
+DEFINE_int(random_seed, 0,
+           "Default seed for initializing random generator "
+           "(0, the default, means to use system random).")
 
 DEFINE_bool(canonicalize_object_literal_maps, true,
             "Canonicalize maps for object literals.")
@@ -220,8 +231,11 @@
 DEFINE_bool(optimize_ast, true, "optimize the ast")
 
 // simulator-arm.cc and simulator-mips.cc
-DEFINE_bool(trace_sim, false, "trace simulator execution")
+DEFINE_bool(trace_sim, false, "Trace simulator execution")
+DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
+DEFINE_int(sim_stack_alignment, 8,
+           "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
 
 // top.cc
 DEFINE_bool(trace_exception, false,
@@ -229,9 +243,6 @@
 DEFINE_bool(preallocate_message_memory, false,
             "preallocate some memory to build stack traces.")
 
-// usage-analyzer.cc
-DEFINE_bool(usage_computation, true, "compute variable usage counts")
-
 // v8.cc
 DEFINE_bool(preemption, false,
             "activate a 100ms timer that switches between V8 threads")
@@ -303,6 +314,8 @@
 DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
 DEFINE_bool(print_scopes, false, "print scopes")
 DEFINE_bool(print_ir, false, "print the AST as seen by the backend")
+DEFINE_bool(print_graph_text, false,
+            "print a text representation of the flow graph")
 
 // contexts.cc
 DEFINE_bool(trace_contexts, false, "trace contexts operations")
@@ -381,6 +394,8 @@
 DEFINE_bool(prof_lazy, false,
             "Used with --prof, only does sampling and logging"
             " when profiler is active (implies --noprof_auto).")
+DEFINE_bool(prof_browser_mode, true,
+            "Used with --prof, turns on browser-compatible mode for profiling.")
 DEFINE_bool(log_regexp, false, "Log regular expression execution.")
 DEFINE_bool(sliding_state_window, false,
             "Update sliding state window counters.")
diff --git a/src/flow-graph.cc b/src/flow-graph.cc
new file mode 100644
index 0000000..02a2cd9
--- /dev/null
+++ b/src/flow-graph.cc
@@ -0,0 +1,763 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "flow-graph.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+void BasicBlock::BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
+                                     ZoneList<BasicBlock*>* postorder,
+                                     bool mark) {
+  if (mark_ == mark) return;
+  mark_ = mark;
+  preorder->Add(this);
+  if (right_successor_ != NULL) {
+    right_successor_->BuildTraversalOrder(preorder, postorder, mark);
+  }
+  if (left_successor_ != NULL) {
+    left_successor_->BuildTraversalOrder(preorder, postorder, mark);
+  }
+  postorder->Add(this);
+}
+
+
+FlowGraph* FlowGraphBuilder::Build(FunctionLiteral* lit) {
+  // Create new entry and exit nodes.  These will not change during
+  // construction.
+  entry_ = new BasicBlock(NULL);
+  exit_ = new BasicBlock(NULL);
+  // Begin accumulating instructions in the entry block.
+  current_ = entry_;
+
+  VisitDeclarations(lit->scope()->declarations());
+  VisitStatements(lit->body());
+  // In the event of stack overflow or failure to handle a syntactic
+  // construct, return an invalid flow graph.
+  if (HasStackOverflow()) return new FlowGraph(NULL, NULL);
+
+  // If current is not the exit, add a link to the exit.
+  if (current_ != exit_) {
+    // If current already has a successor (i.e., will be a branch node) and
+    // if the exit already has a predecessor, insert an empty block to
+    // maintain edge split form.
+    if (current_->HasSuccessor() && exit_->HasPredecessor()) {
+      current_ = new BasicBlock(current_);
+    }
+    Literal* undefined = new Literal(Factory::undefined_value());
+    current_->AddInstruction(new ReturnStatement(undefined));
+    exit_->AddPredecessor(current_);
+  }
+
+  FlowGraph* graph = new FlowGraph(entry_, exit_);
+  bool mark = !entry_->GetMark();
+  entry_->BuildTraversalOrder(graph->preorder(), graph->postorder(), mark);
+
+#ifdef DEBUG
+  // Number the nodes in reverse postorder.
+  int n = 0;
+  for (int i = graph->postorder()->length() - 1; i >= 0; --i) {
+    graph->postorder()->at(i)->set_number(n++);
+  }
+#endif
+
+  return graph;
+}
+
+
+void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
+  Variable* var = decl->proxy()->AsVariable();
+  Slot* slot = var->slot();
+  // We allow only declarations that do not require code generation.
+  // The following all require code generation: global variables and
+  // functions, variables with slot type LOOKUP, declarations with
+  // mode CONST, and functions.
+
+  if (var->is_global() ||
+      (slot != NULL && slot->type() == Slot::LOOKUP) ||
+      decl->mode() == Variable::CONST ||
+      decl->fun() != NULL) {
+    // Here and in the rest of the flow graph builder we indicate an
+    // unsupported syntactic construct by setting the stack overflow
+    // flag on the visitor.  This causes bailout of the visitor.
+    SetStackOverflow();
+  }
+}
+
+
+void FlowGraphBuilder::VisitBlock(Block* stmt) {
+  VisitStatements(stmt->statements());
+}
+
+
+void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  // Nothing to do.
+}
+
+
+void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+  // Build a diamond in the flow graph.  First accumulate the instructions
+  // of the test in the current basic block.
+  Visit(stmt->condition());
+
+  // Remember the branch node and accumulate the true branch as its left
+  // successor.  This relies on the successors being added left to right.
+  BasicBlock* branch = current_;
+  current_ = new BasicBlock(branch);
+  Visit(stmt->then_statement());
+
+  // Construct a join node and then accumulate the false branch in a fresh
+  // successor of the branch node.
+  BasicBlock* join = new BasicBlock(current_);
+  current_ = new BasicBlock(branch);
+  Visit(stmt->else_statement());
+  join->AddPredecessor(current_);
+
+  current_ = join;
+}
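The shape built here for "if (c) S1; else S2;" is the usual diamond
(editorial sketch of the blocks created above):

    [.. c]   <- branch node (current_ before the then-branch)
     /   \
   [S1]  [S2]
     \   /
    [join]   <- current_ on exit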
+
+
+void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
+  // Build a loop in the flow graph.  First accumulate the instructions of
+  // the initializer in the current basic block.
+  if (stmt->init() != NULL) Visit(stmt->init());
+
+  // Create a new basic block for the test.  This will be the join node.
+  BasicBlock* join = new BasicBlock(current_);
+  current_ = join;
+  if (stmt->cond() != NULL) Visit(stmt->cond());
+
+  // The current node is the branch node.  Create a new basic block to begin
+  // the body.
+  BasicBlock* branch = current_;
+  current_ = new BasicBlock(branch);
+  Visit(stmt->body());
+  if (stmt->next() != NULL) Visit(stmt->next());
+
+  // Add the backward edge from the end of the body and continue with the
+  // false arm of the branch.
+  join->AddPredecessor(current_);
+  current_ = new BasicBlock(branch);
+}
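And the shape for "for (init; cond; next) body" (editorial sketch; the back
edge is the join->AddPredecessor(current_) call above):

    [.. init] --> [join: cond] --> [body; next]
                    |    ^              |
                    |    +--------------+
                    v
              [false arm / loop exit]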
+
+
+void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitConditional(Conditional* expr) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitSlot(Slot* expr) {
+  // Slots do not appear in the AST.
+  UNREACHABLE();
+}
+
+
+void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+  current_->AddInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitLiteral(Literal* expr) {
+  current_->AddInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
+  // There are three basic kinds of assignment: variable assignments,
+  // property assignments, and invalid left-hand sides (which are translated
+  // to "throw ReferenceError" by the parser).
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
+  if (var != NULL) {
+    if (expr->is_compound() && !expr->target()->IsTrivial()) {
+      Visit(expr->target());
+    }
+    if (!expr->value()->IsTrivial()) Visit(expr->value());
+    current_->AddInstruction(expr);
+
+  } else if (prop != NULL) {
+    if (!prop->obj()->IsTrivial()) Visit(prop->obj());
+    if (!prop->key()->IsPropertyName() && !prop->key()->IsTrivial()) {
+      Visit(prop->key());
+    }
+    if (!expr->value()->IsTrivial()) Visit(expr->value());
+    current_->AddInstruction(expr);
+
+  } else {
+    Visit(expr->target());
+  }
+}
+
+
+void FlowGraphBuilder::VisitThrow(Throw* expr) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitProperty(Property* expr) {
+  if (!expr->obj()->IsTrivial()) Visit(expr->obj());
+  if (!expr->key()->IsPropertyName() && !expr->key()->IsTrivial()) {
+    Visit(expr->key());
+  }
+  current_->AddInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitCall(Call* expr) {
+  Visit(expr->expression());
+  VisitExpressions(expr->arguments());
+  current_->AddInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+  SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::NOT:
+    case Token::BIT_NOT:
+    case Token::DELETE:
+    case Token::TYPEOF:
+    case Token::VOID:
+      SetStackOverflow();
+      break;
+
+    case Token::ADD:
+    case Token::SUB:
+      Visit(expr->expression());
+      current_->AddInstruction(expr);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
+  Visit(expr->expression());
+  current_->AddInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::COMMA:
+    case Token::OR:
+    case Token::AND:
+      SetStackOverflow();
+      break;
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      if (!expr->left()->IsTrivial()) Visit(expr->left());
+      if (!expr->right()->IsTrivial()) Visit(expr->right());
+      current_->AddInstruction(expr);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  switch (expr->op()) {
+    case Token::EQ:
+    case Token::NE:
+    case Token::EQ_STRICT:
+    case Token::NE_STRICT:
+    case Token::INSTANCEOF:
+    case Token::IN:
+      SetStackOverflow();
+      break;
+
+    case Token::LT:
+    case Token::GT:
+    case Token::LTE:
+    case Token::GTE:
+      if (!expr->left()->IsTrivial()) Visit(expr->left());
+      if (!expr->right()->IsTrivial()) Visit(expr->right());
+      current_->AddInstruction(expr);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+  SetStackOverflow();
+}
+
+
+#ifdef DEBUG
+
+// Print a textual representation of an instruction in a flow graph.
+class InstructionPrinter: public AstVisitor {
+ public:
+  InstructionPrinter() {}
+
+ private:
+  // Overridden from the base class.
+  virtual void VisitExpressions(ZoneList<Expression*>* exprs);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  DISALLOW_COPY_AND_ASSIGN(InstructionPrinter);
+};
+
+
+static void PrintSubexpression(Expression* expr) {
+  if (!expr->IsTrivial()) {
+    PrintF("@%d", expr->num());
+  } else if (expr->AsLiteral() != NULL) {
+    expr->AsLiteral()->handle()->Print();
+  } else if (expr->AsVariableProxy() != NULL) {
+    PrintF("%s", *expr->AsVariableProxy()->name()->ToCString());
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void InstructionPrinter::VisitExpressions(ZoneList<Expression*>* exprs) {
+  for (int i = 0; i < exprs->length(); ++i) {
+    if (i != 0) PrintF(", ");
+    PrintF("@%d", exprs->at(i)->num());
+  }
+}
+
+
+// We only define printing functions for the node types that can occur as
+// instructions in a flow graph.  The rest are unreachable.
+void InstructionPrinter::VisitDeclaration(Declaration* decl) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitBlock(Block* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitExpressionStatement(ExpressionStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitIfStatement(IfStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitBreakStatement(BreakStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) {
+  PrintF("return ");
+  PrintSubexpression(stmt->expression());
+}
+
+
+void InstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitWhileStatement(WhileStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitForStatement(ForStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitForInStatement(ForInStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitConditional(Conditional* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitSlot(Slot* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
+  Variable* var = expr->AsVariable();
+  if (var != NULL) {
+    PrintF("%s", *var->name()->ToCString());
+  } else {
+    ASSERT(expr->AsProperty() != NULL);
+    Visit(expr->AsProperty());
+  }
+}
+
+
+void InstructionPrinter::VisitLiteral(Literal* expr) {
+  expr->handle()->Print();
+}
+
+
+void InstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitCatchExtensionObject(
+    CatchExtensionObject* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitAssignment(Assignment* expr) {
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  Property* prop = expr->target()->AsProperty();
+
+  // Print the left-hand side.
+  Visit(expr->target());
+  if (var == NULL && prop == NULL) return;  // Throw reference error.
+  PrintF(" = ");
+  // For compound assignments, print the left-hand side again and the
+  // corresponding binary operator.
+  if (expr->is_compound()) {
+    PrintSubexpression(expr->target());
+    PrintF(" %s ", Token::String(expr->binary_op()));
+  }
+
+  // Print the right-hand side.
+  PrintSubexpression(expr->value());
+}
+
+
+void InstructionPrinter::VisitThrow(Throw* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitProperty(Property* expr) {
+  PrintSubexpression(expr->obj());
+  if (expr->key()->IsPropertyName()) {
+    PrintF(".");
+    ASSERT(expr->key()->AsLiteral() != NULL);
+    expr->key()->AsLiteral()->handle()->Print();
+  } else {
+    PrintF("[");
+    PrintSubexpression(expr->key());
+    PrintF("]");
+  }
+}
+
+
+void InstructionPrinter::VisitCall(Call* expr) {
+  PrintF("@%d(", expr->expression()->num());
+  VisitExpressions(expr->arguments());
+  PrintF(")");
+}
+
+
+void InstructionPrinter::VisitCallNew(CallNew* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
+  UNREACHABLE();
+}
+
+
+void InstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) {
+  PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num());
+}
+
+
+void InstructionPrinter::VisitCountOperation(CountOperation* expr) {
+  if (expr->is_prefix()) {
+    PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num());
+  } else {
+    PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
+  }
+}
+
+
+void InstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) {
+  PrintSubexpression(expr->left());
+  PrintF(" %s ", Token::String(expr->op()));
+  PrintSubexpression(expr->right());
+}
+
+
+void InstructionPrinter::VisitCompareOperation(CompareOperation* expr) {
+  PrintSubexpression(expr->left());
+  PrintF(" %s ", Token::String(expr->op()));
+  PrintSubexpression(expr->right());
+}
+
+
+void InstructionPrinter::VisitThisFunction(ThisFunction* expr) {
+  UNREACHABLE();
+}
+
+
+int BasicBlock::PrintAsText(int instruction_number) {
+  // Print a label for all blocks except the entry.
+  if (HasPredecessor()) {
+    PrintF("L%d:", number());
+  }
+
+  // Number and print the instructions.  Since AST child nodes are visited
+  // before their parents, the parent nodes can refer to them by number.
+  InstructionPrinter printer;
+  for (int i = 0; i < instructions_.length(); ++i) {
+    PrintF("\n%d ", instruction_number);
+    instructions_[i]->set_num(instruction_number++);
+    instructions_[i]->Accept(&printer);
+  }
+
+  // If this is the exit, print "exit".  If there is a single successor,
+  // print "goto" successor on a separate line.  If there are two
+  // successors, print "goto" successor on the same line as the last
+  // instruction in the block.  There is a blank line between blocks (and
+  // after the last one).
+  if (left_successor_ == NULL) {
+    PrintF("\nexit\n\n");
+  } else if (right_successor_ == NULL) {
+    PrintF("\ngoto L%d\n\n", left_successor_->number());
+  } else {
+    PrintF(", goto (L%d, L%d)\n\n",
+           left_successor_->number(),
+           right_successor_->number());
+  }
+
+  return instruction_number;
+}
+
+
+void FlowGraph::PrintAsText(Handle<String> name) {
+  PrintF("\n==== name = \"%s\" ====\n", *name->ToCString());
+  // Print nodes in reverse postorder.  Note that AST node numbers are used
+  // during printing of instructions and thus their current values are
+  // destroyed.
+  int number = 0;
+  for (int i = postorder_.length() - 1; i >= 0; --i) {
+    number = postorder_[i]->PrintAsText(number);
+  }
+}
+
+#endif  // DEBUG
+
+
+} }  // namespace v8::internal
diff --git a/src/flow-graph.h b/src/flow-graph.h
new file mode 100644
index 0000000..f6af841
--- /dev/null
+++ b/src/flow-graph.h
@@ -0,0 +1,180 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FLOW_GRAPH_H_
+#define V8_FLOW_GRAPH_H_
+
+#include "v8.h"
+
+#include "data-flow.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// The nodes of a flow graph are basic blocks.  Basic blocks consist of
+// instructions represented as pointers to AST nodes in the order that they
+// would be visited by the code generator.  A block can have arbitrarily many
+// (even zero) predecessors and up to two successors.  Blocks with multiple
+// predecessors are "join nodes" and blocks with multiple successors are
+// "branch nodes".  A block can be both a branch and a join node.
+//
+// Flow graphs are in edge split form: a branch node is never the
+// predecessor of a join node.  Empty basic blocks are inserted to maintain
+// edge split form.
+class BasicBlock: public ZoneObject {
+ public:
+  // Construct a basic block with a given predecessor.  NULL indicates no
+  // predecessor or that the predecessor will be set later.
+  explicit BasicBlock(BasicBlock* predecessor)
+      : predecessors_(2),
+        instructions_(8),
+        left_successor_(NULL),
+        right_successor_(NULL),
+        mark_(false) {
+    if (predecessor != NULL) AddPredecessor(predecessor);
+  }
+
+  bool HasPredecessor() { return !predecessors_.is_empty(); }
+  bool HasSuccessor() { return left_successor_ != NULL; }
+
+  // Add a given basic block as a predecessor of this block.  This function
+  // also adds this block as a successor of the given block.
+  void AddPredecessor(BasicBlock* predecessor) {
+    ASSERT(predecessor != NULL);
+    predecessors_.Add(predecessor);
+    predecessor->AddSuccessor(this);
+  }
+
+  // Add an instruction to the end of this block.  The block must be "open"
+  // by not having a successor yet.
+  void AddInstruction(AstNode* instruction) {
+    ASSERT(!HasSuccessor() && instruction != NULL);
+    instructions_.Add(instruction);
+  }
+
+  // Perform a depth-first traversal of graph rooted at this node,
+  // accumulating pre- and postorder traversal orders.  Visited nodes are
+  // marked with mark.
+  void BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
+                           ZoneList<BasicBlock*>* postorder,
+                           bool mark);
+  bool GetMark() { return mark_; }
+
+#ifdef DEBUG
+  // In debug mode, blocks are numbered in reverse postorder to help with
+  // printing.
+  int number() { return number_; }
+  void set_number(int n) { number_ = n; }
+
+  // Print a basic block, given the number of the first instruction.
+  // Returns the next number after the number of the last instruction.
+  int PrintAsText(int instruction_number);
+#endif
+
+ private:
+  // Add a given basic block as successor to this block.  This function does
+  // not add this block as a predecessor of the given block so as to avoid
+  // circularity.
+  void AddSuccessor(BasicBlock* successor) {
+    ASSERT(right_successor_ == NULL && successor != NULL);
+    if (HasSuccessor()) {
+      right_successor_ = successor;
+    } else {
+      left_successor_ = successor;
+    }
+  }
+
+  ZoneList<BasicBlock*> predecessors_;
+  ZoneList<AstNode*> instructions_;
+  BasicBlock* left_successor_;
+  BasicBlock* right_successor_;
+
+  // Support for graph traversal.  Before traversal, all nodes in the graph
+  // have the same mark (true or false).  Traversal marks already-visited
+  // nodes with the opposite mark.  After traversal, all nodes again have
+  // the same mark.  Traversal of the same graph is not reentrant.
+  bool mark_;
+
+#ifdef DEBUG
+  int number_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(BasicBlock);
+};
+
+
+// A flow graph has distinguished entry and exit blocks.  The entry block is
+// the only one with no predecessors and the exit block is the only one with
+// no successors.
+class FlowGraph: public ZoneObject {
+ public:
+  FlowGraph(BasicBlock* entry, BasicBlock* exit)
+      : entry_(entry), exit_(exit), preorder_(8), postorder_(8) {
+  }
+
+  ZoneList<BasicBlock*>* preorder() { return &preorder_; }
+  ZoneList<BasicBlock*>* postorder() { return &postorder_; }
+
+#ifdef DEBUG
+  void PrintAsText(Handle<String> name);
+#endif
+
+ private:
+  BasicBlock* entry_;
+  BasicBlock* exit_;
+  ZoneList<BasicBlock*> preorder_;
+  ZoneList<BasicBlock*> postorder_;
+};
+
+
+// The flow graph builder walks the AST adding reachable AST nodes to the
+// flow graph as instructions.  It remembers the entry and exit nodes of the
+// graph, and keeps a pointer to the current block being constructed.
+class FlowGraphBuilder: public AstVisitor {
+ public:
+  FlowGraphBuilder() {}
+
+  FlowGraph* Build(FunctionLiteral* lit);
+
+ private:
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  BasicBlock* entry_;
+  BasicBlock* exit_;
+  BasicBlock* current_;
+
+  DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_FLOW_GRAPH_H_
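
A minimal sketch of the mark-flipping traversal protocol described by the
mark_ comment above, using std::vector in place of ZoneList (illustrative
only, not part of the patch):

    #include <vector>

    struct Block {
      std::vector<Block*> successors;
      bool mark;
    };

    // Before a walk, every reachable block carries the same mark value; the
    // walk stamps each visited block with the opposite value, so the next
    // traversal simply passes the flipped mark instead of clearing marks.
    void BuildTraversalOrder(Block* block,
                             std::vector<Block*>* preorder,
                             std::vector<Block*>* postorder,
                             bool mark) {
      if (block->mark == mark) return;  // Already visited in this walk.
      block->mark = mark;
      preorder->push_back(block);
      for (size_t i = 0; i < block->successors.size(); ++i) {
        BuildTraversalOrder(block->successors[i], preorder, postorder, mark);
      }
      postorder->push_back(block);
    }
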
diff --git a/src/frame-element.cc b/src/frame-element.cc
index 1455559..ee7be95 100644
--- a/src/frame-element.cc
+++ b/src/frame-element.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "frame-element.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/frame-element.h b/src/frame-element.h
index 5762814..48bb354 100644
--- a/src/frame-element.h
+++ b/src/frame-element.h
@@ -28,8 +28,9 @@
 #ifndef V8_FRAME_ELEMENT_H_
 #define V8_FRAME_ELEMENT_H_
 
-#include "number-info.h"
+#include "type-info.h"
 #include "macro-assembler.h"
+#include "zone.h"
 
 namespace v8 {
 namespace internal {
@@ -53,23 +54,19 @@
     SYNCED
   };
 
-  inline NumberInfo::Type number_info() {
-    // Copied elements do not have number info. Instead
+  inline TypeInfo type_info() {
+    // Copied elements do not have type info. Instead
     // we have to inspect their backing element in the frame.
     ASSERT(!is_copy());
-    if (!is_constant()) return NumberInfoField::decode(value_);
-    Handle<Object> value = handle();
-    if (value->IsSmi()) return NumberInfo::kSmi;
-    if (value->IsHeapNumber()) return NumberInfo::kHeapNumber;
-    return NumberInfo::kUnknown;
+    return TypeInfo::FromInt(TypeInfoField::decode(value_));
   }
 
-  inline void set_number_info(NumberInfo::Type info) {
-    // Copied elements do not have number info. Instead
+  inline void set_type_info(TypeInfo info) {
+    // Copied elements do not have type info. Instead
     // we have to inspect their backing element in the frame.
     ASSERT(!is_copy());
-    value_ = value_ & ~NumberInfoField::mask();
-    value_ = value_ | NumberInfoField::encode(info);
+    value_ = value_ & ~TypeInfoField::mask();
+    value_ = value_ | TypeInfoField::encode(info.ToInt());
   }
 
   // The default constructor creates an invalid frame element.
@@ -77,7 +74,7 @@
     value_ = TypeField::encode(INVALID)
         | CopiedField::encode(false)
         | SyncedField::encode(false)
-        | NumberInfoField::encode(NumberInfo::kUninitialized)
+        | TypeInfoField::encode(TypeInfo::Uninitialized().ToInt())
         | DataField::encode(0);
   }
 
@@ -88,7 +85,7 @@
   }
 
   // Factory function to construct an in-memory frame element.
-  static FrameElement MemoryElement(NumberInfo::Type info) {
+  static FrameElement MemoryElement(TypeInfo info) {
     FrameElement result(MEMORY, no_reg, SYNCED, info);
     return result;
   }
@@ -96,7 +93,7 @@
   // Factory function to construct an in-register frame element.
   static FrameElement RegisterElement(Register reg,
                                       SyncFlag is_synced,
-                                      NumberInfo::Type info) {
+                                      TypeInfo info) {
     return FrameElement(REGISTER, reg, is_synced, info);
   }
 
@@ -104,7 +101,8 @@
   // compile time.
   static FrameElement ConstantElement(Handle<Object> value,
                                       SyncFlag is_synced) {
-    FrameElement result(value, is_synced);
+    TypeInfo info = TypeInfo::TypeFromValue(value);
+    FrameElement result(value, is_synced, info);
     return result;
   }
 
@@ -142,6 +140,16 @@
   void set_copied() { value_ = value_ | CopiedField::encode(true); }
   void clear_copied() { value_ = value_ & ~CopiedField::mask(); }
 
+  // An untagged int32 FrameElement represents a signed int32
+  // on the stack.  These are only allowed in a side-effect-free
+  // int32 calculation, and if a non-int32 input shows up or an overflow
+  // occurs, we bail out and drop all the int32 values.
+  void set_untagged_int32(bool value) {
+    value_ &= ~UntaggedInt32Field::mask();
+    value_ |= UntaggedInt32Field::encode(value);
+  }
+  bool is_untagged_int32() const { return UntaggedInt32Field::decode(value_); }
+
   Register reg() const {
     ASSERT(is_register());
     uint32_t reg = DataField::decode(value_);
@@ -210,20 +218,20 @@
   FrameElement(Type type,
                Register reg,
                SyncFlag is_synced,
-               NumberInfo::Type info) {
+               TypeInfo info) {
     value_ = TypeField::encode(type)
         | CopiedField::encode(false)
         | SyncedField::encode(is_synced != NOT_SYNCED)
-        | NumberInfoField::encode(info)
+        | TypeInfoField::encode(info.ToInt())
         | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
   }
 
   // Used to construct constant elements.
-  FrameElement(Handle<Object> value, SyncFlag is_synced) {
+  FrameElement(Handle<Object> value, SyncFlag is_synced, TypeInfo info) {
     value_ = TypeField::encode(CONSTANT)
         | CopiedField::encode(false)
         | SyncedField::encode(is_synced != NOT_SYNCED)
-        | NumberInfoField::encode(NumberInfo::kUninitialized)
+        | TypeInfoField::encode(info.ToInt())
         | DataField::encode(ConstantList()->length());
     ConstantList()->Add(value);
   }
@@ -249,11 +257,13 @@
   // Encode type, copied, synced and data in one 32 bit integer.
   uint32_t value_;
 
+  // Declare BitFields with template parameters <type, start, size>.
   class TypeField: public BitField<Type, 0, 3> {};
   class CopiedField: public BitField<bool, 3, 1> {};
   class SyncedField: public BitField<bool, 4, 1> {};
-  class NumberInfoField: public BitField<NumberInfo::Type, 5, 3> {};
-  class DataField: public BitField<uint32_t, 8, 32 - 8> {};
+  class UntaggedInt32Field: public BitField<bool, 5, 1> {};
+  class TypeInfoField: public BitField<int, 6, 6> {};
+  class DataField: public BitField<uint32_t, 12, 32 - 12> {};
 
   friend class VirtualFrame;
 };
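
A sketch of the shift-and-mask arithmetic behind the BitField declarations
above (the real template lives in src/utils.h; this is a simplified
rendering):

    template <class T, int start, int size>
    class BitField {
     public:
      static uint32_t mask() { return ((1U << size) - 1) << start; }
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << start;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & mask()) >> start);
      }
    };

With the new layout, bit 5 holds the untagged-int32 flag and bits 6..11 hold
the TypeInfo payload, which is why DataField shrinks from bits 8..31
(<8, 24>) to bits 12..31 (<12, 20>).
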
diff --git a/src/frames.cc b/src/frames.cc
index 06896ea..9cf83c9 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -32,7 +32,6 @@
 #include "scopeinfo.h"
 #include "string-stream.h"
 #include "top.h"
-#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -307,14 +306,12 @@
 
 
 void StackHandler::Cook(Code* code) {
-  ASSERT(MarkCompactCollector::IsCompacting());
   ASSERT(code->contains(pc()));
   set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
 }
 
 
 void StackHandler::Uncook(Code* code) {
-  ASSERT(MarkCompactCollector::HasCompacted());
   set_pc(code->instruction_start() + OffsetFrom(pc()));
   ASSERT(code->contains(pc()));
 }
@@ -330,9 +327,6 @@
 
 
 void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
-  // Only cooking frames when the collector is compacting and thus moving code
-  // around.
-  ASSERT(MarkCompactCollector::IsCompacting());
   ASSERT(!thread->stack_is_cooked());
   for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
     it.frame()->Cook();
@@ -342,9 +336,6 @@
 
 
 void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
-  // Only uncooking frames when the collector is compacting and thus moving code
-  // around.
-  ASSERT(MarkCompactCollector::HasCompacted());
   ASSERT(thread->stack_is_cooked());
   for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
     it.frame()->Uncook();
@@ -391,6 +382,12 @@
 }
 
 
+void EntryFrame::SetCallerFp(Address caller_fp) {
+  const int offset = EntryFrameConstants::kCallerFPOffset;
+  Memory::Address_at(this->fp() + offset) = caller_fp;
+}
+
+
 StackFrame::Type EntryFrame::GetCallerState(State* state) const {
   const int offset = EntryFrameConstants::kCallerFPOffset;
   Address fp = Memory::Address_at(this->fp() + offset);
@@ -423,6 +420,11 @@
 }
 
 
+void ExitFrame::SetCallerFp(Address caller_fp) {
+  Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset) = caller_fp;
+}
+
+
 Address ExitFrame::GetCallerStackPointer() const {
   return fp() + ExitFrameConstants::kCallerSPDisplacement;
 }
@@ -452,6 +454,12 @@
 }
 
 
+void StandardFrame::SetCallerFp(Address caller_fp) {
+  Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset) =
+      caller_fp;
+}
+
+
 bool StandardFrame::IsExpressionInsideHandler(int n) const {
   Address address = GetExpressionAddress(n);
   for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
@@ -523,6 +531,31 @@
   Code* code = NULL;
   if (IsConstructor()) accumulator->Add("new ");
   accumulator->PrintFunction(function, receiver, &code);
+
+  if (function->IsJSFunction()) {
+    Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
+    Object* script_obj = shared->script();
+    if (script_obj->IsScript()) {
+      Handle<Script> script(Script::cast(script_obj));
+      accumulator->Add(" [");
+      accumulator->PrintName(script->name());
+
+      Address pc = this->pc();
+      if (code != NULL && code->kind() == Code::FUNCTION &&
+          pc >= code->instruction_start() && pc < code->relocation_start()) {
+        int source_pos = code->SourcePosition(pc);
+        int line = GetScriptLineNumberSafe(script, source_pos) + 1;
+        accumulator->Add(":%d", line);
+      } else {
+        int function_start_pos = shared->start_position();
+        int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
+        accumulator->Add(":~%d", line);
+      }
+
+      accumulator->Add("] ");
+    }
+  }
+
   accumulator->Add("(this=%o", receiver);
 
   // Get scope information for nicer output, if possible. If code is
@@ -751,4 +784,40 @@
 }
 
 
+#define DEFINE_WRAPPER(type, field)                              \
+class field##_Wrapper : public ZoneObject {                      \
+ public:  /* NOLINT */                                           \
+  field##_Wrapper(const field& original) : frame_(original) {    \
+  }                                                              \
+  field frame_;                                                  \
+};
+STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER)
+#undef DEFINE_WRAPPER
+
+static StackFrame* AllocateFrameCopy(StackFrame* frame) {
+#define FRAME_TYPE_CASE(type, field) \
+  case StackFrame::type: { \
+    field##_Wrapper* wrapper = \
+        new field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
+    return &wrapper->frame_; \
+  }
+
+  switch (frame->type()) {
+    STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
+    default: UNREACHABLE();
+  }
+#undef FRAME_TYPE_CASE
+  return NULL;
+}
+
+Vector<StackFrame*> CreateStackMap() {
+  ZoneList<StackFrame*> list(10);
+  for (StackFrameIterator it; !it.done(); it.Advance()) {
+    StackFrame* frame = AllocateFrameCopy(it.frame());
+    list.Add(frame);
+  }
+  return list.ToVector();
+}
+
+
 } }  // namespace v8::internal
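
For one entry of STACK_FRAME_TYPE_LIST, the DEFINE_WRAPPER macro above
expands to roughly the following (shown for JavaScriptFrame; the wrapper
exists only to give the copied frame zone-allocated storage):

    class JavaScriptFrame_Wrapper : public ZoneObject {
     public:  /* NOLINT */
      JavaScriptFrame_Wrapper(const JavaScriptFrame& original)
          : frame_(original) {
      }
      JavaScriptFrame frame_;
    };

The copies rely on the StackFrame copy constructor added to frames.h below,
which nulls the iterator pointer so that a copy can outlive the
StackFrameIterator it was taken from.
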
diff --git a/src/frames.h b/src/frames.h
index 8cbbc62..98aaead 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -114,6 +114,12 @@
   // by the debugger.
   enum Id { NO_ID = 0 };
 
+  // Copy constructor; it breaks the connection to the host iterator.
+  StackFrame(const StackFrame& original) {
+    this->state_ = original.state_;
+    this->iterator_ = NULL;
+  }
+
   // Type testers.
   bool is_entry() const { return type() == ENTRY; }
   bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
@@ -132,6 +138,8 @@
   Address pc() const { return *pc_address(); }
   void set_pc(Address pc) { *pc_address() = pc; }
 
+  virtual void SetCallerFp(Address caller_fp) = 0;
+
   Address* pc_address() const { return state_.pc_address; }
 
   // Get the id of this stack frame.
@@ -200,7 +208,8 @@
   friend class StackHandlerIterator;
   friend class SafeStackFrameIterator;
 
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StackFrame);
+ private:
+  void operator=(const StackFrame& original);
 };
 
 
@@ -218,6 +227,7 @@
     ASSERT(frame->is_entry());
     return static_cast<EntryFrame*>(frame);
   }
+  virtual void SetCallerFp(Address caller_fp);
 
  protected:
   explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
@@ -268,6 +278,8 @@
   // Garbage collection support.
   virtual void Iterate(ObjectVisitor* v) const;
 
+  virtual void SetCallerFp(Address caller_fp);
+
   static ExitFrame* cast(StackFrame* frame) {
     ASSERT(frame->is_exit());
     return static_cast<ExitFrame*>(frame);
@@ -303,6 +315,8 @@
   inline void SetExpression(int index, Object* value);
   int ComputeExpressionsCount() const;
 
+  virtual void SetCallerFp(Address caller_fp);
+
   static StandardFrame* cast(StackFrame* frame) {
     ASSERT(frame->is_standard());
     return static_cast<StandardFrame*>(frame);
@@ -658,6 +672,10 @@
 };
 
 
+// Reads all frames on the current stack and copies them into the current
+// zone memory.
+Vector<StackFrame*> CreateStackMap();
+
 } }  // namespace v8::internal
 
 #endif  // V8_FRAMES_H_
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 6371439..699a1e9 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -30,6 +30,7 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "full-codegen.h"
+#include "scopes.h"
 #include "stub-cache.h"
 #include "debug.h"
 #include "liveedit.h"
@@ -211,9 +212,9 @@
 }
 
 
-void FullCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
+void FullCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
 }
 
 
@@ -449,7 +450,6 @@
   CodeGenerator::MakeCodePrologue(info);
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
-  LiveEditFunctionTracker live_edit_tracker(info->function());
 
   FullCodeGenerator cgen(&masm);
   cgen.Generate(info, PRIMARY);
@@ -458,9 +458,7 @@
     return Handle<Code>::null();
   }
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
-  Handle<Code> result = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
-  live_edit_tracker.RecordFunctionCode(result);
-  return result;
+  return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
 }
 
 
@@ -523,8 +521,8 @@
             array->set_undefined(j++);
           }
         } else {
-          Handle<JSFunction> function =
-              Compiler::BuildBoilerplate(decl->fun(), script(), this);
+          Handle<SharedFunctionInfo> function =
+              Compiler::BuildFunctionInfo(decl->fun(), script(), this);
           // Check for stack-overflow exception.
           if (HasStackOverflow()) return;
           array->set(j++, *function);
@@ -997,8 +995,8 @@
 }
 
 
-void FullCodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }
 
diff --git a/src/globals.h b/src/globals.h
index 68d0bdc..bef5e8e 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -50,15 +50,32 @@
 #define V8_HOST_ARCH_MIPS 1
 #define V8_HOST_ARCH_32_BIT 1
 #else
-#error Your host architecture was not detected as supported by v8
+#error Host architecture was not detected as supported by v8
 #endif
 
+// Check for supported combinations of host and target architectures.
+#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
+#error Target architecture ia32 is only supported on ia32 host
+#endif
+#if defined(V8_TARGET_ARCH_X64) && !defined(V8_HOST_ARCH_X64)
+#error Target architecture x64 is only supported on x64 host
+#endif
+#if (defined(V8_TARGET_ARCH_ARM) && \
+    !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM)))
+#error Target architecture arm is only supported on arm and ia32 host
+#endif
+#if (defined(V8_TARGET_ARCH_MIPS) && \
+    !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS)))
+#error Target architecture mips is only supported on mips and ia32 host
+#endif
+
+// Define unaligned read for the target architectures that support it.
 #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
 #define V8_TARGET_CAN_READ_UNALIGNED 1
 #elif V8_TARGET_ARCH_ARM
 #elif V8_TARGET_ARCH_MIPS
 #else
-#error Your target architecture is not supported by v8
+#error Target architecture is not supported by v8
 #endif
 
 // Support for alternative bool type. This is only enabled if the code is
@@ -98,6 +115,11 @@
 #define V8_PTR_PREFIX ""
 #endif  // V8_HOST_ARCH_64_BIT
 
+// The following macro works on both 32- and 64-bit platforms.
+// Usage: instead of writing 0x1234567890123456,
+//        write V8_2PART_UINT64_C(0x12345678, 90123456);
+#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
+
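// Worked expansion of the example above (editorial sketch, not part of the
// patch): the token paste 0x##b##u turns the second argument into a new hex
// literal, so
//   V8_2PART_UINT64_C(0x12345678, 90123456)
// becomes
//   (((static_cast<uint64_t>(0x12345678) << 32) + 0x90123456u))
// which evaluates to 0x1234567890123456 without needing a ULL suffix.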
 #define V8PRIxPTR V8_PTR_PREFIX "x"
 #define V8PRIdPTR V8_PTR_PREFIX "d"
 
@@ -107,8 +129,9 @@
 #define V8PRIxPTR "lx"
 #endif
 
-#if defined(__APPLE__) && defined(__MACH__)
-#define USING_MAC_ABI
+#if (defined(__APPLE__) && defined(__MACH__)) || \
+    defined(__FreeBSD__) || defined(__OpenBSD__)
+#define USING_BSD_ABI
 #endif
 
 // Code-point values in Unicode 4.0 are 21 bits wide.
@@ -141,6 +164,9 @@
 const intptr_t kIntptrSignBit = 0x80000000;
 #endif
 
+// Mask for the sign bit in a smi.
+const intptr_t kSmiSignMask = kIntptrSignBit;
+
 const int kObjectAlignmentBits = kPointerSizeLog2;
 const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
 const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
@@ -169,6 +195,15 @@
 const int kBitsPerPointer = kPointerSize * kBitsPerByte;
 const int kBitsPerInt = kIntSize * kBitsPerByte;
 
+// IEEE 754 single precision floating point number bit layout.
+const uint32_t kBinary32SignMask = 0x80000000u;
+const uint32_t kBinary32ExponentMask = 0x7f800000u;
+const uint32_t kBinary32MantissaMask = 0x007fffffu;
+const int kBinary32ExponentBias = 127;
+const int kBinary32MaxExponent  = 0xFE;
+const int kBinary32MinExponent  = 0x01;
+const int kBinary32MantissaBits = 23;
+const int kBinary32ExponentShift = 23;
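// Sketch (not part of the patch): decomposing a binary32 bit pattern with
// the constants above.  For 1.0f the pattern is 0x3F800000: sign bit 0,
// biased exponent 0x7F == 127 (unbiased 0), mantissa 0.
//
//   bool negative = (bits & kBinary32SignMask) != 0;
//   int exponent = static_cast<int>((bits & kBinary32ExponentMask) >>
//                                   kBinary32ExponentShift) -
//                  kBinary32ExponentBias;
//   uint32_t mantissa = bits & kBinary32MantissaMask;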
 
 // Zap-value: The value used for zapping dead objects.
 // Should be a recognizable hex value tagged as a heap object pointer.
@@ -190,6 +225,10 @@
 // gives 8K bytes per page.
 const int kPageSizeBits = 13;
 
+// On Intel architectures the cache line size is 64 bytes.
+// On ARM it may be smaller (32 bytes), but since this constant is only
+// used for aligning data, it doesn't hurt to align on the larger value.
+const int kProcessorCacheLineSize = 64;
 
 // Constants relevant to double precision floating point numbers.
 
@@ -261,6 +300,8 @@
 class Script;
 class Slot;
 class Smi;
+template <typename Config, class Allocator = FreeStoreAllocationPolicy>
+    class SplayTree;
 class Statement;
 class String;
 class Struct;
@@ -314,8 +355,7 @@
 
 enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
 
-// Flag indicating whether code is built in to the VM (one of the natives
-// files).
+// Flag indicating whether code is built into the VM (one of the natives files).
 enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
 
 
@@ -408,7 +448,11 @@
   CONSTANT_TRANSITION = 6,  // only in fast mode
   NULL_DESCRIPTOR     = 7,  // only in fast mode
   // All properties before MAP_TRANSITION are real.
-  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION
+  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
+  // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
+  // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
+  // nonexistent properties.
+  NONEXISTENT = NULL_DESCRIPTOR
 };
 
 
@@ -438,7 +482,7 @@
 
 // Logging and profiling.
 // A StateTag represents a possible state of the VM.  When compiled with
-// ENABLE_LOGGING_AND_PROFILING, the logger maintains a stack of these.
+// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these.
 // Creating a VMState object enters a state by pushing on the stack, and
 // destroying a VMState object leaves a state by popping the current state
 // from the stack.
@@ -571,42 +615,6 @@
 #define INLINE(header) inline header
 #endif
 
-// The type-based aliasing rule allows the compiler to assume that pointers of
-// different types (for some definition of different) never alias each other.
-// Thus the following code does not work:
-//
-// float f = foo();
-// int fbits = *(int*)(&f);
-//
-// The compiler 'knows' that the int pointer can't refer to f since the types
-// don't match, so the compiler may cache f in a register, leaving random data
-// in fbits.  Using C++ style casts makes no difference, however a pointer to
-// char data is assumed to alias any other pointer.  This is the 'memcpy
-// exception'.
-//
-// Bit_cast uses the memcpy exception to move the bits from a variable of one
-// type of a variable of another type.  Of course the end result is likely to
-// be implementation dependent.  Most compilers (gcc-4.2 and MSVC 2005)
-// will completely optimize bit_cast away.
-//
-// There is an additional use for bit_cast.
-// Recent gccs will warn when they see casts that may result in breakage due to
-// the type-based aliasing rule.  If you have checked that there is no breakage
-// you can use bit_cast to cast one pointer type to another.  This confuses gcc
-// enough that it can no longer see that you have cast one pointer type to
-// another thus avoiding the warning.
-template <class Dest, class Source>
-inline Dest bit_cast(const Source& source) {
-  // Compile time assertion: sizeof(Dest) == sizeof(Source)
-  // A compile error here means your Dest and Source have different sizes.
-  typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
-
-  Dest dest;
-  memcpy(&dest, &source, sizeof(dest));
-  return dest;
-}
-
-
 // Feature flags bit positions. They are mostly based on the CPUID spec.
 // (We assign CPUID itself to one of the currently reserved bits --
 // feel free to change this if needed.)
diff --git a/src/handles.cc b/src/handles.cc
index c9a2877..1d4465f 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -174,13 +174,6 @@
 }
 
 
-void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
-                                          int estimate) {
-  SetExpectedNofProperties(
-      func, ExpectedNofPropertiesFromEstimate(estimate));
-}
-
-
 void NormalizeProperties(Handle<JSObject> object,
                          PropertyNormalizationMode mode,
                          int expected_additional_properties) {
@@ -203,13 +196,14 @@
 
 
 void FlattenString(Handle<String> string) {
-  CALL_HEAP_FUNCTION_VOID(string->TryFlattenIfNotFlat());
+  CALL_HEAP_FUNCTION_VOID(string->TryFlatten());
   ASSERT(string->IsFlat());
 }
 
 
 Handle<Object> SetPrototype(Handle<JSFunction> function,
                             Handle<Object> prototype) {
+  ASSERT(function->should_have_prototype());
   CALL_HEAP_FUNCTION(Accessors::FunctionSetPrototype(*function,
                                                      *prototype,
                                                      NULL),
@@ -292,6 +286,12 @@
 }
 
 
+Handle<Object> GetElement(Handle<Object> obj,
+                          uint32_t index) {
+  CALL_HEAP_FUNCTION(Runtime::GetElement(obj, index), Object);
+}
+
+
 Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
                                           Handle<JSObject> holder,
                                           Handle<String> name,
@@ -371,8 +371,11 @@
 }
 
 
-Handle<String> SubString(Handle<String> str, int start, int end) {
-  CALL_HEAP_FUNCTION(str->SubString(start, end), String);
+Handle<String> SubString(Handle<String> str,
+                         int start,
+                         int end,
+                         PretenureFlag pretenure) {
+  CALL_HEAP_FUNCTION(str->SubString(start, end, pretenure), String);
 }
 
 
@@ -455,6 +458,16 @@
   }
 
   Handle<String> src(String::cast(script->source()));
+
+  Handle<FixedArray> array = CalculateLineEnds(src, true);
+
+  script->set_line_ends(*array);
+  ASSERT(script->line_ends()->IsFixedArray());
+}
+
+
+Handle<FixedArray> CalculateLineEnds(Handle<String> src,
+                                     bool with_imaginary_last_new_line) {
   const int src_len = src->length();
   Handle<String> new_line = Factory::NewStringFromAscii(CStrVector("\n"));
 
@@ -466,8 +479,12 @@
     if (position != -1) {
       position++;
     }
-    // Even if the last line misses a line end, it is counted.
-    line_count++;
+    if (position != -1) {
+      line_count++;
+    } else if (with_imaginary_last_new_line) {
+      // Even if the last line misses a line end, it is counted.
+      line_count++;
+    }
   }
 
   // Pass 2: Fill in line ends positions
@@ -476,15 +493,17 @@
   position = 0;
   while (position != -1 && position < src_len) {
     position = Runtime::StringMatch(src, new_line, position);
-    // If the script does not end with a line ending add the final end
-    // position as just past the last line ending.
-    array->set(array_index++,
-               Smi::FromInt(position != -1 ? position++ : src_len));
+    if (position != -1) {
+      array->set(array_index++, Smi::FromInt(position++));
+    } else if (with_imaginary_last_new_line) {
+      // If the script does not end with a line ending add the final end
+      // position as just past the last line ending.
+      array->set(array_index++, Smi::FromInt(src_len));
+    }
   }
   ASSERT(array_index == line_count);
 
-  script->set_line_ends(*array);
-  ASSERT(script->line_ends()->IsFixedArray());
+  return array;
 }
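// Illustration of with_imaginary_last_new_line (sketch, not part of the
// patch), for the source "a\nbb\nccc" -- length 8, newlines at 1 and 4:
//   true  -> line ends {1, 4, 8}; the final 8 is an imaginary line end just
//            past the last character, so the unterminated last line still
//            counts.
//   false -> line ends {1, 4}.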
 
 
@@ -514,8 +533,32 @@
 }
 
 
+int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
+  AssertNoAllocation no_allocation;
+  if (!script->line_ends()->IsUndefined()) {
+    return GetScriptLineNumber(script, code_pos);
+  }
+  // Slow mode: we do not have line_ends, so iterate through the source.
+  if (!script->source()->IsString()) {
+    return -1;
+  }
+  String* source = String::cast(script->source());
+  int line = 0;
+  int len = source->length();
+  for (int pos = 0; pos < len; pos++) {
+    if (pos == code_pos) {
+      break;
+    }
+    if (source->Get(pos) == '\n') {
+      line++;
+    }
+  }
+  return line;
+}
+
+
 void CustomArguments::IterateInstance(ObjectVisitor* v) {
-  v->VisitPointers(values_, values_ + 4);
+  v->VisitPointers(values_, values_ + ARRAY_SIZE(values_));
 }
 
 
@@ -711,7 +754,7 @@
                  ClearExceptionFlag flag) {
   CompilationInfo info(function, 0, receiver);
   bool result = CompileLazyHelper(&info, flag);
-  LOG(FunctionCreateEvent(*function));
+  PROFILE(FunctionCreateEvent(*function));
   return result;
 }
 
@@ -721,7 +764,7 @@
                        ClearExceptionFlag flag) {
   CompilationInfo info(function, 1, receiver);
   bool result = CompileLazyHelper(&info, flag);
-  LOG(FunctionCreateEvent(*function));
+  PROFILE(FunctionCreateEvent(*function));
   return result;
 }
 
diff --git a/src/handles.h b/src/handles.h
index 7902909..ea13def 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -42,7 +42,7 @@
 template<class T>
 class Handle {
  public:
-  INLINE(Handle(T** location)) { location_ = location; }
+  INLINE(explicit Handle(T** location)) { location_ = location; }
   INLINE(explicit Handle(T* obj));
 
   INLINE(Handle()) : location_(NULL) {}
@@ -238,6 +238,9 @@
 Handle<Object> GetProperty(Handle<Object> obj,
                            Handle<Object> key);
 
+Handle<Object> GetElement(Handle<Object> obj,
+                          uint32_t index);
+
 Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
                                           Handle<JSObject> holder,
                                           Handle<String> name,
@@ -268,7 +271,14 @@
 
 // Script line number computations.
 void InitScriptLineEnds(Handle<Script> script);
+// Calculates an array of line end positions for a string. If the string
+// does not end with a newline character, a final line end may optionally
+// be imagined just past the last character.
+Handle<FixedArray> CalculateLineEnds(Handle<String> string,
+                                     bool with_imaginary_last_new_line);
 int GetScriptLineNumber(Handle<Script> script, int code_position);
+// The safe version does not make heap allocations but may be much slower.
+int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
 
 // Computes the enumerable keys from interceptors. Used for debug mirrors and
 // by GetKeysInFixedArrayFor below.
@@ -292,7 +302,10 @@
 Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
                                Handle<FixedArray> second);
 
-Handle<String> SubString(Handle<String> str, int start, int end);
+Handle<String> SubString(Handle<String> str,
+                         int start,
+                         int end,
+                         PretenureFlag pretenure = NOT_TENURED);
 
 
 // Sets the expected number of properties for the function's instances.
@@ -304,8 +317,6 @@
 // Sets the expected number of properties based on estimate from compiler.
 void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
                                           int estimate);
-void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
-                                          int estimate);
 
 
 Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
diff --git a/src/heap-inl.h b/src/heap-inl.h
index f18bf0f..82e1a91 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -133,7 +133,8 @@
 #ifdef DEBUG
   if (!result->IsFailure()) {
     // Maps have their own alignment.
-    CHECK((OffsetFrom(result) & kMapAlignmentMask) == kHeapObjectTag);
+    CHECK((reinterpret_cast<intptr_t>(result) & kMapAlignmentMask) ==
+          static_cast<intptr_t>(kHeapObjectTag));
   }
 #endif
   return result;
@@ -187,6 +188,18 @@
 }
 
 
+void Heap::RecordWrites(Address address, int start, int len) {
+  if (new_space_.Contains(address)) return;
+  ASSERT(!new_space_.FromSpaceContains(address));
+  for (int offset = start;
+       offset < start + len * kPointerSize;
+       offset += kPointerSize) {
+    SLOW_ASSERT(Contains(address + offset));
+    Page::SetRSet(address, offset);
+  }
+}
+
+
 OldSpace* Heap::TargetSpace(HeapObject* object) {
   InstanceType type = object->map()->instance_type();
   AllocationSpace space = TargetSpaceId(type);
@@ -223,19 +236,27 @@
 
 void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
   ASSERT(IsAligned(byte_size, kPointerSize));
+  CopyWords(dst, src, byte_size / kPointerSize);
+}
 
-  // Use block copying memcpy if the segment we're copying is
-  // enough to justify the extra call/setup overhead.
-  static const int kBlockCopyLimit = 16 * kPointerSize;
 
-  if (byte_size >= kBlockCopyLimit) {
-    memcpy(dst, src, byte_size);
-  } else {
-    int remaining = byte_size / kPointerSize;
-    do {
-      remaining--;
+void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
+  ASSERT(IsAligned(byte_size, kPointerSize));
+
+  int size_in_words = byte_size / kPointerSize;
+
+  if ((dst < src) || (dst >= (src + size_in_words))) {
+    ASSERT((dst >= (src + size_in_words)) ||
+           ((OffsetFrom(reinterpret_cast<Address>(src)) -
+             OffsetFrom(reinterpret_cast<Address>(dst))) >= kPointerSize));
+
+    Object** end = src + size_in_words;
+
+    while (src != end) {
       *dst++ = *src++;
-    } while (remaining > 0);
+    }
+  } else {
+    memmove(dst, src, byte_size);
   }
 }
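// Why MoveBlock branches (editorial sketch): an ascending word copy is only
// safe when dst does not start inside the source range at a higher address.
//
//   src: [w0 w1 w2 w3], dst == src + 1  -> the forward loop writes w0 over
//                                          w1 before w1 is read; memmove.
//   dst == src - 1 (still overlapping)  -> every word is read before the
//                                          write reaches it; the simple
//                                          forward loop is fine.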
 
@@ -261,6 +282,25 @@
 }
 
 
+Object* Heap::PrepareForCompare(String* str) {
+  // Always flatten small strings, and force flattening of long strings
+  // once a certain amount of unflattened string length has accumulated.
+  static const int kMaxAlwaysFlattenLength = 32;
+  static const int kFlattenLongThreshold = 16*KB;
+
+  const int length = str->length();
+  Object* obj = str->TryFlatten();
+  if (length <= kMaxAlwaysFlattenLength ||
+      unflattened_strings_length_ >= kFlattenLongThreshold) {
+    return obj;
+  }
+  if (obj->IsFailure()) {
+    unflattened_strings_length_ += length;
+  }
+  return str;
+}
+
+
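// Worked example of the PrepareForCompare policy above (editorial sketch):
//   length 20                  -> always return TryFlatten()'s result, even
//                                 a failure (<= kMaxAlwaysFlattenLength).
//   length 40*KB, flatten fails, counter at 0
//                              -> compare unflattened; counter grows by
//                                 40*KB.
//   next long flatten failure  -> counter >= kFlattenLongThreshold (16*KB),
//                                 so the failure is propagated and the
//                                 caller retries after GC.
// The counter is reset each GC in GarbageCollectionPrologue (heap.cc below).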
 int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
   ASSERT(HasBeenSetup());
   int amount = amount_of_external_allocated_memory_ + change_in_bytes;
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 3cb65ee..90544f1 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -31,6 +31,7 @@
 #include "frames-inl.h"
 #include "global-handles.h"
 #include "string-stream.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index c615942..d6f2650 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -28,6 +28,8 @@
 #ifndef V8_HEAP_PROFILER_H_
 #define V8_HEAP_PROFILER_H_
 
+#include "zone.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/heap.cc b/src/heap.cc
index cfb786a..193f082 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -41,11 +41,12 @@
 #include "scopeinfo.h"
 #include "snapshot.h"
 #include "v8threads.h"
-#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
 #include "regexp-macro-assembler.h"
 #include "arm/regexp-macro-assembler-arm.h"
 #endif
 
+
 namespace v8 {
 namespace internal {
 
@@ -97,6 +98,9 @@
 // set up by ConfigureHeap otherwise.
 int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
 
+List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
+List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
+
 GCCallback Heap::global_gc_prologue_callback_ = NULL;
 GCCallback Heap::global_gc_epilogue_callback_ = NULL;
 
@@ -113,9 +117,11 @@
 int Heap::mc_count_ = 0;
 int Heap::gc_count_ = 0;
 
+int Heap::unflattened_strings_length_ = 0;
+
 int Heap::always_allocate_scope_depth_ = 0;
 int Heap::linear_allocation_scope_depth_ = 0;
-bool Heap::context_disposed_pending_ = false;
+int Heap::contexts_disposed_ = 0;
 
 #ifdef DEBUG
 bool Heap::allocation_allowed_ = true;
@@ -300,7 +306,9 @@
 
 void Heap::GarbageCollectionPrologue() {
   TranscendentalCache::Clear();
+  ClearJSFunctionResultCaches();
   gc_count_++;
+  unflattened_strings_length_ = 0;
 #ifdef DEBUG
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   allow_allocation(false);
@@ -371,24 +379,6 @@
 }
 
 
-void Heap::CollectAllGarbageIfContextDisposed() {
-  // If the garbage collector interface is exposed through the global
-  // gc() function, we avoid being clever about forcing GCs when
-  // contexts are disposed and leave it to the embedder to make
-  // informed decisions about when to force a collection.
-  if (!FLAG_expose_gc && context_disposed_pending_) {
-    HistogramTimerScope scope(&Counters::gc_context);
-    CollectAllGarbage(false);
-  }
-  context_disposed_pending_ = false;
-}
-
-
-void Heap::NotifyContextDisposed() {
-  context_disposed_pending_ = true;
-}
-
-
 bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
   // The VM is in the GC state until exiting this function.
   VMState state(GC);
@@ -552,16 +542,51 @@
 }
 
 
+class ClearThreadJSFunctionResultCachesVisitor: public ThreadVisitor {
+  virtual void VisitThread(ThreadLocalTop* top) {
+    Context* context = top->context_;
+    if (context == NULL) return;
+
+    FixedArray* caches =
+      context->global()->global_context()->jsfunction_result_caches();
+    int length = caches->length();
+    for (int i = 0; i < length; i++) {
+      JSFunctionResultCache::cast(caches->get(i))->Clear();
+    }
+  }
+};
+
+
+void Heap::ClearJSFunctionResultCaches() {
+  if (Bootstrapper::IsActive()) return;
+  ClearThreadJSFunctionResultCachesVisitor visitor;
+  ThreadManager::IterateThreads(&visitor);
+}
+
+
 void Heap::PerformGarbageCollection(AllocationSpace space,
                                     GarbageCollector collector,
                                     GCTracer* tracer) {
   VerifySymbolTable();
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);
+    GCTracer::ExternalScope scope(tracer);
     global_gc_prologue_callback_();
   }
+
+  GCType gc_type =
+      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
+
+  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
+      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
+    }
+  }
+
   EnsureFromSpaceIsCommitted();
+
   if (collector == MARK_COMPACTOR) {
+    // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
 
     int old_gen_size = PromotedSpaceSize();
@@ -570,13 +595,15 @@
     old_gen_allocation_limit_ =
         old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
     old_gen_exhausted_ = false;
+  } else {
+    Scavenge();
   }
-  Scavenge();
 
   Counters::objs_since_last_young.Set(0);
 
   if (collector == MARK_COMPACTOR) {
     DisableAssertNoAllocation allow_allocation;
+    GCTracer::ExternalScope scope(tracer);
     GlobalHandles::PostGarbageCollectionProcessing();
   }
 
@@ -589,8 +616,18 @@
         amount_of_external_allocated_memory_;
   }
 
+  GCCallbackFlags callback_flags = tracer->is_compacting()
+      ? kGCCallbackFlagCompacted
+      : kNoGCCallbackFlags;
+  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
+      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
+    }
+  }
+
   if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
     ASSERT(!allocation_allowed_);
+    GCTracer::ExternalScope scope(tracer);
     global_gc_epilogue_callback_();
   }
   VerifySymbolTable();
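// Embedder-side view of the new typed callbacks (a sketch assuming the
// v8.h declarations that accompany this change: GCType, GCCallbackFlags
// and V8::AddGCPrologueCallback):
//
//   static void OnFullGC(v8::GCType type, v8::GCCallbackFlags flags) {
//     // Called only for mark-sweep/compact collections because of the
//     // filter below; flags carries kGCCallbackFlagCompacted when the
//     // collection compacted.
//   }
//
//   v8::V8::AddGCPrologueCallback(OnFullGC, v8::kGCTypeMarkSweepCompact);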
@@ -620,7 +657,8 @@
   Shrink();
 
   Counters::objs_since_last_full.Set(0);
-  context_disposed_pending_ = false;
+
+  contexts_disposed_ = 0;
 }
 
 
@@ -744,6 +782,17 @@
 #endif
 
 
+void Heap::CheckNewSpaceExpansionCriteria() {
+  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
+      survived_since_last_expansion_ > new_space_.Capacity()) {
+    // Grow the size of new space if there is room to grow and enough
+    // data has survived scavenge since the last expansion.
+    new_space_.Grow();
+    survived_since_last_expansion_ = 0;
+  }
+}
+
+
 void Heap::Scavenge() {
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@@ -760,13 +809,7 @@
   // Used for updating survived_since_last_expansion_ at function end.
   int survived_watermark = PromotedSpaceSize();
 
-  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
-      survived_since_last_expansion_ > new_space_.Capacity()) {
-    // Grow the size of new space if there is room to grow and enough
-    // data has survived scavenge since the last expansion.
-    new_space_.Grow();
-    survived_since_last_expansion_ = 0;
-  }
+  CheckNewSpaceExpansionCriteria();
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
@@ -817,15 +860,17 @@
 
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
-  ScavengeExternalStringTable();
+  UpdateNewSpaceReferencesInExternalStringTable(
+      &UpdateNewSpaceReferenceInExternalStringTableEntry);
+
   ASSERT(new_space_front == new_space_.top());
 
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
   // Update how much has survived scavenge.
-  survived_since_last_expansion_ +=
-      (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
+  IncrementYoungSurvivorsCounter(
+      (PromotedSpaceSize() - survived_watermark) + new_space_.Size());
 
   LOG(ResourceEvent("scavenge", "end"));
 
@@ -833,7 +878,22 @@
 }
 
 
-void Heap::ScavengeExternalStringTable() {
+String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
+  MapWord first_word = HeapObject::cast(*p)->map_word();
+
+  if (!first_word.IsForwardingAddress()) {
+    // Unreachable external string can be finalized.
+    FinalizeExternalString(String::cast(*p));
+    return NULL;
+  }
+
+  // String is still reachable.
+  return String::cast(first_word.ToForwardingAddress());
+}
+
+
+void Heap::UpdateNewSpaceReferencesInExternalStringTable(
+    ExternalStringTableUpdaterCallback updater_func) {
   ExternalStringTable::Verify();
 
   if (ExternalStringTable::new_space_strings_.is_empty()) return;
@@ -844,16 +904,10 @@
 
   for (Object** p = start; p < end; ++p) {
     ASSERT(Heap::InFromSpace(*p));
-    MapWord first_word = HeapObject::cast(*p)->map_word();
+    String* target = updater_func(p);
 
-    if (!first_word.IsForwardingAddress()) {
-      // Unreachable external string can be finalized.
-      FinalizeExternalString(String::cast(*p));
-      continue;
-    }
+    if (target == NULL) continue;
 
-    // String is still reachable.
-    String* target = String::cast(first_word.ToForwardingAddress());
     ASSERT(target->IsExternalString());
 
     if (Heap::InNewSpace(target)) {
@@ -1221,6 +1275,16 @@
 }
 
 
+Object* Heap::AllocateCodeCache() {
+  Object* result = AllocateStruct(CODE_CACHE_TYPE);
+  if (result->IsFailure()) return result;
+  CodeCache* code_cache = CodeCache::cast(result);
+  code_cache->set_default_cache(empty_fixed_array());
+  code_cache->set_normal_type_cache(undefined_value());
+  return code_cache;
+}
+
+
 const Heap::StringTypeTable Heap::string_type_table[] = {
 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
   {type, size, k##camel_name##MapRootIndex},
@@ -1261,7 +1325,7 @@
   if (obj->IsFailure()) return false;
   set_oddball_map(Map::cast(obj));
 
-  // Allocate the empty array
+  // Allocate the empty array.
   obj = AllocateEmptyFixedArray();
   if (obj->IsFailure()) return false;
   set_empty_fixed_array(FixedArray::cast(obj));
@@ -1403,11 +1467,8 @@
   if (obj->IsFailure()) return false;
   set_global_context_map(Map::cast(obj));
 
-  obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
-  if (obj->IsFailure()) return false;
-  set_boilerplate_function_map(Map::cast(obj));
-
-  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
+  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
+                    SharedFunctionInfo::kAlignedSize);
   if (obj->IsFailure()) return false;
   set_shared_function_info_map(Map::cast(obj));
 
@@ -1456,10 +1517,9 @@
 }
 
 
-Object* Heap::CreateOddball(Map* map,
-                            const char* to_string,
+Object* Heap::CreateOddball(const char* to_string,
                             Object* to_number) {
-  Object* result = Allocate(map, OLD_DATA_SPACE);
+  Object* result = Allocate(oddball_map(), OLD_DATA_SPACE);
   if (result->IsFailure()) return result;
   return Oddball::cast(result)->Initialize(to_string, to_number);
 }
@@ -1490,7 +1550,7 @@
 }
 
 
-#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
 void Heap::CreateRegExpCEntryStub() {
   RegExpCEntryStub stub;
   set_re_c_entry_code(*stub.GetCode());
@@ -1527,7 +1587,7 @@
   Heap::CreateCEntryStub();
   Heap::CreateJSEntryStub();
   Heap::CreateJSConstructEntryStub();
-#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
   Heap::CreateRegExpCEntryStub();
 #endif
 }
@@ -1563,34 +1623,27 @@
   Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
   Oddball::cast(undefined_value())->set_to_number(nan_value());
 
-  // Assign the print strings for oddballs after creating symboltable.
-  symbol = LookupAsciiSymbol("null");
-  if (symbol->IsFailure()) return false;
-  Oddball::cast(null_value())->set_to_string(String::cast(symbol));
-  Oddball::cast(null_value())->set_to_number(Smi::FromInt(0));
-
   // Allocate the null_value
   obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
   if (obj->IsFailure()) return false;
 
-  obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
+  obj = CreateOddball("true", Smi::FromInt(1));
   if (obj->IsFailure()) return false;
   set_true_value(obj);
 
-  obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
+  obj = CreateOddball("false", Smi::FromInt(0));
   if (obj->IsFailure()) return false;
   set_false_value(obj);
 
-  obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
+  obj = CreateOddball("hole", Smi::FromInt(-1));
   if (obj->IsFailure()) return false;
   set_the_hole_value(obj);
 
-  obj = CreateOddball(
-      oddball_map(), "no_interceptor_result_sentinel", Smi::FromInt(-2));
+  obj = CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
   if (obj->IsFailure()) return false;
   set_no_interceptor_result_sentinel(obj);
 
-  obj = CreateOddball(oddball_map(), "termination_exception", Smi::FromInt(-3));
+  obj = CreateOddball("termination_exception", Smi::FromInt(-3));
   if (obj->IsFailure()) return false;
   set_termination_exception(obj);
 
@@ -1636,8 +1689,8 @@
 
   if (InitializeNumberStringCache()->IsFailure()) return false;
 
-  // Allocate cache for single character strings.
-  obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
+  // Allocate cache for single character ASCII strings.
+  obj = AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
   if (obj->IsFailure()) return false;
   set_single_character_string_cache(FixedArray::cast(obj));
 
@@ -1671,7 +1724,7 @@
   // max_semispace_size_ ==   8 MB => number_string_cache_size = 16KB.
   int number_string_cache_size = max_semispace_size_ / 512;
   number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
-  Object* obj = AllocateFixedArray(number_string_cache_size * 2);
+  Object* obj = AllocateFixedArray(number_string_cache_size * 2, TENURED);
   if (!obj->IsFailure()) set_number_string_cache(FixedArray::cast(obj));
   return obj;
 }
@@ -1731,46 +1784,13 @@
 }
 
 
-Object* Heap::SmiOrNumberFromDouble(double value,
-                                    bool new_object,
-                                    PretenureFlag pretenure) {
-  // We need to distinguish the minus zero value and this cannot be
-  // done after conversion to int. Doing this by comparing bit
-  // patterns is faster than using fpclassify() et al.
-  static const DoubleRepresentation plus_zero(0.0);
-  static const DoubleRepresentation minus_zero(-0.0);
-  static const DoubleRepresentation nan(OS::nan_value());
-  ASSERT(minus_zero_value() != NULL);
-  ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));
-
-  DoubleRepresentation rep(value);
-  if (rep.bits == plus_zero.bits) return Smi::FromInt(0);  // not uncommon
-  if (rep.bits == minus_zero.bits) {
-    return new_object ? AllocateHeapNumber(-0.0, pretenure)
-                      : minus_zero_value();
-  }
-  if (rep.bits == nan.bits) {
-    return new_object
-        ? AllocateHeapNumber(OS::nan_value(), pretenure)
-        : nan_value();
-  }
-
-  // Try to represent the value as a tagged small integer.
-  int int_value = FastD2I(value);
-  if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
-    return Smi::FromInt(int_value);
-  }
-
-  // Materialize the value in the heap.
-  return AllocateHeapNumber(value, pretenure);
-}
-
-
-Object* Heap::NumberToString(Object* number) {
+Object* Heap::NumberToString(Object* number, bool check_number_string_cache) {
   Counters::number_to_string_runtime.Increment();
-  Object* cached = GetNumberStringCache(number);
-  if (cached != undefined_value()) {
-    return cached;
+  if (check_number_string_cache) {
+    Object* cached = GetNumberStringCache(number);
+    if (cached != undefined_value()) {
+      return cached;
+    }
   }
 
   char arr[100];
@@ -1821,17 +1841,24 @@
 }
 
 
-Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
-  return SmiOrNumberFromDouble(value,
-                               true /* number object must be new */,
-                               pretenure);
-}
-
-
 Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
-  return SmiOrNumberFromDouble(value,
-                               false /* use preallocated NaN, -0.0 */,
-                               pretenure);
+  // We need to distinguish the minus zero value, and this cannot be
+  // done after conversion to int. Comparing bit patterns is faster
+  // than using fpclassify() et al.
+  static const DoubleRepresentation minus_zero(-0.0);
+
+  DoubleRepresentation rep(value);
+  if (rep.bits == minus_zero.bits) {
+    return AllocateHeapNumber(-0.0, pretenure);
+  }
+
+  int int_value = FastD2I(value);
+  if (value == int_value && Smi::IsValid(int_value)) {
+    return Smi::FromInt(int_value);
+  }
+
+  // Materialize the value in the heap.
+  return AllocateHeapNumber(value, pretenure);
 }
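// Sketch of the bit-pattern test (not part of the patch): under IEEE
// comparison -0.0 == 0.0, so only the sign bit tells the two apart.
//
//   #include <cstring>
//   bool IsMinusZero(double value) {
//     uint64_t bits;
//     memcpy(&bits, &value, sizeof(bits));  // avoids type-punning UB
//     return bits == (static_cast<uint64_t>(1) << 63);
//   }
//
// IsMinusZero(-0.0) is true even though (-0.0 == 0.0) also holds, which is
// why the code above compares DoubleRepresentation bits instead.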
 
 
@@ -1930,8 +1957,9 @@
     return MakeOrFindTwoCharacterString(c1, c2);
   }
 
-  bool is_ascii = first->IsAsciiRepresentation()
-      && second->IsAsciiRepresentation();
+  bool first_is_ascii = first->IsAsciiRepresentation();
+  bool second_is_ascii = second->IsAsciiRepresentation();
+  bool is_ascii = first_is_ascii && second_is_ascii;
 
   // Make sure that an out of memory exception is thrown if the length
   // of the new cons string is too large.
@@ -1966,6 +1994,25 @@
       for (int i = 0; i < second_length; i++) *dest++ = src[i];
       return result;
     } else {
+      // For short external two-byte strings we check whether they can
+      // be represented using ASCII.
+      if (!first_is_ascii) {
+        first_is_ascii = first->IsExternalTwoByteStringWithAsciiChars();
+      }
+      if (first_is_ascii && !second_is_ascii) {
+        second_is_ascii = second->IsExternalTwoByteStringWithAsciiChars();
+      }
+      if (first_is_ascii && second_is_ascii) {
+        Object* result = AllocateRawAsciiString(length);
+        if (result->IsFailure()) return result;
+        // Copy the characters into the new object.
+        char* dest = SeqAsciiString::cast(result)->GetChars();
+        String::WriteToFlat(first, dest, 0, first_length);
+        String::WriteToFlat(second, dest + first_length, 0, second_length);
+        Counters::string_add_runtime_ext_to_ascii.Increment();
+        return result;
+      }
+
       Object* result = AllocateRawTwoByteString(length);
       if (result->IsFailure()) return result;
       // Copy the characters into the new object.
@@ -1994,7 +2041,8 @@
 
 Object* Heap::AllocateSubString(String* buffer,
                                 int start,
-                                int end) {
+                                int end,
+                                PretenureFlag pretenure) {
   int length = end - start;
 
   if (length == 1) {
@@ -2010,16 +2058,13 @@
   }
 
   // Make an attempt to flatten the buffer to reduce access time.
-  if (!buffer->IsFlat()) {
-    buffer->TryFlatten();
-  }
+  buffer->TryFlatten();
 
   Object* result = buffer->IsAsciiRepresentation()
-      ? AllocateRawAsciiString(length)
-      : AllocateRawTwoByteString(length);
+      ? AllocateRawAsciiString(length, pretenure)
+      : AllocateRawTwoByteString(length, pretenure);
   if (result->IsFailure()) return result;
   String* string_result = String::cast(result);
-
   // Copy the characters into the new object.
   if (buffer->IsAsciiRepresentation()) {
     ASSERT(string_result->IsAsciiRepresentation());
@@ -2138,9 +2183,11 @@
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
-    filler->set_map(Heap::one_pointer_filler_map());
+    filler->set_map(one_pointer_filler_map());
+  } else if (size == 2 * kPointerSize) {
+    filler->set_map(two_pointer_filler_map());
   } else {
-    filler->set_map(Heap::byte_array_map());
+    filler->set_map(byte_array_map());
     ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
   }
 }
@@ -2254,6 +2301,56 @@
 }
 
 
+Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
+  int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(),
+                              kObjectAlignment);
+
+  int sinfo_size = code->sinfo_size();
+
+  int new_obj_size = Code::SizeFor(new_body_size, sinfo_size);
+
+  Address old_addr = code->address();
+
+  size_t relocation_offset =
+      static_cast<size_t>(code->relocation_start() - old_addr);
+
+  Object* result;
+  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
+    result = lo_space_->AllocateRawCode(new_obj_size);
+  } else {
+    result = code_space_->AllocateRaw(new_obj_size);
+  }
+
+  if (result->IsFailure()) return result;
+
+  // Copy code object.
+  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
+
+  // Copy header and instructions.
+  memcpy(new_addr, old_addr, relocation_offset);
+
+  // Copy patched rinfo.
+  memcpy(new_addr + relocation_offset,
+         reloc_info.start(),
+         reloc_info.length());
+
+  Code* new_code = Code::cast(result);
+  new_code->set_relocation_size(reloc_info.length());
+
+  // Copy sinfo.
+  memcpy(new_code->sinfo_start(), code->sinfo_start(), code->sinfo_size());
+
+  // Relocate the copy.
+  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
+  new_code->Relocate(new_addr - old_addr);
+
+#ifdef DEBUG
+  code->Verify();
+#endif
+  return new_code;
+}
+
+
 Object* Heap::Allocate(Map* map, AllocationSpace space) {
   ASSERT(gc_state_ == NOT_IN_GC);
   ASSERT(map->instance_type() != MAP_TYPE);
@@ -2568,11 +2665,9 @@
               reinterpret_cast<Object**>(source->address()),
               object_size);
     // Update write barrier for all fields that lie beyond the header.
-    for (int offset = JSObject::kHeaderSize;
-         offset < object_size;
-         offset += kPointerSize) {
-      RecordWrite(clone_address, offset);
-    }
+    RecordWrites(clone_address,
+                 JSObject::kHeaderSize,
+                 (object_size - JSObject::kHeaderSize) / kPointerSize);
   } else {
     clone = new_space_.AllocateRaw(object_size);
     if (clone->IsFailure()) return clone;
@@ -2587,7 +2682,7 @@
   FixedArray* elements = FixedArray::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
-  if (elements->length()> 0) {
+  if (elements->length() > 0) {
     Object* elem = CopyFixedArray(elements);
     if (elem->IsFailure()) return elem;
     JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
@@ -2903,24 +2998,18 @@
     reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
     FixedArray* array = FixedArray::cast(result);
     array->set_length(length);
-    Object* value = undefined_value();
     // Initialize body.
-    for (int index = 0; index < length; index++) {
-      ASSERT(!Heap::InNewSpace(value));  // value = undefined
-      array->set(index, value, SKIP_WRITE_BARRIER);
-    }
+    ASSERT(!Heap::InNewSpace(undefined_value()));
+    MemsetPointer(array->data_start(), undefined_value(), length);
   }
   return result;
 }
 
 
-Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
-  ASSERT(length >= 0);
-  ASSERT(empty_fixed_array()->IsFixedArray());
+Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
   if (length < 0 || length > FixedArray::kMaxLength) {
     return Failure::OutOfMemoryException();
   }
-  if (length == 0) return empty_fixed_array();
 
   AllocationSpace space =
       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
@@ -2954,42 +3043,53 @@
     ASSERT(space == LO_SPACE);
     result = lo_space_->AllocateRawFixedArray(size);
   }
-  if (result->IsFailure()) return result;
-
-  // Initialize the object.
-  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
-  FixedArray* array = FixedArray::cast(result);
-  array->set_length(length);
-  Object* value = undefined_value();
-  for (int index = 0; index < length; index++) {
-    ASSERT(!Heap::InNewSpace(value));  // value = undefined
-    array->set(index, value, SKIP_WRITE_BARRIER);
-  }
-  return array;
-}
-
-
-Object* Heap::AllocateFixedArrayWithHoles(int length) {
-  if (length == 0) return empty_fixed_array();
-  Object* result = AllocateRawFixedArray(length);
-  if (!result->IsFailure()) {
-    // Initialize header.
-    reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
-    FixedArray* array = FixedArray::cast(result);
-    array->set_length(length);
-    // Initialize body.
-    Object* value = the_hole_value();
-    for (int index = 0; index < length; index++)  {
-      ASSERT(!Heap::InNewSpace(value));  // value = the hole
-      array->set(index, value, SKIP_WRITE_BARRIER);
-    }
-  }
   return result;
 }
 
 
-Object* Heap::AllocateHashTable(int length) {
-  Object* result = Heap::AllocateFixedArray(length);
+static Object* AllocateFixedArrayWithFiller(int length,
+                                            PretenureFlag pretenure,
+                                            Object* filler) {
+  ASSERT(length >= 0);
+  ASSERT(Heap::empty_fixed_array()->IsFixedArray());
+  if (length == 0) return Heap::empty_fixed_array();
+
+  ASSERT(!Heap::InNewSpace(filler));
+  Object* result = Heap::AllocateRawFixedArray(length, pretenure);
+  if (result->IsFailure()) return result;
+
+  HeapObject::cast(result)->set_map(Heap::fixed_array_map());
+  FixedArray* array = FixedArray::cast(result);
+  array->set_length(length);
+  MemsetPointer(array->data_start(), filler, length);
+  return array;
+}
+
+
+Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
+  return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
+}
+
+
+Object* Heap::AllocateFixedArrayWithHoles(int length, PretenureFlag pretenure) {
+  return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
+}
+
+
+Object* Heap::AllocateUninitializedFixedArray(int length) {
+  if (length == 0) return empty_fixed_array();
+
+  Object* obj = AllocateRawFixedArray(length);
+  if (obj->IsFailure()) return obj;
+
+  reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
+  FixedArray::cast(obj)->set_length(length);
+  return obj;
+}
+
+
+Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
+  Object* result = Heap::AllocateFixedArray(length, pretenure);
   if (result->IsFailure()) return result;
   reinterpret_cast<Array*>(result)->set_map(hash_table_map());
   ASSERT(result->IsHashTable());
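
AllocateFixedArrayWithFiller factors the undefined- and hole-filled variants
into one helper, with MemsetPointer standing in for the explicit fill loops.
A sketch of the assumed MemsetPointer semantics (the real helper presumably
lives in utils.h):

  // Sketch: fill `counter` consecutive pointer slots with `value`.
  inline void MemsetPointer(Object** dest, Object* value, int counter) {
    for (int i = 0; i < counter; i++) dest[i] = value;
  }

The old SKIP_WRITE_BARRIER loops could be dropped because the fillers
(undefined and the hole) are never in new space, which the remaining
ASSERT(!Heap::InNewSpace(...)) checks still guard.
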
@@ -3072,6 +3172,7 @@
   static int number_idle_notifications = 0;
   static int last_gc_count = gc_count_;
 
+  bool uncommit = true;
   bool finished = false;
 
   if (last_gc_count == gc_count_) {
@@ -3082,7 +3183,12 @@
   }
 
   if (number_idle_notifications == kIdlesBeforeScavenge) {
-    CollectGarbage(0, NEW_SPACE);
+    if (contexts_disposed_ > 0) {
+      HistogramTimerScope scope(&Counters::gc_context);
+      CollectAllGarbage(false);
+    } else {
+      CollectGarbage(0, NEW_SPACE);
+    }
     new_space_.Shrink();
     last_gc_count = gc_count_;
 
@@ -3102,10 +3208,29 @@
     last_gc_count = gc_count_;
     number_idle_notifications = 0;
     finished = true;
+
+  } else if (contexts_disposed_ > 0) {
+    if (FLAG_expose_gc) {
+      contexts_disposed_ = 0;
+    } else {
+      HistogramTimerScope scope(&Counters::gc_context);
+      CollectAllGarbage(false);
+      last_gc_count = gc_count_;
+    }
+    // If this is the first idle notification, we reset the
+    // notification count to avoid letting idle notifications for
+    // context disposal garbage collections start a potentially too
+    // aggressive idle GC cycle.
+    if (number_idle_notifications <= 1) {
+      number_idle_notifications = 0;
+      uncommit = false;
+    }
   }
 
-  // Uncommit unused memory in new space.
-  Heap::UncommitFromSpace();
+  // Make sure that we have no pending context disposals and
+  // conditionally uncommit from space.
+  ASSERT(contexts_disposed_ == 0);
+  if (uncommit) Heap::UncommitFromSpace();
   return finished;
 }
 
@@ -3370,7 +3495,7 @@
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
   v->Synchronize("strong_root_list");
 
-  v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
+  v->VisitPointer(BitCast<Object**, String**>(&hidden_symbol_));
   v->Synchronize("symbol");
 
   Bootstrapper::Iterate(v);
@@ -3705,6 +3830,46 @@
 #endif
 
 
+void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+  ASSERT(callback != NULL);
+  GCPrologueCallbackPair pair(callback, gc_type);
+  ASSERT(!gc_prologue_callbacks_.Contains(pair));
+  return gc_prologue_callbacks_.Add(pair);
+}
+
+
+void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+  ASSERT(callback != NULL);
+  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+    if (gc_prologue_callbacks_[i].callback == callback) {
+      gc_prologue_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
+void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+  ASSERT(callback != NULL);
+  GCEpilogueCallbackPair pair(callback, gc_type);
+  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
+  return gc_epilogue_callbacks_.Add(pair);
+}
+
+
+void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+  ASSERT(callback != NULL);
+  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+    if (gc_epilogue_callbacks_[i].callback == callback) {
+      gc_epilogue_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
 #ifdef DEBUG
 
 class PrintHandleVisitor: public ObjectVisitor {
@@ -4067,6 +4232,7 @@
 GCTracer::GCTracer()
     : start_time_(0.0),
       start_size_(0.0),
+      external_time_(0.0),
       gc_count_(0),
       full_gc_count_(0),
       is_compacting_(false),
@@ -4084,10 +4250,12 @@
 GCTracer::~GCTracer() {
   if (!FLAG_trace_gc) return;
   // Printf ONE line iff flag is set.
-  PrintF("%s %.1f -> %.1f MB, %d ms.\n",
-         CollectorString(),
-         start_size_, SizeOfHeapObjects(),
-         static_cast<int>(OS::TimeCurrentMillis() - start_time_));
+  int time = static_cast<int>(OS::TimeCurrentMillis() - start_time_);
+  int external_time = static_cast<int>(external_time_);
+  PrintF("%s %.1f -> %.1f MB, ",
+         CollectorString(), start_size_, SizeOfHeapObjects());
+  if (external_time > 0) PrintF("%d / ", external_time);
+  PrintF("%d ms.\n", time);
 
 #if defined(ENABLE_LOGGING_AND_PROFILING)
   Heap::PrintShortHeapStatistics();
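
For reference, a hedged sketch of how the new prologue/epilogue callback lists
are meant to be used; the GCPrologueCallback signature and the kGCType
constants are assumed from the public API of this era:

  // Hypothetical client: run before every mark-sweep/compact collection.
  static void FlushCachesBeforeFullGC(GCType type, GCCallbackFlags flags) {
    // ... drop caches that would otherwise be traced ...
  }

  Heap::AddGCPrologueCallback(FlushCachesBeforeFullGC, kGCTypeMarkSweepCompact);
  ...
  Heap::RemoveGCPrologueCallback(FlushCachesBeforeFullGC);

Note that RemoveGCPrologueCallback hits UNREACHABLE() if the callback was never
registered, so add/remove calls must be balanced.
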
diff --git a/src/heap.h b/src/heap.h
index b107318..902fc77 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -30,12 +30,15 @@
 
 #include <math.h>
 
-#include "zone-inl.h"
-
+#include "splay-tree-inl.h"
+#include "v8-counters.h"
 
 namespace v8 {
 namespace internal {
 
+// Forward declarations.
+class ZoneScopeInfo;
+
 // Defines all the roots in Heap.
 #define UNCONDITIONAL_STRONG_ROOT_LIST(V)                                      \
   /* Put the byte array map early.  We need it to be in place by the time   */ \
@@ -86,7 +89,6 @@
   V(Map, code_map, CodeMap)                                                    \
   V(Map, oddball_map, OddballMap)                                              \
   V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
-  V(Map, boilerplate_function_map, BoilerplateFunctionMap)                     \
   V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, proxy_map, ProxyMap)                                                  \
   V(Object, nan_value, NanValue)                                               \
@@ -108,7 +110,7 @@
   V(Script, empty_script, EmptyScript)                                         \
   V(Smi, real_stack_limit, RealStackLimit)                                     \
 
-#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
 #define STRONG_ROOT_LIST(V)                                                    \
   UNCONDITIONAL_STRONG_ROOT_LIST(V)                                            \
   V(Code, re_c_entry_code, RegExpCEntryCode)
@@ -146,6 +148,13 @@
   V(number_symbol, "number")                                             \
   V(Number_symbol, "Number")                                             \
   V(RegExp_symbol, "RegExp")                                             \
+  V(source_symbol, "source")                                             \
+  V(global_symbol, "global")                                             \
+  V(ignore_case_symbol, "ignoreCase")                                    \
+  V(multiline_symbol, "multiline")                                       \
+  V(input_symbol, "input")                                               \
+  V(index_symbol, "index")                                               \
+  V(last_index_symbol, "lastIndex")                                      \
   V(object_symbol, "object")                                             \
   V(prototype_symbol, "prototype")                                       \
   V(string_symbol, "string")                                             \
@@ -192,6 +201,9 @@
 class HeapStats;
 
 
+typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
+
+
 // The all static Heap captures the interface to the global object heap.
 // All JavaScript contexts by this process share the same object heap.
 
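The new ExternalStringTableUpdaterCallback typedef lets the scavenger update
external string table entries through a function pointer. A hypothetical
updater matching the typedef, for illustration only:

  // Sketch: return the (possibly moved) string for a table slot, assuming
  // *pointer already holds a forwarded or live String.
  static String* ExampleUpdater(Object** pointer) {
    return String::cast(*pointer);  // identity update, sketch only
  }

UpdateNewSpaceReferencesInExternalStringTable (declared further down) is
assumed to walk the table and store each updater result back.
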
@@ -346,6 +358,9 @@
   // Allocate a map for the specified function
   static Object* AllocateInitialMap(JSFunction* fun);
 
+  // Allocates an empty code cache.
+  static Object* AllocateCodeCache();
+
   // Allocates and fully initializes a String.  There are two String
   // encodings: ASCII and two byte. One should choose between the three string
   // allocation functions based on the encoding of the string buffer used to
@@ -450,9 +465,16 @@
   // failed.
   // Please note this does not perform a garbage collection.
   static Object* AllocateFixedArray(int length, PretenureFlag pretenure);
-  // Allocate uninitialized, non-tenured fixed array with length elements.
+  // Allocates a fixed array initialized with undefined values.
   static Object* AllocateFixedArray(int length);
 
+  // Allocates an uninitialized fixed array. It must be filled by the caller.
+  //
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateUninitializedFixedArray(int length);
+
   // Make a copy of src and return it. Returns
   // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
   static Object* CopyFixedArray(FixedArray* src);
@@ -461,11 +483,14 @@
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  static Object* AllocateFixedArrayWithHoles(int length);
+  static Object* AllocateFixedArrayWithHoles(
+      int length,
+      PretenureFlag pretenure = NOT_TENURED);
 
   // AllocateHashTable is identical to AllocateFixedArray except
   // that the resulting object has hash_table_map as map.
-  static Object* AllocateHashTable(int length);
+  static Object* AllocateHashTable(int length,
+                                   PretenureFlag pretenure = NOT_TENURED);
 
   // Allocate a global (but otherwise uninitialized) context.
   static Object* AllocateGlobalContext();
@@ -502,13 +527,6 @@
   // Please note this does not perform a garbage collection.
   static Object* AllocateArgumentsObject(Object* callee, int length);
 
-  // Converts a double into either a Smi or a HeapNumber object.
-  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
-  // failed.
-  // Please note this does not perform a garbage collection.
-  static Object* NewNumberFromDouble(double value,
-                                     PretenureFlag pretenure = NOT_TENURED);
-
   // Converts a double into either a Smi or a HeapNumber; may return a
   // preallocated/immutable number object (e.g., minus_zero_value_, nan_value_).
   static Object* NumberFromDouble(double value,
@@ -557,7 +575,8 @@
   // Please note this does not perform a garbage collection.
   static Object* AllocateSubString(String* buffer,
                                    int start,
-                                   int end);
+                                   int end,
+                                   PretenureFlag pretenure = NOT_TENURED);
 
   // Allocate a new external string object, which is backed by a string
   // resource that resides outside the V8 heap.
@@ -598,6 +617,11 @@
                             Handle<Object> self_reference);
 
   static Object* CopyCode(Code* code);
+
+  // Copy the code and scope info part of the code object, but insert
+  // the provided data as the relocation information.
+  static Object* CopyCode(Code* code, Vector<byte> reloc_info);
+
   // Finds the symbol for string in the symbol table.
   // If not found, a new symbol is added to the table and returned.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
@@ -615,6 +639,15 @@
   // NULL is returned if string is in new space or not flattened.
   static Map* SymbolMapForString(String* str);
 
+  // Tries to flatten a string before a compare operation.
+  //
+  // Returns a failure in case it was decided that flattening was
+  // necessary and failed.  Note that if flattening is not necessary,
+  // the string might stay non-flat even when a failure is not returned.
+  //
+  // Please note this function does not perform a garbage collection.
+  static inline Object* PrepareForCompare(String* str);
+
   // Converts the given boolean condition to JavaScript boolean value.
   static Object* ToBoolean(bool condition) {
     return condition ? true_value() : false_value();
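
A hedged sketch of the intended call pattern for PrepareForCompare, based only
on the comment above (flatten both operands before comparing and propagate
allocation failures; lhs and rhs are hypothetical String* operands):

  Object* result = Heap::PrepareForCompare(lhs);
  if (result->IsFailure()) return result;  // flattening needed but failed
  result = Heap::PrepareForCompare(rhs);
  if (result->IsFailure()) return result;
  // compare lhs and rhs, which may still be non-flat
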
@@ -633,12 +666,8 @@
   // parameter is true.
   static void CollectAllGarbage(bool force_compaction);
 
-  // Performs a full garbage collection if a context has been disposed
-  // since the last time the check was performed.
-  static void CollectAllGarbageIfContextDisposed();
-
   // Notify the heap that a context has been disposed.
-  static void NotifyContextDisposed();
+  static int NotifyContextDisposed() { return ++contexts_disposed_; }
 
   // Utility to invoke the scavenger. This is needed in test code to
   // ensure correct callback for weak global handles.
@@ -649,10 +678,20 @@
   static bool GarbageCollectionGreedyCheck();
 #endif
 
+  static void AddGCPrologueCallback(
+      GCPrologueCallback callback, GCType gc_type_filter);
+  static void RemoveGCPrologueCallback(GCPrologueCallback callback);
+
+  static void AddGCEpilogueCallback(
+      GCEpilogueCallback callback, GCType gc_type_filter);
+  static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+
   static void SetGlobalGCPrologueCallback(GCCallback callback) {
+    ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
     global_gc_prologue_callback_ = callback;
   }
   static void SetGlobalGCEpilogueCallback(GCCallback callback) {
+    ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
     global_gc_epilogue_callback_ = callback;
   }
 
@@ -774,6 +813,9 @@
   // Write barrier support for address[offset] = o.
   static inline void RecordWrite(Address address, int offset);
 
+  // Write barrier support for address[start .. start + len) = o.
+  static inline void RecordWrites(Address address, int start, int len);
+
   // Given an address occupied by a live code object, return that object.
   static Object* FindCodeObject(Address a);
 
@@ -848,8 +890,10 @@
   // Returns the adjusted value.
   static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
 
-  // Allocate unitialized fixed array (pretenure == NON_TENURE).
+  // Allocate uninitialized fixed array.
   static Object* AllocateRawFixedArray(int length);
+  static Object* AllocateRawFixedArray(int length,
+                                       PretenureFlag pretenure);
 
   // True if we have reached the allocation limit in the old generation that
   // should force the next GC (caused normally) to be a full one.
@@ -892,7 +936,8 @@
     kRootListLength
   };
 
-  static Object* NumberToString(Object* number);
+  static Object* NumberToString(Object* number,
+                                bool check_number_string_cache = true);
 
   static Map* MapForExternalArrayType(ExternalArrayType array_type);
   static RootListIndex RootIndexForExternalArrayType(
@@ -900,6 +945,32 @@
 
   static void RecordStats(HeapStats* stats);
 
+  // Copy a block of memory from src to dst. The block size must be a
+  // multiple of the pointer size.
+  static inline void CopyBlock(Object** dst, Object** src, int byte_size);
+
+  // Optimized version of memmove for blocks with pointer size aligned sizes and
+  // pointer size aligned addresses.
+  static inline void MoveBlock(Object** dst, Object** src, int byte_size);
+
+  // Check new space expansion criteria and expand semispaces if it was hit.
+  static void CheckNewSpaceExpansionCriteria();
+
+  static inline void IncrementYoungSurvivorsCounter(int survived) {
+    survived_since_last_expansion_ += survived;
+  }
+
+  static void UpdateNewSpaceReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
+  // Helper function that governs the promotion policy from new space to
+  // old.  If the object's old address lies below the new space's age
+  // mark or if we've already filled the bottom 1/16th of the to space,
+  // we try to promote this object.
+  static inline bool ShouldBePromoted(Address old_address, int object_size);
+
+  static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
+
  private:
   static int reserved_semispace_size_;
   static int max_semispace_size_;
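
A sketch of the assumed semantics of the block helpers declared above; both
require pointer-aligned addresses and a byte_size that is a multiple of
kPointerSize:

  // Sketch only: word-wise copy. MoveBlock is assumed to additionally
  // tolerate overlapping src/dst regions, like memmove.
  inline void CopyBlock(Object** dst, Object** src, int byte_size) {
    ASSERT(byte_size % kPointerSize == 0);
    int words = byte_size / kPointerSize;
    for (int i = 0; i < words; i++) dst[i] = src[i];
  }
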
@@ -913,7 +984,9 @@
 
   static int always_allocate_scope_depth_;
   static int linear_allocation_scope_depth_;
-  static bool context_disposed_pending_;
+
+  // For keeping track of context disposals.
+  static int contexts_disposed_;
 
 #if defined(V8_TARGET_ARCH_X64)
   static const int kMaxObjectSizeInNewSpace = 512*KB;
@@ -939,6 +1012,9 @@
   static int mc_count_;  // how many mark-compact collections happened
   static int gc_count_;  // how many gc happened
 
+  // Total length of the strings we failed to flatten since the last GC.
+  static int unflattened_strings_length_;
+
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
   static inline void set_##name(type* value) {                                 \
     roots_[k##camel_name##RootIndex] = value;                                  \
@@ -1013,6 +1089,30 @@
 
   // GC callback function, called before and after mark-compact GC.
   // Allocations in the callback function are disallowed.
+  struct GCPrologueCallbackPair {
+    GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
+        : callback(callback), gc_type(gc_type) {
+    }
+    bool operator==(const GCPrologueCallbackPair& pair) const {
+      return pair.callback == callback;
+    }
+    GCPrologueCallback callback;
+    GCType gc_type;
+  };
+  static List<GCPrologueCallbackPair> gc_prologue_callbacks_;
+
+  struct GCEpilogueCallbackPair {
+    GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
+        : callback(callback), gc_type(gc_type) {
+    }
+    bool operator==(const GCEpilogueCallbackPair& pair) const {
+      return pair.callback == callback;
+    }
+    GCEpilogueCallback callback;
+    GCType gc_type;
+  };
+  static List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
+
   static GCCallback global_gc_prologue_callback_;
   static GCCallback global_gc_epilogue_callback_;
 
@@ -1024,12 +1124,6 @@
                                        GarbageCollector collector,
                                        GCTracer* tracer);
 
-  // Returns either a Smi or a Number object from 'value'. If 'new_object'
-  // is false, it may return a preallocated immutable object.
-  static Object* SmiOrNumberFromDouble(double value,
-                                       bool new_object,
-                                       PretenureFlag pretenure = NOT_TENURED);
-
   // Allocate an uninitialized object in map space.  The behavior is identical
   // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
   // have to test the allocation space argument and (b) can reduce code size
@@ -1056,16 +1150,17 @@
 
   static void CreateFixedStubs();
 
-  static Object* CreateOddball(Map* map,
-                               const char* to_string,
-                               Object* to_number);
+  static Object* CreateOddball(const char* to_string, Object* to_number);
 
   // Allocate empty fixed array.
   static Object* AllocateEmptyFixedArray();
 
   // Performs a minor collection in new generation.
   static void Scavenge();
-  static void ScavengeExternalStringTable();
+
+  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
+      Object** pointer);
+
   static Address DoScavenge(ObjectVisitor* scavenge_visitor,
                             Address new_space_front);
 
@@ -1083,11 +1178,8 @@
                                           HeapObject* target,
                                           int size);
 
-  // Helper function that governs the promotion policy from new space to
-  // old.  If the object's old address lies below the new space's age
-  // mark or if we've already filled the bottom 1/16th of the to space,
-  // we try to promote this object.
-  static inline bool ShouldBePromoted(Address old_address, int object_size);
+  static void ClearJSFunctionResultCaches();
+
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   // Record the copy of an object in the NewSpace's statistics.
   static void RecordCopiedObject(HeapObject* obj);
@@ -1106,9 +1198,6 @@
   // Slow part of scavenge object.
   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
 
-  // Copy memory from src to dst.
-  static inline void CopyBlock(Object** dst, Object** src, int byte_size);
-
   // Initializes a function with a shared part and prototype.
   // Returns the function.
   // Note: this code was factored out of AllocateFunction such that
@@ -1137,26 +1226,26 @@
 
 class HeapStats {
  public:
-  int *start_marker;
-  int *new_space_size;
-  int *new_space_capacity;
-  int *old_pointer_space_size;
-  int *old_pointer_space_capacity;
-  int *old_data_space_size;
-  int *old_data_space_capacity;
-  int *code_space_size;
-  int *code_space_capacity;
-  int *map_space_size;
-  int *map_space_capacity;
-  int *cell_space_size;
-  int *cell_space_capacity;
-  int *lo_space_size;
-  int *global_handle_count;
-  int *weak_global_handle_count;
-  int *pending_global_handle_count;
-  int *near_death_global_handle_count;
-  int *destroyed_global_handle_count;
-  int *end_marker;
+  int* start_marker;
+  int* new_space_size;
+  int* new_space_capacity;
+  int* old_pointer_space_size;
+  int* old_pointer_space_capacity;
+  int* old_data_space_size;
+  int* old_data_space_capacity;
+  int* code_space_size;
+  int* code_space_capacity;
+  int* map_space_size;
+  int* map_space_capacity;
+  int* cell_space_size;
+  int* cell_space_capacity;
+  int* lo_space_size;
+  int* global_handle_count;
+  int* weak_global_handle_count;
+  int* pending_global_handle_count;
+  int* near_death_global_handle_count;
+  int* destroyed_global_handle_count;
+  int* end_marker;
 };
 
 
@@ -1525,8 +1614,23 @@
 
 class GCTracer BASE_EMBEDDED {
  public:
-  GCTracer();
+  // Time spent while in the external scope counts towards the
+  // external time in the tracer and will be reported separately.
+  class ExternalScope BASE_EMBEDDED {
+   public:
+    explicit ExternalScope(GCTracer* tracer) : tracer_(tracer) {
+      start_time_ = OS::TimeCurrentMillis();
+    }
+    ~ExternalScope() {
+      tracer_->external_time_ += OS::TimeCurrentMillis() - start_time_;
+    }
 
+   private:
+    GCTracer* tracer_;
+    double start_time_;
+  };
+
+  GCTracer();
   ~GCTracer();
 
   // Sets the collector.
@@ -1540,6 +1644,7 @@
 
   // Sets the flag that this is a compacting full GC.
   void set_is_compacting() { is_compacting_ = true; }
+  bool is_compacting() const { return is_compacting_; }
 
   // Increment and decrement the count of marked objects.
   void increment_marked_count() { ++marked_count_; }
@@ -1560,6 +1665,9 @@
   double start_size_;  // Size of objects in heap set in constructor.
   GarbageCollector collector_;  // Type of collector.
 
+  // Keep track of the amount of time spent in external callbacks.
+  double external_time_;
+
   // A count (including this one, eg, the first collection is 1) of the
   // number of garbage collections.
   int gc_count_;
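
ExternalScope is a small RAII timer: wrapping a callback invocation in it
attributes that time to external_time_, which the GCTracer destructor prints
ahead of the total. A sketch of the assumed call-site pattern, plus the
resulting --trace-gc line format derived from the PrintF calls in heap.cc
above (values illustrative):

  { GCTracer::ExternalScope scope(tracer);
    global_gc_prologue_callback_();  // time spent here is reported separately
  }

  // Example trace line when external time is nonzero:
  //   Mark-sweep 8.1 -> 4.2 MB, 3 / 25 ms.
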
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 69f2a8d..6dc584e 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -314,6 +314,12 @@
 }
 
 
+Operand::Operand(XMMRegister xmm_reg) {
+  Register reg = { xmm_reg.code() };
+  set_modrm(3, reg);
+}
+
+
 Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
   // [disp/r]
   set_modrm(0, ebp);
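
The new Operand(XMMRegister) constructor wraps an XMM register as a
register-direct operand (ModR/M mod == 3), so XMM registers can flow through
the existing Register-based emission path. Illustration of the encoding
assumption, given that set_modrm(3, reg) writes 0xC0 | reg.code() as the
ModR/M base byte:

  Operand xmm_op(xmm2);  // hypothetical use: base ModR/M byte 0xC2 before
                         // the reg field is OR'ed in by the emitter
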
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 89708aa..26e40b1 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -123,8 +123,8 @@
                                   Code::ComputeFlags(Code::STUB),
                                   Handle<Code>::null());
   if (!code->IsCode()) return;
-  LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
-                      Code::cast(code), "CpuFeatures::Probe"));
+  PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
+                          Code::cast(code), "CpuFeatures::Probe"));
   typedef uint64_t (*F0)();
   F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
   supported_ = probe();
@@ -753,6 +753,13 @@
 }
 
 
+void Assembler::cld() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFC);
+}
+
+
 void Assembler::rep_movs() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -761,6 +768,14 @@
 }
 
 
+void Assembler::rep_stos() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF3);
+  EMIT(0xAB);
+}
+
+
 void Assembler::xchg(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2035,6 +2050,17 @@
 }
 
 
+void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x5A);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2090,6 +2116,16 @@
 }
 
 
+void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x51);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::comisd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2101,6 +2137,28 @@
 }
 
 
+void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x2E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movmskpd(Register dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x50);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2180,6 +2238,50 @@
   emit_sse_operand(dst, src);
 }
 
+void Assembler::movsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x10);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movd(XMMRegister dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x6E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::pxor(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xEF);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::ptest(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x38);
+  EMIT(0x17);
+  emit_sse_operand(dst, src);
+}
+
 
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
@@ -2192,6 +2294,11 @@
 }
 
 
+void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
+  EMIT(0xC0 | dst.code() << 3 | src.code());
+}
+
+
 void Assembler::Print() {
   Disassembler::Decode(stdout, buffer_, pc_);
 }
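
The new emit_sse_operand(Register, XMMRegister) overload emits the
register-direct ModR/M byte directly. A worked example of the encoding (plain
x86 fact, not V8-specific):

  // movmskpd(eax, xmm1) emits 66 0F 50 followed by the ModR/M byte
  //   0xC0 | (eax.code() << 3) | xmm1.code() == 0xC0 | (0 << 3) | 1 == 0xC1
  // so the full instruction bytes are 66 0F 50 C1.
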
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 36aad5e..6a7effd 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -93,7 +93,7 @@
 
 
 struct XMMRegister {
-  bool is_valid() const  { return 0 <= code_ && code_ < 2; }  // currently
+  bool is_valid() const  { return 0 <= code_ && code_ < 8; }
   int code() const  {
     ASSERT(is_valid());
     return code_;
@@ -241,6 +241,9 @@
   // reg
   INLINE(explicit Operand(Register reg));
 
+  // XMM reg
+  INLINE(explicit Operand(XMMRegister xmm_reg));
+
   // [disp/r]
   INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
   // disp only must always be relocated
@@ -542,8 +545,12 @@
   void cmov(Condition cc, Register dst, Handle<Object> handle);
   void cmov(Condition cc, Register dst, const Operand& src);
 
+  // Flag management.
+  void cld();
+
   // Repetitive string instructions.
   void rep_movs();
+  void rep_stos();
 
   // Exchange two registers
   void xchg(Register dst, Register src);
@@ -705,6 +712,7 @@
   void fistp_s(const Operand& adr);
   void fistp_d(const Operand& adr);
 
+  // The fisttp instructions require SSE3.
   void fisttp_s(const Operand& adr);
   void fisttp_d(const Operand& adr);
 
@@ -754,14 +762,18 @@
   void cvttsd2si(Register dst, const Operand& src);
 
   void cvtsi2sd(XMMRegister dst, const Operand& src);
+  void cvtss2sd(XMMRegister dst, XMMRegister src);
 
   void addsd(XMMRegister dst, XMMRegister src);
   void subsd(XMMRegister dst, XMMRegister src);
   void mulsd(XMMRegister dst, XMMRegister src);
   void divsd(XMMRegister dst, XMMRegister src);
   void xorpd(XMMRegister dst, XMMRegister src);
+  void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void comisd(XMMRegister dst, XMMRegister src);
+  void ucomisd(XMMRegister dst, XMMRegister src);
+  void movmskpd(Register dst, XMMRegister src);
 
   void movdqa(XMMRegister dst, const Operand& src);
   void movdqa(const Operand& dst, XMMRegister src);
@@ -772,6 +784,12 @@
   void movdbl(XMMRegister dst, const Operand& src);
   void movdbl(const Operand& dst, XMMRegister src);
 
+  void movd(XMMRegister dst, const Operand& src);
+  void movsd(XMMRegister dst, XMMRegister src);
+
+  void pxor(XMMRegister dst, XMMRegister src);
+  void ptest(XMMRegister dst, XMMRegister src);
+
   // Debugging
   void Print();
 
@@ -815,7 +833,7 @@
 
   void emit_sse_operand(XMMRegister reg, const Operand& adr);
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
-
+  void emit_sse_operand(Register dst, XMMRegister src);
 
  private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 54ef382..80e421b 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -63,10 +63,10 @@
     ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
-  // JumpToRuntime expects eax to contain the number of arguments
+  // JumpToExternalReference expects eax to contain the number of arguments
   // including the receiver and the extra arguments.
   __ add(Operand(eax), Immediate(num_extra_args + 1));
-  __ JumpToRuntime(ExternalReference(id));
+  __ JumpToExternalReference(ExternalReference(id));
 }
 
 
@@ -797,38 +797,23 @@
 // register elements_array is scratched.
 static void AllocateJSArray(MacroAssembler* masm,
                             Register array_function,  // Array function.
-                            Register array_size,  // As a smi.
+                            Register array_size,  // As a smi, cannot be 0.
                             Register result,
                             Register elements_array,
                             Register elements_array_end,
                             Register scratch,
                             bool fill_with_hole,
                             Label* gc_required) {
-  Label not_empty, allocated;
+  ASSERT(scratch.is(edi));  // rep stos destination
+  ASSERT(!fill_with_hole || array_size.is(ecx));  // rep stos count
 
   // Load the initial map from the array function.
   __ mov(elements_array,
          FieldOperand(array_function,
                       JSFunction::kPrototypeOrInitialMapOffset));
 
-  // Check whether an empty sized array is requested.
-  __ test(array_size, Operand(array_size));
-  __ j(not_zero, &not_empty);
-
-  // If an empty array is requested allocate a small elements array anyway. This
-  // keeps the code below free of special casing for the empty array.
-  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
-  __ AllocateInNewSpace(size,
-                        result,
-                        elements_array_end,
-                        scratch,
-                        gc_required,
-                        TAG_OBJECT);
-  __ jmp(&allocated);
-
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
-  __ bind(&not_empty);
   ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
                         times_half_pointer_size,  // array_size is a smi.
@@ -845,7 +830,6 @@
   // elements_array: initial map
   // elements_array_end: start of next object
   // array_size: size of array (smi)
-  __ bind(&allocated);
   __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
   __ mov(elements_array, Factory::empty_fixed_array());
   __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
@@ -869,15 +853,6 @@
   __ SmiUntag(array_size);  // Convert from smi to value.
   __ mov(FieldOperand(elements_array, JSObject::kMapOffset),
          Factory::fixed_array_map());
-  Label not_empty_2, fill_array;
-  __ test(array_size, Operand(array_size));
-  __ j(not_zero, &not_empty_2);
-  // Length of the FixedArray is the number of pre-allocated elements even
-  // though the actual JSArray has length 0.
-  __ mov(FieldOperand(elements_array, Array::kLengthOffset),
-         Immediate(kPreallocatedArrayElements));
-  __ jmp(&fill_array);
-  __ bind(&not_empty_2);
   // For non-empty JSArrays the length of the FixedArray and the JSArray is the
   // same.
   __ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
@@ -885,20 +860,18 @@
   // Fill the allocated FixedArray with the hole value if requested.
   // result: JSObject
   // elements_array: elements array
-  // elements_array_end: start of next object
-  __ bind(&fill_array);
   if (fill_with_hole) {
-    Label loop, entry;
-    __ mov(scratch, Factory::the_hole_value());
-    __ lea(elements_array, Operand(elements_array,
-                                   FixedArray::kHeaderSize - kHeapObjectTag));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ mov(Operand(elements_array, 0), scratch);
-    __ add(Operand(elements_array), Immediate(kPointerSize));
-    __ bind(&entry);
-    __ cmp(elements_array, Operand(elements_array_end));
-    __ j(below, &loop);
+    __ lea(edi, Operand(elements_array,
+                        FixedArray::kHeaderSize - kHeapObjectTag));
+
+    __ push(eax);
+    __ mov(eax, Factory::the_hole_value());
+
+    __ cld();
+    __ rep_stos();
+
+    // Restore saved registers.
+    __ pop(eax);
   }
 }
 
@@ -920,7 +893,8 @@
 static void ArrayNativeCode(MacroAssembler* masm,
                             bool construct_call,
                             Label* call_generic_code) {
-  Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
+  Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
+        empty_array, not_empty_array;
 
   // Push the constructor and argc. No need to tag argc as a smi, as there will
   // be no garbage collection with this on the stack.
@@ -936,6 +910,7 @@
   __ test(eax, Operand(eax));
   __ j(not_zero, &argc_one_or_more);
 
+  __ bind(&empty_array);
   // Handle construction of an empty array.
   AllocateEmptyJSArray(masm,
                        edi,
@@ -958,30 +933,46 @@
   __ cmp(eax, 1);
   __ j(not_equal, &argc_two_or_more);
   ASSERT(kSmiTag == 0);
-  __ test(Operand(esp, (push_count + 1) * kPointerSize),
-          Immediate(kIntptrSignBit | kSmiTagMask));
+  __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
+  __ test(ecx, Operand(ecx));
+  __ j(not_zero, &not_empty_array);
+
+  // The single argument passed is zero, so we jump to the code above used to
+  // handle the case of no arguments passed. To adapt the stack for that, we
+  // move the return address and the pushed constructor (if pushed) one stack
+  // slot up, thereby removing the passed argument. Argc is also on the stack
+  // - at the bottom - and it needs to be changed from 1 to 0 to have the call
+  // into the runtime system work in case a GC is required.
+  for (int i = push_count; i > 0; i--) {
+    __ mov(eax, Operand(esp, i * kPointerSize));
+    __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
+  }
+  __ add(Operand(esp), Immediate(2 * kPointerSize));  // Drop two stack slots.
+  __ push(Immediate(0));  // Treat this as a call with argc of zero.
+  __ jmp(&empty_array);
+
+  __ bind(&not_empty_array);
+  __ test(ecx, Immediate(kIntptrSignBit | kSmiTagMask));
   __ j(not_zero, &prepare_generic_code_call);
 
   // Handle construction of an empty array of a certain size. Get the size from
   // the stack and bail out if size is to large to actually allocate an elements
   // array.
-  __ mov(edx, Operand(esp, (push_count + 1) * kPointerSize));
-  ASSERT(kSmiTag == 0);
-  __ cmp(edx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
+  __ cmp(ecx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
   __ j(greater_equal, &prepare_generic_code_call);
 
   // ecx: array_size (smi)
   // edi: constructor
-  // esp[0]: argc
+  // esp[0]: argc (cannot be 0 here)
   // esp[4]: constructor (only if construct_call)
   // esp[8]: return address
   // esp[C]: argument
   AllocateJSArray(masm,
                   edi,
-                  edx,
+                  ecx,
                   eax,
                   ebx,
-                  ecx,
+                  edx,
                   edi,
                   true,
                   &prepare_generic_code_call);
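
The hole-filling loop in AllocateJSArray is replaced by cld + rep stos, which
(with the direction flag cleared) stores eax to [edi], advances edi by four,
and repeats ecx times; that is why the new ASSERTs pin array_size to ecx and
scratch to edi. A C sketch of the equivalent fill:

  // Sketch of `cld; rep stos` semantics for the dword variant.
  void RepStosSketch(uint32_t* edi, uint32_t eax, uint32_t ecx) {
    while (ecx-- > 0) *edi++ = eax;  // fill ecx dwords with eax (the hole)
  }

Note that eax must be saved and restored around the fill because it normally
holds the JSArray result, as the push/pop pair above shows.
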
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index ecb4c49..83060c1 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -39,6 +39,7 @@
 #include "register-allocator-inl.h"
 #include "runtime.h"
 #include "scopes.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -111,14 +112,13 @@
       allocator_(NULL),
       state_(NULL),
       loop_nesting_(0),
+      in_safe_int32_mode_(false),
+      safe_int32_mode_enabled_(true),
       function_return_is_shadowed_(false),
       in_spilled_code_(false) {
 }
 
 
-Scope* CodeGenerator::scope() { return info_->function()->scope(); }
-
-
 // Calling conventions:
 // ebp: caller's frame pointer
 // esp: stack pointer
@@ -128,6 +128,7 @@
 void CodeGenerator::Generate(CompilationInfo* info) {
   // Record the position for debugging purposes.
   CodeForFunctionPosition(info->function());
+  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
 
   // Initialize state.
   info_ = info;
@@ -139,7 +140,8 @@
   set_in_spilled_code(false);
 
   // Adjust for function-level loop nesting.
-  loop_nesting_ += info->loop_nesting();
+  ASSERT_EQ(0, loop_nesting_);
+  loop_nesting_ = info->loop_nesting();
 
   JumpTarget::set_compiling_deferred_code(false);
 
@@ -152,8 +154,7 @@
 #endif
 
   // New scope to get automatic timing calculation.
-  {  // NOLINT
-    HistogramTimerScope codegen_timer(&Counters::code_generation);
+  { HistogramTimerScope codegen_timer(&Counters::code_generation);
     CodeGenState state(this);
 
     // Entry:
@@ -332,7 +333,8 @@
   }
 
   // Adjust for function-level loop nesting.
-  loop_nesting_ -= info->loop_nesting();
+  ASSERT_EQ(info->loop_nesting(), loop_nesting_);
+  loop_nesting_ = 0;
 
   // Code generation state must be reset.
   ASSERT(state_ == NULL);
@@ -438,14 +440,14 @@
 // frame. If the expression is boolean-valued it may be compiled (or
 // partially compiled) into control flow to the control destination.
 // If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* x,
+void CodeGenerator::LoadCondition(Expression* expr,
                                   ControlDestination* dest,
                                   bool force_control) {
   ASSERT(!in_spilled_code());
   int original_height = frame_->height();
 
   { CodeGenState new_state(this, dest);
-    Visit(x);
+    Visit(expr);
 
     // If we hit a stack overflow, we may not have actually visited
     // the expression.  In that case, we ensure that we have a
@@ -482,64 +484,175 @@
 }
 
 
+void CodeGenerator::LoadInSafeInt32Mode(Expression* expr,
+                                        BreakTarget* unsafe_bailout) {
+  set_unsafe_bailout(unsafe_bailout);
+  set_in_safe_int32_mode(true);
+  Load(expr);
+  Result value = frame_->Pop();
+  ASSERT(frame_->HasNoUntaggedInt32Elements());
+  if (expr->GuaranteedSmiResult()) {
+    ConvertInt32ResultToSmi(&value);
+  } else {
+    ConvertInt32ResultToNumber(&value);
+  }
+  set_in_safe_int32_mode(false);
+  set_unsafe_bailout(NULL);
+  frame_->Push(&value);
+}
+
+
+void CodeGenerator::LoadWithSafeInt32ModeDisabled(Expression* expr) {
+  set_safe_int32_mode_enabled(false);
+  Load(expr);
+  set_safe_int32_mode_enabled(true);
+}
+
+
+void CodeGenerator::ConvertInt32ResultToSmi(Result* value) {
+  ASSERT(value->is_untagged_int32());
+  if (value->is_register()) {
+    __ add(value->reg(), Operand(value->reg()));
+  } else {
+    ASSERT(value->is_constant());
+    ASSERT(value->handle()->IsSmi());
+  }
+  value->set_untagged_int32(false);
+  value->set_type_info(TypeInfo::Smi());
+}
+
+
+void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
+  ASSERT(value->is_untagged_int32());
+  if (value->is_register()) {
+    Register val = value->reg();
+    JumpTarget done;
+    __ add(val, Operand(val));
+    done.Branch(no_overflow, value);
+    __ sar(val, 1);
+    // If there was an overflow, bits 30 and 31 of the original number disagree.
+    __ xor_(val, 0x80000000u);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope fscope(SSE2);
+      __ cvtsi2sd(xmm0, Operand(val));
+    } else {
+      // Move val to ST[0] in the FPU
+      // Push and pop are safe with respect to the virtual frame because
+      // all synced elements are below the actual stack pointer.
+      __ push(val);
+      __ fild_s(Operand(esp, 0));
+      __ pop(val);
+    }
+    Result scratch = allocator_->Allocate();
+    ASSERT(scratch.is_register());
+    Label allocation_failed;
+    __ AllocateHeapNumber(val, scratch.reg(),
+                          no_reg, &allocation_failed);
+    VirtualFrame* clone = new VirtualFrame(frame_);
+    scratch.Unuse();
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope fscope(SSE2);
+      __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
+    } else {
+      __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
+    }
+    done.Jump(value);
+
+    // Establish the virtual frame, cloned from where AllocateHeapNumber
+    // jumped to allocation_failed.
+    RegisterFile empty_regs;
+    SetFrame(clone, &empty_regs);
+    __ bind(&allocation_failed);
+    unsafe_bailout_->Jump();
+
+    done.Bind(value);
+  } else {
+    ASSERT(value->is_constant());
+  }
+  value->set_untagged_int32(false);
+  value->set_type_info(TypeInfo::Integer32());
+}
+
+
 void CodeGenerator::Load(Expression* expr) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
   ASSERT(!in_spilled_code());
-  JumpTarget true_target;
-  JumpTarget false_target;
-  ControlDestination dest(&true_target, &false_target, true);
-  LoadCondition(expr, &dest, false);
 
-  if (dest.false_was_fall_through()) {
-    // The false target was just bound.
-    JumpTarget loaded;
-    frame_->Push(Factory::false_value());
-    // There may be dangling jumps to the true target.
-    if (true_target.is_linked()) {
-      loaded.Jump();
-      true_target.Bind();
-      frame_->Push(Factory::true_value());
-      loaded.Bind();
+  // If the expression can be computed as a side-effect-free 32-bit int
+  // computation, compile a SafeInt32 path together with a bailout path.
+  if (!in_safe_int32_mode() &&
+      safe_int32_mode_enabled() &&
+      expr->side_effect_free() &&
+      expr->num_bit_ops() > 2 &&
+      CpuFeatures::IsSupported(SSE2)) {
+    BreakTarget unsafe_bailout;
+    JumpTarget done;
+    unsafe_bailout.set_expected_height(frame_->height());
+    LoadInSafeInt32Mode(expr, &unsafe_bailout);
+    done.Jump();
+
+    if (unsafe_bailout.is_linked()) {
+      unsafe_bailout.Bind();
+      LoadWithSafeInt32ModeDisabled(expr);
     }
-
-  } else if (dest.is_used()) {
-    // There is true, and possibly false, control flow (with true as
-    // the fall through).
-    JumpTarget loaded;
-    frame_->Push(Factory::true_value());
-    if (false_target.is_linked()) {
-      loaded.Jump();
-      false_target.Bind();
-      frame_->Push(Factory::false_value());
-      loaded.Bind();
-    }
-
+    done.Bind();
   } else {
-    // We have a valid value on top of the frame, but we still may
-    // have dangling jumps to the true and false targets from nested
-    // subexpressions (eg, the left subexpressions of the
-    // short-circuited boolean operators).
-    ASSERT(has_valid_frame());
-    if (true_target.is_linked() || false_target.is_linked()) {
+    JumpTarget true_target;
+    JumpTarget false_target;
+
+    ControlDestination dest(&true_target, &false_target, true);
+    LoadCondition(expr, &dest, false);
+
+    if (dest.false_was_fall_through()) {
+      // The false target was just bound.
       JumpTarget loaded;
-      loaded.Jump();  // Don't lose the current TOS.
+      frame_->Push(Factory::false_value());
+      // There may be dangling jumps to the true target.
       if (true_target.is_linked()) {
+        loaded.Jump();
         true_target.Bind();
         frame_->Push(Factory::true_value());
-        if (false_target.is_linked()) {
-          loaded.Jump();
-        }
+        loaded.Bind();
       }
+
+    } else if (dest.is_used()) {
+      // There is true, and possibly false, control flow (with true as
+      // the fall through).
+      JumpTarget loaded;
+      frame_->Push(Factory::true_value());
       if (false_target.is_linked()) {
+        loaded.Jump();
         false_target.Bind();
         frame_->Push(Factory::false_value());
+        loaded.Bind();
       }
-      loaded.Bind();
+
+    } else {
+      // We have a valid value on top of the frame, but we still may
+      // have dangling jumps to the true and false targets from nested
+      // subexpressions (eg, the left subexpressions of the
+      // short-circuited boolean operators).
+      ASSERT(has_valid_frame());
+      if (true_target.is_linked() || false_target.is_linked()) {
+        JumpTarget loaded;
+        loaded.Jump();  // Don't lose the current TOS.
+        if (true_target.is_linked()) {
+          true_target.Bind();
+          frame_->Push(Factory::true_value());
+          if (false_target.is_linked()) {
+            loaded.Jump();
+          }
+        }
+        if (false_target.is_linked()) {
+          false_target.Bind();
+          frame_->Push(Factory::false_value());
+        }
+        loaded.Bind();
+      }
     }
   }
-
   ASSERT(has_valid_frame());
   ASSERT(frame_->height() == original_height + 1);
 }
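
The int32-to-Smi conversion above relies on the ia32 Smi encoding: tag bit 0
and a one-bit shift, so `add(val, val)` simultaneously shifts and tags, and
overflow of that add is exactly the "does not fit in a Smi" case. A minimal
sketch of the encoding assumption:

  // kSmiTag == 0, kSmiTagSize == 1: a Smi is the 31-bit value shifted left.
  int32_t TagAsSmi(int32_t value) {
    return value << 1;  // same effect as __ add(val, Operand(val))
  }

On overflow, `sar val, 1` followed by `xor val, 0x80000000` recovers the
original int32, since bits 30 and 31 of an overflowing value disagree.
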
@@ -734,11 +847,31 @@
   Result value = frame_->Pop();
   value.ToRegister();
 
-  if (value.is_number()) {
-    Comment cmnt(masm_, "ONLY_NUMBER");
-    // Fast case if NumberInfo indicates only numbers.
+  if (value.is_integer32()) {  // Also takes Smi case.
+    Comment cmnt(masm_, "ONLY_INTEGER_32");
     if (FLAG_debug_code) {
-      __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
+      Label ok;
+      __ AbortIfNotNumber(value.reg());
+      __ test(value.reg(), Immediate(kSmiTagMask));
+      __ j(zero, &ok);
+      __ fldz();
+      __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
+      __ FCmp();
+      __ j(not_zero, &ok);
+      __ Abort("Smi was wrapped in HeapNumber in output from bitop");
+      __ bind(&ok);
+    }
+    // In the integer32 case there are no Smis hidden in heap numbers, so we
+    // need only test for Smi zero.
+    __ test(value.reg(), Operand(value.reg()));
+    dest->false_target()->Branch(zero);
+    value.Unuse();
+    dest->Split(not_zero);
+  } else if (value.is_number()) {
+    Comment cmnt(masm_, "ONLY_NUMBER");
+    // Fast case if TypeInfo indicates only numbers.
+    if (FLAG_debug_code) {
+      __ AbortIfNotNumber(value.reg());
     }
     // Smi => false iff zero.
     ASSERT(kSmiTag == 0);
@@ -797,6 +930,7 @@
   // operand in register number. Returns operand as floating point number
   // on FPU stack.
   static void LoadFloatOperand(MacroAssembler* masm, Register number);
+
   // Code pattern for loading floating point values. Input values must
   // be either smi or heap number objects (fp values). Requirements:
   // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
@@ -815,15 +949,26 @@
   static void CheckFloatOperands(MacroAssembler* masm,
                                  Label* non_float,
                                  Register scratch);
+
   // Takes the operands in edx and eax and loads them as integers in eax
   // and ecx.
   static void LoadAsIntegers(MacroAssembler* masm,
+                             TypeInfo type_info,
                              bool use_sse3,
                              Label* operand_conversion_failure);
+  static void LoadNumbersAsIntegers(MacroAssembler* masm,
+                                    TypeInfo type_info,
+                                    bool use_sse3,
+                                    Label* operand_conversion_failure);
+  static void LoadUnknownsAsIntegers(MacroAssembler* masm,
+                                     bool use_sse3,
+                                     Label* operand_conversion_failure);
+
   // Test if operands are smis or heap numbers and load them
   // into xmm0 and xmm1 if they are. Operands are in edx and eax.
   // Leaves operands unchanged.
   static void LoadSSE2Operands(MacroAssembler* masm);
+
   // Test if operands are numbers (smi or HeapNumber objects), and load
   // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
   // either operand is not a number.  Operands are in edx and eax.
@@ -851,13 +996,14 @@
   }
 
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
                op_name,
                overwrite_name,
                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
                args_in_registers_ ? "RegArgs" : "StackArgs",
                args_reversed_ ? "_R" : "",
-               NumberInfo::ToString(operands_type_));
+               static_operands_type_.ToString(),
+               BinaryOpIC::GetName(runtime_operands_type_));
   return name_;
 }
 
@@ -869,8 +1015,11 @@
                                 Register dst,
                                 Register left,
                                 Register right,
+                                TypeInfo left_info,
+                                TypeInfo right_info,
                                 OverwriteMode mode)
-      : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
+      : op_(op), dst_(dst), left_(left), right_(right),
+        left_info_(left_info), right_info_(right_info), mode_(mode) {
     set_comment("[ DeferredInlineBinaryOperation");
   }
 
@@ -881,6 +1030,8 @@
   Register dst_;
   Register left_;
   Register right_;
+  TypeInfo left_info_;
+  TypeInfo right_info_;
   OverwriteMode mode_;
 };
 
@@ -894,18 +1045,24 @@
     CpuFeatures::Scope use_sse2(SSE2);
     Label call_runtime, after_alloc_failure;
     Label left_smi, right_smi, load_right, do_op;
-    __ test(left_, Immediate(kSmiTagMask));
-    __ j(zero, &left_smi);
-    __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
-           Factory::heap_number_map());
-    __ j(not_equal, &call_runtime);
-    __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
-    if (mode_ == OVERWRITE_LEFT) {
-      __ mov(dst_, left_);
-    }
-    __ jmp(&load_right);
+    if (!left_info_.IsSmi()) {
+      __ test(left_, Immediate(kSmiTagMask));
+      __ j(zero, &left_smi);
+      if (!left_info_.IsNumber()) {
+        __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
+               Factory::heap_number_map());
+        __ j(not_equal, &call_runtime);
+      }
+      __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
+      if (mode_ == OVERWRITE_LEFT) {
+        __ mov(dst_, left_);
+      }
+      __ jmp(&load_right);
 
-    __ bind(&left_smi);
+      __ bind(&left_smi);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(left_);
+    }
     __ SmiUntag(left_);
     __ cvtsi2sd(xmm0, Operand(left_));
     __ SmiTag(left_);
@@ -917,23 +1074,29 @@
     }
 
     __ bind(&load_right);
-    __ test(right_, Immediate(kSmiTagMask));
-    __ j(zero, &right_smi);
-    __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
-           Factory::heap_number_map());
-    __ j(not_equal, &call_runtime);
-    __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
-    if (mode_ == OVERWRITE_RIGHT) {
-      __ mov(dst_, right_);
-    } else if (mode_ == NO_OVERWRITE) {
-      Label alloc_failure;
-      __ push(left_);
-      __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
-      __ pop(left_);
-    }
-    __ jmp(&do_op);
+    if (!right_info_.IsSmi()) {
+      __ test(right_, Immediate(kSmiTagMask));
+      __ j(zero, &right_smi);
+      if (!right_info_.IsNumber()) {
+        __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
+               Factory::heap_number_map());
+        __ j(not_equal, &call_runtime);
+      }
+      __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
+      if (mode_ == OVERWRITE_RIGHT) {
+        __ mov(dst_, right_);
+      } else if (mode_ == NO_OVERWRITE) {
+        Label alloc_failure;
+        __ push(left_);
+        __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
+        __ pop(left_);
+      }
+      __ jmp(&do_op);
 
-    __ bind(&right_smi);
+      __ bind(&right_smi);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(right_);
+    }
     __ SmiUntag(right_);
     __ cvtsi2sd(xmm1, Operand(right_));
     __ SmiTag(right_);
@@ -959,17 +1122,117 @@
     __ pop(left_);
     __ bind(&call_runtime);
   }
-  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+  GenericBinaryOpStub stub(op_,
+                           mode_,
+                           NO_SMI_CODE_IN_STUB,
+                           TypeInfo::Combine(left_info_, right_info_));
   stub.GenerateCall(masm_, left_, right_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
   __ bind(&done);
 }
 
 
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
-                                           StaticType* type,
+static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
+                                  Token::Value op,
+                                  const Result& right,
+                                  const Result& left) {
+  // Set TypeInfo of result according to the operation performed.
+  // Rely on the fact that smis have a 31 bit payload on ia32.
+  ASSERT(kSmiValueSize == 31);
+  switch (op) {
+    case Token::COMMA:
+      return right.type_info();
+    case Token::OR:
+    case Token::AND:
+      // Result type can be either of the two input types.
+      return operands_type;
+    case Token::BIT_AND: {
+      // Anding with positive Smis will give you a Smi.
+      if (right.is_constant() && right.handle()->IsSmi() &&
+          Smi::cast(*right.handle())->value() >= 0) {
+        return TypeInfo::Smi();
+      } else if (left.is_constant() && left.handle()->IsSmi() &&
+          Smi::cast(*left.handle())->value() >= 0) {
+        return TypeInfo::Smi();
+      }
+      return (operands_type.IsSmi())
+          ? TypeInfo::Smi()
+          : TypeInfo::Integer32();
+    }
+    case Token::BIT_OR: {
+      // Oring with negative Smis will give you a Smi.
+      if (right.is_constant() && right.handle()->IsSmi() &&
+          Smi::cast(*right.handle())->value() < 0) {
+        return TypeInfo::Smi();
+      } else if (left.is_constant() && left.handle()->IsSmi() &&
+          Smi::cast(*left.handle())->value() < 0) {
+        return TypeInfo::Smi();
+      }
+      return (operands_type.IsSmi())
+          ? TypeInfo::Smi()
+          : TypeInfo::Integer32();
+    }
+    case Token::BIT_XOR:
+      // Result is always a 32 bit integer. Smi property of inputs is preserved.
+      return (operands_type.IsSmi())
+          ? TypeInfo::Smi()
+          : TypeInfo::Integer32();
+    case Token::SAR:
+      if (left.is_smi()) return TypeInfo::Smi();
+      // Result is a smi if we shift by a constant >= 1, otherwise an integer32.
+      // Shift amount is masked with 0x1F (ECMA standard 11.7.2).
+      return (right.is_constant() && right.handle()->IsSmi()
+              && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
+          ? TypeInfo::Smi()
+          : TypeInfo::Integer32();
+    case Token::SHR:
+      // Result is a smi if we shift by a constant >= 2, an integer32 if
+      // we shift by 1, and an unsigned 32-bit integer if we shift by 0.
+      // Shifting right by two or more leaves at most 30 significant bits,
+      // which always fits the 31-bit smi payload; a shift by one can still
+      // produce a 31-bit unsigned value, which only fits an integer32.
+      if (right.is_constant() && right.handle()->IsSmi()) {
+        int shift_amount = Smi::cast(*right.handle())->value() & 0x1F;
+        if (shift_amount > 1) {
+          return TypeInfo::Smi();
+        } else if (shift_amount > 0) {
+          return TypeInfo::Integer32();
+        }
+      }
+      return TypeInfo::Number();
+    case Token::ADD:
+      if (operands_type.IsSmi()) {
+        // The Integer32 range is big enough to take the sum of any two Smis.
+        return TypeInfo::Integer32();
+      } else if (operands_type.IsNumber()) {
+        return TypeInfo::Number();
+      } else if (left.type_info().IsString() || right.type_info().IsString()) {
+        return TypeInfo::String();
+      } else {
+        return TypeInfo::Unknown();
+      }
+    case Token::SHL:
+      return TypeInfo::Integer32();
+    case Token::SUB:
+      // The Integer32 range is big enough to take the difference of any two
+      // Smis.
+      return (operands_type.IsSmi())
+          ? TypeInfo::Integer32()
+          : TypeInfo::Number();
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      // Result is always a number.
+      return TypeInfo::Number();
+    default:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return TypeInfo::Unknown();
+}
+
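The CalculateTypeInfo function above refines the combined operand type into
a result type, and the TypeInfo::Combine calls rely on the types forming a
small lattice. A hedged sketch of how a bit-encoded lattice like this can
behave (the names and bit values here are assumptions for illustration, not
necessarily the encoding in type-info.h):

    #include <cassert>
    #include <cstdint>

    enum Type : uint8_t {
      kUnknown   = 0x0,  // no static knowledge
      kNumber    = 0x1,  // some kind of number
      kInteger32 = 0x3,  // number that fits in 32 bits
      kSmi       = 0x7,  // integer32 that fits the 31-bit smi payload
      kDouble    = 0x9   // number known to be a heap number
    };

    // Combining keeps only the properties both operands share, which is a
    // bitwise AND on this encoding.
    inline Type Combine(Type a, Type b) { return static_cast<Type>(a & b); }

    int main() {
      assert(Combine(kSmi, kSmi) == kSmi);        // smi op smi stays smi
      assert(Combine(kSmi, kDouble) == kNumber);  // mixed inputs: just number
      assert(Combine(kInteger32, kSmi) == kInteger32);
      assert(Combine(kNumber, kUnknown) == kUnknown);
      return 0;
    }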
+
+void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
                                            OverwriteMode overwrite_mode) {
   Comment cmnt(masm_, "[ BinaryOperation");
+  Token::Value op = expr->op();
   Comment cmnt_token(masm_, Token::String(op));
 
   if (op == Token::COMMA) {
@@ -982,17 +1245,21 @@
   Result left = frame_->Pop();
 
   if (op == Token::ADD) {
-    bool left_is_string = left.is_constant() && left.handle()->IsString();
-    bool right_is_string = right.is_constant() && right.handle()->IsString();
+    const bool left_is_string = left.type_info().IsString();
+    const bool right_is_string = right.type_info().IsString();
+    // Make sure constant strings have string type info.
+    ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
+           left_is_string);
+    ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
+           right_is_string);
     if (left_is_string || right_is_string) {
       frame_->Push(&left);
       frame_->Push(&right);
       Result answer;
       if (left_is_string) {
         if (right_is_string) {
-          // TODO(lrn): if both are constant strings
-          // -- do a compile time cons, if allocation during codegen is allowed.
-          answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+          StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+          answer = frame_->CallStub(&stub, 2);
         } else {
           answer =
             frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
@@ -1001,6 +1268,7 @@
         answer =
           frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
       }
+      answer.set_type_info(TypeInfo::String());
       frame_->Push(&answer);
       return;
     }
@@ -1021,8 +1289,10 @@
   }
 
   // Get number type of left and right sub-expressions.
-  NumberInfo::Type operands_type =
-      NumberInfo::Combine(left.number_info(), right.number_info());
+  TypeInfo operands_type =
+      TypeInfo::Combine(left.type_info(), right.type_info());
+
+  TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
 
   Result answer;
   if (left_is_non_smi_constant || right_is_non_smi_constant) {
@@ -1033,19 +1303,22 @@
                              operands_type);
     answer = stub.GenerateCall(masm_, frame_, &left, &right);
   } else if (right_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
-                                        type, false, overwrite_mode);
+    answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
+                                        false, overwrite_mode);
   } else if (left_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
-                                        type, true, overwrite_mode);
+    answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
+                                        true, overwrite_mode);
   } else {
     // Set the flags based on the operation, type and loop nesting level.
     // Bit operations always assume they likely operate on Smis. Still only
     // generate the inline Smi check code if this operation is part of a loop.
     // For all other operations only inline the Smi check code for likely smis
     // if the operation is part of a loop.
-    if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
-      answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+    if (loop_nesting() > 0 &&
+        (Token::IsBitOp(op) ||
+         operands_type.IsInteger32() ||
+         expr->type()->IsLikelySmi())) {
+      answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
     } else {
       GenericBinaryOpStub stub(op,
                                overwrite_mode,
@@ -1055,59 +1328,7 @@
     }
   }
 
-  // Set NumberInfo of result according to the operation performed.
-  // Rely on the fact that smis have a 31 bit payload on ia32.
-  ASSERT(kSmiValueSize == 31);
-  NumberInfo::Type result_type = NumberInfo::kUnknown;
-  switch (op) {
-    case Token::COMMA:
-      result_type = right.number_info();
-      break;
-    case Token::OR:
-    case Token::AND:
-      // Result type can be either of the two input types.
-      result_type = operands_type;
-      break;
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-      // Result is always a number. Smi property of inputs is preserved.
-      result_type = (operands_type == NumberInfo::kSmi)
-          ? NumberInfo::kSmi
-          : NumberInfo::kNumber;
-      break;
-    case Token::SAR:
-      // Result is a smi if we shift by a constant >= 1, otherwise a number.
-      result_type = (right.is_constant() && right.handle()->IsSmi()
-                     && Smi::cast(*right.handle())->value() >= 1)
-          ? NumberInfo::kSmi
-          : NumberInfo::kNumber;
-      break;
-    case Token::SHR:
-      // Result is a smi if we shift by a constant >= 2, otherwise a number.
-      result_type = (right.is_constant() && right.handle()->IsSmi()
-                     && Smi::cast(*right.handle())->value() >= 2)
-          ? NumberInfo::kSmi
-          : NumberInfo::kNumber;
-      break;
-    case Token::ADD:
-      // Result could be a string or a number. Check types of inputs.
-      result_type = NumberInfo::IsNumber(operands_type)
-          ? NumberInfo::kNumber
-          : NumberInfo::kUnknown;
-      break;
-    case Token::SHL:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      // Result is always a number.
-      result_type = NumberInfo::kNumber;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  answer.set_number_info(result_type);
+  answer.set_type_info(result_type);
   frame_->Push(&answer);
 }
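
All of the smi checks GenericBinaryOperation dispatches on rest on ia32's
pointer tagging, where kSmiTag == 0 and kSmiTagSize == 1. A standalone
sketch of that representation (editorial; it uses * 2 and / 2 in place of
the shifts the generated code uses):

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;

    intptr_t SmiTag(int32_t value) { return static_cast<intptr_t>(value) * 2; }
    int32_t SmiUntag(intptr_t smi) { return static_cast<int32_t>(smi / 2); }
    bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == 0; }

    int main() {
      intptr_t s = SmiTag(-42);
      assert(IsSmi(s) && SmiUntag(s) == -42);
      // Heap pointers carry a set low bit, so the single instruction
      // 'test reg, kSmiTagMask' distinguishes the two cases.
      return 0;
    }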
 
@@ -1193,12 +1414,19 @@
 }
 
 
+static void CheckTwoForSminess(MacroAssembler* masm,
+                               Register left, Register right, Register scratch,
+                               TypeInfo left_info, TypeInfo right_info,
+                               DeferredInlineBinaryOperation* deferred);
+
+
 // Implements a binary operation using a deferred code object and some
 // inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
                                                Result* left,
                                                Result* right,
                                                OverwriteMode overwrite_mode) {
+  Token::Value op = expr->op();
   Result answer;
   // Special handling of div and mod because they use fixed registers.
   if (op == Token::DIV || op == Token::MOD) {
@@ -1273,6 +1501,8 @@
                                           (op == Token::DIV) ? eax : edx,
                                           left->reg(),
                                           right->reg(),
+                                          left->type_info(),
+                                          right->type_info(),
                                           overwrite_mode);
     if (left->reg().is(right->reg())) {
       __ test(left->reg(), Immediate(kSmiTagMask));
@@ -1300,13 +1530,16 @@
       // Check for negative zero result.  If result is zero, and divisor
       // is negative, return a floating point negative zero.  The
       // virtual frame is unchanged in this block, so local control flow
-      // can use a Label rather than a JumpTarget.
-      Label non_zero_result;
-      __ test(left->reg(), Operand(left->reg()));
-      __ j(not_zero, &non_zero_result);
-      __ test(right->reg(), Operand(right->reg()));
-      deferred->Branch(negative);
-      __ bind(&non_zero_result);
+      // can use a Label rather than a JumpTarget.  If the context of this
+      // expression will treat -0 like 0, do not do this test.
+      if (!expr->no_negative_zero()) {
+        Label non_zero_result;
+        __ test(left->reg(), Operand(left->reg()));
+        __ j(not_zero, &non_zero_result);
+        __ test(right->reg(), Operand(right->reg()));
+        deferred->Branch(negative);
+        __ bind(&non_zero_result);
+      }
       // Check for the corner case of dividing the most negative smi by
       // -1. We cannot use the overflow flag, since it is not set by
       // idiv instruction.
@@ -1328,12 +1561,14 @@
       // the dividend is negative, return a floating point negative
       // zero.  The frame is unchanged in this block, so local control
       // flow can use a Label rather than a JumpTarget.
-      Label non_zero_result;
-      __ test(edx, Operand(edx));
-      __ j(not_zero, &non_zero_result, taken);
-      __ test(left->reg(), Operand(left->reg()));
-      deferred->Branch(negative);
-      __ bind(&non_zero_result);
+      if (!expr->no_negative_zero()) {
+        Label non_zero_result;
+        __ test(edx, Operand(edx));
+        __ j(not_zero, &non_zero_result, taken);
+        __ test(left->reg(), Operand(left->reg()));
+        deferred->Branch(negative);
+        __ bind(&non_zero_result);
+      }
       deferred->BindExit();
       left->Unuse();
       right->Unuse();
@@ -1370,15 +1605,49 @@
                                           answer.reg(),
                                           left->reg(),
                                           ecx,
+                                          left->type_info(),
+                                          right->type_info(),
                                           overwrite_mode);
-    __ mov(answer.reg(), left->reg());
-    __ or_(answer.reg(), Operand(ecx));
-    __ test(answer.reg(), Immediate(kSmiTagMask));
-    deferred->Branch(not_zero);
 
-    // Untag both operands.
-    __ mov(answer.reg(), left->reg());
-    __ SmiUntag(answer.reg());
+    Label do_op, left_nonsmi;
+    // If right is a smi, we generate a fast case when left is either a smi
+    // or a heap number.
+    if (CpuFeatures::IsSupported(SSE2) && right->type_info().IsSmi()) {
+      CpuFeatures::Scope use_sse2(SSE2);
+      __ mov(answer.reg(), left->reg());
+      // Fast case - both are actually smis.
+      if (!left->type_info().IsSmi()) {
+        __ test(answer.reg(), Immediate(kSmiTagMask));
+        __ j(not_zero, &left_nonsmi);
+      } else {
+        if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+      }
+      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
+      __ SmiUntag(answer.reg());
+      __ jmp(&do_op);
+
+      __ bind(&left_nonsmi);
+      // Branch if left is not a heap number.
+      __ cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
+             Factory::heap_number_map());
+      deferred->Branch(not_equal);
+
+      // Load integer value into answer register using truncation.
+      __ cvttsd2si(answer.reg(),
+                   FieldOperand(answer.reg(), HeapNumber::kValueOffset));
+      // Branch if the value does not fit in a smi.
+      __ cmp(answer.reg(), 0xc0000000);
+      deferred->Branch(negative);
+    } else {
+      CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
+                         left->type_info(), right->type_info(), deferred);
+
+      // Untag both operands.
+      __ mov(answer.reg(), left->reg());
+      __ SmiUntag(answer.reg());
+    }
+
+    __ bind(&do_op);
     __ SmiUntag(ecx);
     // Perform the operation.
     switch (op) {
@@ -1444,16 +1713,12 @@
                                         answer.reg(),
                                         left->reg(),
                                         right->reg(),
+                                        left->type_info(),
+                                        right->type_info(),
                                         overwrite_mode);
-  if (left->reg().is(right->reg())) {
-    __ test(left->reg(), Immediate(kSmiTagMask));
-  } else {
-    __ mov(answer.reg(), left->reg());
-    __ or_(answer.reg(), Operand(right->reg()));
-    ASSERT(kSmiTag == 0);  // Adjust test if not the case.
-    __ test(answer.reg(), Immediate(kSmiTagMask));
-  }
-  deferred->Branch(not_zero);
+  CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
+                     left->type_info(), right->type_info(), deferred);
+
   __ mov(answer.reg(), left->reg());
   switch (op) {
     case Token::ADD:
@@ -1480,14 +1745,16 @@
       // argument is negative, go to slow case.  The frame is unchanged
       // in this block, so local control flow can use a Label rather
       // than a JumpTarget.
-      Label non_zero_result;
-      __ test(answer.reg(), Operand(answer.reg()));
-      __ j(not_zero, &non_zero_result, taken);
-      __ mov(answer.reg(), left->reg());
-      __ or_(answer.reg(), Operand(right->reg()));
-      deferred->Branch(negative);
-      __ xor_(answer.reg(), Operand(answer.reg()));  // Positive 0 is correct.
-      __ bind(&non_zero_result);
+      if (!expr->no_negative_zero()) {
+        Label non_zero_result;
+        __ test(answer.reg(), Operand(answer.reg()));
+        __ j(not_zero, &non_zero_result, taken);
+        __ mov(answer.reg(), left->reg());
+        __ or_(answer.reg(), Operand(right->reg()));
+        deferred->Branch(negative);
+        __ xor_(answer.reg(), Operand(answer.reg()));  // Positive 0 is correct.
+        __ bind(&non_zero_result);
+      }
       break;
     }
 
@@ -1522,13 +1789,16 @@
   DeferredInlineSmiOperation(Token::Value op,
                              Register dst,
                              Register src,
+                             TypeInfo type_info,
                              Smi* value,
                              OverwriteMode overwrite_mode)
       : op_(op),
         dst_(dst),
         src_(src),
+        type_info_(type_info),
         value_(value),
         overwrite_mode_(overwrite_mode) {
+    if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
     set_comment("[ DeferredInlineSmiOperation");
   }
 
@@ -1538,6 +1808,7 @@
   Token::Value op_;
   Register dst_;
   Register src_;
+  TypeInfo type_info_;
   Smi* value_;
   OverwriteMode overwrite_mode_;
 };
@@ -1548,7 +1819,8 @@
   GenericBinaryOpStub stub(
       op_,
       overwrite_mode_,
-      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB,
+      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
   stub.GenerateCall(masm_, src_, value_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
@@ -1562,9 +1834,11 @@
                                      Register dst,
                                      Smi* value,
                                      Register src,
+                                     TypeInfo type_info,
                                      OverwriteMode overwrite_mode)
       : op_(op),
         dst_(dst),
+        type_info_(type_info),
         value_(value),
         src_(src),
         overwrite_mode_(overwrite_mode) {
@@ -1576,6 +1850,7 @@
  private:
   Token::Value op_;
   Register dst_;
+  TypeInfo type_info_;
   Smi* value_;
   Register src_;
   OverwriteMode overwrite_mode_;
@@ -1583,7 +1858,11 @@
 
 
 void DeferredInlineSmiOperationReversed::Generate() {
-  GenericBinaryOpStub igostub(op_, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  GenericBinaryOpStub igostub(
+      op_,
+      overwrite_mode_,
+      NO_SMI_CODE_IN_STUB,
+      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
   igostub.GenerateCall(masm_, value_, src_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
@@ -1595,9 +1874,14 @@
 class DeferredInlineSmiAdd: public DeferredCode {
  public:
   DeferredInlineSmiAdd(Register dst,
+                       TypeInfo type_info,
                        Smi* value,
                        OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+      : dst_(dst),
+        type_info_(type_info),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    if (type_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
     set_comment("[ DeferredInlineSmiAdd");
   }
 
@@ -1605,6 +1889,7 @@
 
  private:
   Register dst_;
+  TypeInfo type_info_;
   Smi* value_;
   OverwriteMode overwrite_mode_;
 };
@@ -1613,7 +1898,11 @@
 void DeferredInlineSmiAdd::Generate() {
   // Undo the optimistic add operation and call the shared stub.
   __ sub(Operand(dst_), Immediate(value_));
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  GenericBinaryOpStub igostub(
+      Token::ADD,
+      overwrite_mode_,
+      NO_SMI_CODE_IN_STUB,
+      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
   igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
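
DeferredInlineSmiAdd::Generate above backs out the speculative addition
before calling the shared stub. A hedged C++ model of that optimistic
pattern (illustrative only; the wrap-around arithmetic stands in for the
CPU's overflow flag):

    #include <cstdint>
    #include <cstdio>

    // Returns true if the fast path succeeded; on overflow *dst is restored,
    // mirroring the '__ sub(Operand(dst_), Immediate(value_))' undo above.
    bool OptimisticAdd(int32_t* dst, int32_t value) {
      uint32_t wrapped =
          static_cast<uint32_t>(*dst) + static_cast<uint32_t>(value);
      bool overflow = ((*dst >= 0) == (value >= 0)) &&
                      ((static_cast<int32_t>(wrapped) >= 0) != (*dst >= 0));
      *dst = static_cast<int32_t>(wrapped);  // optimistic add
      if (overflow) {
        *dst = static_cast<int32_t>(wrapped - static_cast<uint32_t>(value));
        return false;  // deferred path: call the generic stub instead
      }
      return true;
    }

    int main() {
      int32_t a = 2147483600;
      std::printf("%d %d\n", OptimisticAdd(&a, 100), a);  // 0 2147483600
      std::printf("%d %d\n", OptimisticAdd(&a, 7), a);    // 1 2147483607
      return 0;
    }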
@@ -1625,9 +1914,13 @@
 class DeferredInlineSmiAddReversed: public DeferredCode {
  public:
   DeferredInlineSmiAddReversed(Register dst,
+                               TypeInfo type_info,
                                Smi* value,
                                OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+      : dst_(dst),
+        type_info_(type_info),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
     set_comment("[ DeferredInlineSmiAddReversed");
   }
 
@@ -1635,6 +1928,7 @@
 
  private:
   Register dst_;
+  TypeInfo type_info_;
   Smi* value_;
   OverwriteMode overwrite_mode_;
 };
@@ -1643,7 +1937,11 @@
 void DeferredInlineSmiAddReversed::Generate() {
   // Undo the optimistic add operation and call the shared stub.
   __ sub(Operand(dst_), Immediate(value_));
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  GenericBinaryOpStub igostub(
+      Token::ADD,
+      overwrite_mode_,
+      NO_SMI_CODE_IN_STUB,
+      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
   igostub.GenerateCall(masm_, value_, dst_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
@@ -1656,9 +1954,14 @@
 class DeferredInlineSmiSub: public DeferredCode {
  public:
   DeferredInlineSmiSub(Register dst,
+                       TypeInfo type_info,
                        Smi* value,
                        OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+      : dst_(dst),
+        type_info_(type_info),
+        value_(value),
+        overwrite_mode_(overwrite_mode) {
+    if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
     set_comment("[ DeferredInlineSmiSub");
   }
 
@@ -1666,6 +1969,7 @@
 
  private:
   Register dst_;
+  TypeInfo type_info_;
   Smi* value_;
   OverwriteMode overwrite_mode_;
 };
@@ -1674,18 +1978,22 @@
 void DeferredInlineSmiSub::Generate() {
   // Undo the optimistic sub operation and call the shared stub.
   __ add(Operand(dst_), Immediate(value_));
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  GenericBinaryOpStub igostub(
+      Token::SUB,
+      overwrite_mode_,
+      NO_SMI_CODE_IN_STUB,
+      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
   igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
 
-Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
-                                                 Result* operand,
-                                                 Handle<Object> value,
-                                                 StaticType* type,
-                                                 bool reversed,
-                                                 OverwriteMode overwrite_mode) {
+Result CodeGenerator::ConstantSmiBinaryOperation(
+    BinaryOperation* expr,
+    Result* operand,
+    Handle<Object> value,
+    bool reversed,
+    OverwriteMode overwrite_mode) {
   // NOTE: This is an attempt to inline (a bit) more of the code for
   // some possible smi operations (like + and -) when (at least) one
   // of the operands is a constant smi.
@@ -1695,10 +2003,10 @@
   if (IsUnsafeSmi(value)) {
     Result unsafe_operand(value);
     if (reversed) {
-      return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+      return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
                                       overwrite_mode);
     } else {
-      return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+      return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
                                       overwrite_mode);
     }
   }
@@ -1707,6 +2015,7 @@
   Smi* smi_value = Smi::cast(*value);
   int int_value = smi_value->value();
 
+  Token::Value op = expr->op();
   Result answer;
   switch (op) {
     case Token::ADD: {
@@ -1718,17 +2027,23 @@
       DeferredCode* deferred = NULL;
       if (reversed) {
         deferred = new DeferredInlineSmiAddReversed(operand->reg(),
+                                                    operand->type_info(),
                                                     smi_value,
                                                     overwrite_mode);
       } else {
         deferred = new DeferredInlineSmiAdd(operand->reg(),
+                                            operand->type_info(),
                                             smi_value,
                                             overwrite_mode);
       }
       __ add(Operand(operand->reg()), Immediate(value));
       deferred->Branch(overflow);
-      __ test(operand->reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      if (!operand->type_info().IsSmi()) {
+        __ test(operand->reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
+      } else if (FLAG_debug_code) {
+        __ AbortIfNotSmi(operand->reg());
+      }
       deferred->BindExit();
       answer = *operand;
       break;
@@ -1743,24 +2058,31 @@
         answer = allocator()->Allocate();
         ASSERT(answer.is_valid());
         __ Set(answer.reg(), Immediate(value));
-        deferred = new DeferredInlineSmiOperationReversed(op,
-                                                          answer.reg(),
-                                                          smi_value,
-                                                          operand->reg(),
-                                                          overwrite_mode);
+        deferred =
+            new DeferredInlineSmiOperationReversed(op,
+                                                   answer.reg(),
+                                                   smi_value,
+                                                   operand->reg(),
+                                                   operand->type_info(),
+                                                   overwrite_mode);
         __ sub(answer.reg(), Operand(operand->reg()));
       } else {
         operand->ToRegister();
         frame_->Spill(operand->reg());
         answer = *operand;
         deferred = new DeferredInlineSmiSub(operand->reg(),
+                                            operand->type_info(),
                                             smi_value,
                                             overwrite_mode);
         __ sub(Operand(operand->reg()), Immediate(value));
       }
       deferred->Branch(overflow);
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      if (!operand->type_info().IsSmi()) {
+        __ test(answer.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
+      } else if (FLAG_debug_code) {
+        __ AbortIfNotSmi(operand->reg());
+      }
       deferred->BindExit();
       operand->Unuse();
       break;
@@ -1769,7 +2091,7 @@
     case Token::SAR:
       if (reversed) {
         Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                           overwrite_mode);
       } else {
         // Only the least significant 5 bits of the shift value are used.
@@ -1777,19 +2099,30 @@
         int shift_value = int_value & 0x1f;
         operand->ToRegister();
         frame_->Spill(operand->reg());
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           smi_value,
-                                           overwrite_mode);
-        __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-        if (shift_value > 0) {
-          __ sar(operand->reg(), shift_value);
-          __ and_(operand->reg(), ~kSmiTagMask);
+        if (!operand->type_info().IsSmi()) {
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             operand->reg(),
+                                             operand->reg(),
+                                             operand->type_info(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ test(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+          if (shift_value > 0) {
+            __ sar(operand->reg(), shift_value);
+            __ and_(operand->reg(), ~kSmiTagMask);
+          }
+          deferred->BindExit();
+        } else {
+          if (FLAG_debug_code) {
+            __ AbortIfNotSmi(operand->reg());
+          }
+          if (shift_value > 0) {
+            __ sar(operand->reg(), shift_value);
+            __ and_(operand->reg(), ~kSmiTagMask);
+          }
         }
-        deferred->BindExit();
         answer = *operand;
       }
       break;
@@ -1797,7 +2130,7 @@
     case Token::SHR:
       if (reversed) {
         Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                           overwrite_mode);
       } else {
         // Only the least significant 5 bits of the shift value are used.
@@ -1810,10 +2143,15 @@
             new DeferredInlineSmiOperation(op,
                                            answer.reg(),
                                            operand->reg(),
+                                           operand->type_info(),
                                            smi_value,
                                            overwrite_mode);
-        __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
+        if (!operand->type_info().IsSmi()) {
+          __ test(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+        } else if (FLAG_debug_code) {
+          __ AbortIfNotSmi(operand->reg());
+        }
         __ mov(answer.reg(), operand->reg());
         __ SmiUntag(answer.reg());
         __ shr(answer.reg(), shift_value);
@@ -1830,12 +2168,12 @@
 
     case Token::SHL:
       if (reversed) {
+        // Move operand into ecx and also into a second register.
+        // If operand is already in a register, take advantage of that.
+        // This lets us modify ecx, but still bail out to deferred code.
         Result right;
         Result right_copy_in_ecx;
-
-        // Make sure to get a copy of the right operand into ecx. This
-        // allows us to modify it without having to restore it in the
-        // deferred code.
+        TypeInfo right_type_info = operand->type_info();
         operand->ToRegister();
         if (operand->reg().is(ecx)) {
           right = allocator()->Allocate();
@@ -1855,10 +2193,15 @@
                                                    answer.reg(),
                                                    smi_value,
                                                    right.reg(),
+                                                   right_type_info,
                                                    overwrite_mode);
         __ mov(answer.reg(), Immediate(int_value));
         __ sar(ecx, kSmiTagSize);
-        deferred->Branch(carry);
+        if (!right_type_info.IsSmi()) {
+          deferred->Branch(carry);
+        } else if (FLAG_debug_code) {
+          __ AbortIfNotSmi(right.reg());
+        }
         __ shl_cl(answer.reg());
         __ cmp(answer.reg(), 0xc0000000);
         deferred->Branch(sign);
@@ -1877,6 +2220,7 @@
               new DeferredInlineSmiOperation(op,
                                              operand->reg(),
                                              operand->reg(),
+                                             operand->type_info(),
                                              smi_value,
                                              overwrite_mode);
           __ test(operand->reg(), Immediate(kSmiTagMask));
@@ -1891,10 +2235,15 @@
               new DeferredInlineSmiOperation(op,
                                              answer.reg(),
                                              operand->reg(),
+                                             operand->type_info(),
                                              smi_value,
                                              overwrite_mode);
-          __ test(operand->reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
+          if (!operand->type_info().IsSmi()) {
+            __ test(operand->reg(), Immediate(kSmiTagMask));
+            deferred->Branch(not_zero);
+          } else if (FLAG_debug_code) {
+            __ AbortIfNotSmi(operand->reg());
+          }
           __ mov(answer.reg(), operand->reg());
           ASSERT(kSmiTag == 0);  // adjust code if not the case
           // We do no shifts, only the Smi conversion, if shift_value is 1.
@@ -1918,20 +2267,27 @@
       frame_->Spill(operand->reg());
       DeferredCode* deferred = NULL;
       if (reversed) {
-        deferred = new DeferredInlineSmiOperationReversed(op,
-                                                          operand->reg(),
-                                                          smi_value,
-                                                          operand->reg(),
-                                                          overwrite_mode);
+        deferred =
+            new DeferredInlineSmiOperationReversed(op,
+                                                   operand->reg(),
+                                                   smi_value,
+                                                   operand->reg(),
+                                                   operand->type_info(),
+                                                   overwrite_mode);
       } else {
         deferred =  new DeferredInlineSmiOperation(op,
                                                    operand->reg(),
                                                    operand->reg(),
+                                                   operand->type_info(),
                                                    smi_value,
                                                    overwrite_mode);
       }
-      __ test(operand->reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      if (!operand->type_info().IsSmi()) {
+        __ test(operand->reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
+      } else if (FLAG_debug_code) {
+        __ AbortIfNotSmi(operand->reg());
+      }
       if (op == Token::BIT_AND) {
         __ and_(Operand(operand->reg()), Immediate(value));
       } else if (op == Token::BIT_XOR) {
@@ -1958,6 +2314,7 @@
             new DeferredInlineSmiOperation(op,
                                            operand->reg(),
                                            operand->reg(),
+                                           operand->type_info(),
                                            smi_value,
                                            overwrite_mode);
         // Check that lowest log2(value) bits of operand are zero, and test
@@ -1974,14 +2331,15 @@
         // default case here.
         Result constant_operand(value);
         if (reversed) {
-          answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+          answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                             overwrite_mode);
         } else {
-          answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+          answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
                                             overwrite_mode);
         }
       }
       break;
+
     // Generate inline code for mod of powers of 2 and negative powers of 2.
     case Token::MOD:
       if (!reversed &&
@@ -1989,13 +2347,15 @@
           (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
         operand->ToRegister();
         frame_->Spill(operand->reg());
-        DeferredCode* deferred = new DeferredInlineSmiOperation(op,
-                                                                operand->reg(),
-                                                                operand->reg(),
-                                                                smi_value,
-                                                                overwrite_mode);
+        DeferredCode* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           operand->reg(),
+                                           operand->reg(),
+                                           operand->type_info(),
+                                           smi_value,
+                                           overwrite_mode);
         // Check for negative or non-Smi left hand side.
-        __ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
+        __ test(operand->reg(), Immediate(kSmiTagMask | kSmiSignMask));
         deferred->Branch(not_zero);
         if (int_value < 0) int_value = -int_value;
         if (int_value == 1) {
@@ -2012,10 +2372,10 @@
     default: {
       Result constant_operand(value);
       if (reversed) {
-        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                           overwrite_mode);
       } else {
-        answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+        answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
                                           overwrite_mode);
       }
       break;
@@ -2027,12 +2387,30 @@
 
 
 static bool CouldBeNaN(const Result& result) {
+  if (result.type_info().IsSmi()) return false;
+  if (result.type_info().IsInteger32()) return false;
   if (!result.is_constant()) return true;
   if (!result.handle()->IsHeapNumber()) return false;
   return isnan(HeapNumber::cast(*result.handle())->value());
 }
 
 
+// Convert from signed to unsigned comparison to match the way EFLAGS are set
+// by FPU and XMM compare instructions.
+static Condition DoubleCondition(Condition cc) {
+  switch (cc) {
+    case less:          return below;
+    case equal:         return equal;
+    case less_equal:    return below_equal;
+    case greater:       return above;
+    case greater_equal: return above_equal;
+    default:            UNREACHABLE();
+  }
+  UNREACHABLE();
+  return equal;
+}
+
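DoubleCondition exists because comisd, ucomisd and the FPU compares report
their result through ZF/PF/CF the way an unsigned integer compare would,
leaving SF and OF clear. Summarizing the Intel manual's encoding (editorial
note, not part of the patch):

    result      ZF PF CF   condition that fires
    unordered    1  1  1   parity_even (the NaN bail-out used below)
    less         0  0  1   below
    equal        1  0  0   equal
    greater      0  0  0   above

A signed condition such as 'less' tests SF != OF, which these instructions
never establish, hence the remapping.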
+
 void CodeGenerator::Comparison(AstNode* node,
                                Condition cc,
                                bool strict,
@@ -2063,7 +2441,8 @@
     left_side_constant_null = left_side.handle()->IsNull();
     left_side_constant_1_char_string =
         (left_side.handle()->IsString() &&
-         (String::cast(*left_side.handle())->length() == 1));
+         String::cast(*left_side.handle())->length() == 1 &&
+         String::cast(*left_side.handle())->IsAsciiRepresentation());
   }
   bool right_side_constant_smi = false;
   bool right_side_constant_null = false;
@@ -2073,7 +2452,8 @@
     right_side_constant_null = right_side.handle()->IsNull();
     right_side_constant_1_char_string =
         (right_side.handle()->IsString() &&
-         (String::cast(*right_side.handle())->length() == 1));
+         String::cast(*right_side.handle())->length() == 1 &&
+         String::cast(*right_side.handle())->IsAsciiRepresentation());
   }
 
   if (left_side_constant_smi || right_side_constant_smi) {
@@ -2103,7 +2483,7 @@
         left_side = right_side;
         right_side = temp;
         cc = ReverseCondition(cc);
-        // This may reintroduce greater or less_equal as the value of cc.
+        // This may re-introduce greater or less_equal as the value of cc.
         // CompareStub and the inline code both support all values of cc.
       }
       // Implement comparison against a constant Smi, inlining the case
@@ -2117,61 +2497,58 @@
       // a jump target and branching to duplicate the virtual frame at
       // the first split.  We manually handle the off-frame references
       // by reconstituting them on the non-fall-through path.
-      JumpTarget is_smi;
-      __ test(left_side.reg(), Immediate(kSmiTagMask));
-      is_smi.Branch(zero, taken);
 
-      bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
-          && node->AsCompareOperation()->is_for_loop_condition();
-      if (!is_for_loop_compare
-          && CpuFeatures::IsSupported(SSE2)
-          && right_val->IsSmi()) {
-        // Right side is a constant smi and left side has been checked
-        // not to be a smi.
-        CpuFeatures::Scope use_sse2(SSE2);
-        JumpTarget not_number;
-        __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
-               Immediate(Factory::heap_number_map()));
-        not_number.Branch(not_equal, &left_side);
-        __ movdbl(xmm1,
-                  FieldOperand(left_reg, HeapNumber::kValueOffset));
-        int value = Smi::cast(*right_val)->value();
-        if (value == 0) {
-          __ xorpd(xmm0, xmm0);
-        } else {
-          Result temp = allocator()->Allocate();
-          __ mov(temp.reg(), Immediate(value));
-          __ cvtsi2sd(xmm0, Operand(temp.reg()));
-          temp.Unuse();
+      if (left_side.is_smi()) {
+        if (FLAG_debug_code) __ AbortIfNotSmi(left_side.reg());
+      } else {
+        JumpTarget is_smi;
+        __ test(left_side.reg(), Immediate(kSmiTagMask));
+        is_smi.Branch(zero, taken);
+
+        bool is_loop_condition = (node->AsExpression() != NULL) &&
+            node->AsExpression()->is_loop_condition();
+        if (!is_loop_condition &&
+            CpuFeatures::IsSupported(SSE2) &&
+            right_val->IsSmi()) {
+          // Right side is a constant smi and left side has been checked
+          // not to be a smi.
+          CpuFeatures::Scope use_sse2(SSE2);
+          JumpTarget not_number;
+          __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+                 Immediate(Factory::heap_number_map()));
+          not_number.Branch(not_equal, &left_side);
+          __ movdbl(xmm1,
+                    FieldOperand(left_reg, HeapNumber::kValueOffset));
+          int value = Smi::cast(*right_val)->value();
+          if (value == 0) {
+            __ xorpd(xmm0, xmm0);
+          } else {
+            Result temp = allocator()->Allocate();
+            __ mov(temp.reg(), Immediate(value));
+            __ cvtsi2sd(xmm0, Operand(temp.reg()));
+            temp.Unuse();
+          }
+          __ comisd(xmm1, xmm0);
+          // Jump to builtin for NaN.
+          not_number.Branch(parity_even, &left_side);
+          left_side.Unuse();
+          dest->true_target()->Branch(DoubleCondition(cc));
+          dest->false_target()->Jump();
+          not_number.Bind(&left_side);
         }
-        __ comisd(xmm1, xmm0);
-        // Jump to builtin for NaN.
-        not_number.Branch(parity_even, &left_side);
-        left_side.Unuse();
-        Condition double_cc = cc;
-        switch (cc) {
-          case less:          double_cc = below;       break;
-          case equal:         double_cc = equal;       break;
-          case less_equal:    double_cc = below_equal; break;
-          case greater:       double_cc = above;       break;
-          case greater_equal: double_cc = above_equal; break;
-          default: UNREACHABLE();
-        }
-        dest->true_target()->Branch(double_cc);
+
+        // Set up and call the compare stub.
+        CompareStub stub(cc, strict, kCantBothBeNaN);
+        Result result = frame_->CallStub(&stub, &left_side, &right_side);
+        result.ToRegister();
+        __ cmp(result.reg(), 0);
+        result.Unuse();
+        dest->true_target()->Branch(cc);
         dest->false_target()->Jump();
-        not_number.Bind(&left_side);
+
+        is_smi.Bind();
       }
 
-      // Setup and call the compare stub.
-      CompareStub stub(cc, strict, kCantBothBeNaN);
-      Result result = frame_->CallStub(&stub, &left_side, &right_side);
-      result.ToRegister();
-      __ cmp(result.reg(), 0);
-      result.Unuse();
-      dest->true_target()->Branch(cc);
-      dest->false_target()->Jump();
-
-      is_smi.Bind();
       left_side = Result(left_reg);
       right_side = Result(right_val);
       // Test smi equality and comparison by signed int comparison.
@@ -2265,6 +2642,7 @@
       JumpTarget is_not_string, is_string;
       Register left_reg = left_side.reg();
       Handle<Object> right_val = right_side.handle();
+      ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
       __ test(left_side.reg(), Immediate(kSmiTagMask));
       is_not_string.Branch(zero, &left_side);
       Result temp = allocator_->Allocate();
@@ -2289,7 +2667,7 @@
         dest->false_target()->Branch(not_equal);
         __ bind(&not_a_symbol);
       }
-      // If the receiver is not a string of the type we handle call the stub.
+      // Call the compare stub if the left side is not a flat ASCII string.
       __ and_(temp.reg(),
           kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
       __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag);
@@ -2307,7 +2685,7 @@
       dest->false_target()->Jump();
 
       is_string.Bind(&left_side);
-      // Here we know we have a sequential ASCII string.
+      // left_side is a sequential ASCII string.
       left_side = Result(left_reg);
       right_side = Result(right_val);
       Result temp2 = allocator_->Allocate();
@@ -2316,36 +2694,37 @@
       if (cc == equal) {
         Label comparison_done;
         __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
-               Immediate(1));
+               Immediate(Smi::FromInt(1)));
         __ j(not_equal, &comparison_done);
         uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
+            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
         __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
                 char_value);
         __ bind(&comparison_done);
       } else {
         __ mov(temp2.reg(),
                FieldOperand(left_side.reg(), String::kLengthOffset));
+        __ SmiUntag(temp2.reg());
         __ sub(Operand(temp2.reg()), Immediate(1));
         Label comparison;
-        // If the length is 0 then our subtraction gave -1 which compares less
+        // If the length is 0 then the subtraction gave -1 which compares less
         // than any character.
         __ j(negative, &comparison);
         // Otherwise load the first character.
         __ movzx_b(temp2.reg(),
                    FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
         __ bind(&comparison);
-        // Compare the first character of the string with out constant
-        // 1-character string.
+        // Compare the first character of the string with the
+        // constant 1-character string.
         uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
+            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
         __ cmp(Operand(temp2.reg()), Immediate(char_value));
         Label characters_were_different;
         __ j(not_equal, &characters_were_different);
         // If the first character is the same then the long string sorts after
         // the short one.
         __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
-               Immediate(1));
+               Immediate(Smi::FromInt(1)));
         __ bind(&characters_were_different);
       }
       temp2.Unuse();
@@ -2354,27 +2733,55 @@
       dest->Split(cc);
     }
   } else {
-    // Neither side is a constant Smi or null.
-    // If either side is a non-smi constant, skip the smi check.
+    // Neither side is a constant Smi, constant 1-char string, or constant
+    // null.  If either side is a non-smi constant, or is known to be a heap
+    // number, skip the smi check.
     bool known_non_smi =
         (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
-        (right_side.is_constant() && !right_side.handle()->IsSmi());
+        (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
+        left_side.type_info().IsDouble() ||
+        right_side.type_info().IsDouble();
     NaNInformation nan_info =
         (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
         kBothCouldBeNaN :
         kCantBothBeNaN;
+
+    // Inline number comparison, handling any combination of smis and heap
+    // numbers, if:
+    //   the code is in a loop,
+    //   the compare operation is different from equal, and
+    //   the compare is not a for-loop condition.
+    // The reason for excluding equal is that it will most likely be done
+    // with smis (not heap numbers), and the code for comparing smis is
+    // inlined separately.  The same reasoning applies to for-loop
+    // conditions, which will also most likely be smi comparisons.
+    bool is_loop_condition = (node->AsExpression() != NULL)
+        && node->AsExpression()->is_loop_condition();
+    bool inline_number_compare =
+        loop_nesting() > 0 && cc != equal && !is_loop_condition;
+
+    // Left and right are needed in registers for the following code.
     left_side.ToRegister();
     right_side.ToRegister();
 
     if (known_non_smi) {
-      // When non-smi, call out to the compare stub.
-      CompareStub stub(cc, strict, nan_info);
-      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      if (cc == equal) {
-        __ test(answer.reg(), Operand(answer.reg()));
-      } else {
-        __ cmp(answer.reg(), 0);
+      // Inline the equality check if neither operand can be NaN.  If both
+      // objects are the same, they are equal.
+      if (nan_info == kCantBothBeNaN && cc == equal) {
+        __ cmp(left_side.reg(), Operand(right_side.reg()));
+        dest->true_target()->Branch(equal);
       }
+
+      // Inline number comparison.
+      if (inline_number_compare) {
+        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+      }
+
+      // End of the in-line compare; call out to the compare stub.  Don't
+      // include number comparison in the stub if it was inlined.
+      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+      __ test(answer.reg(), Operand(answer.reg()));
       answer.Unuse();
       dest->Split(cc);
     } else {
@@ -2387,6 +2794,7 @@
       Register left_reg = left_side.reg();
       Register right_reg = right_side.reg();
 
+      // In-line check for comparing two smis.
       Result temp = allocator_->Allocate();
       ASSERT(temp.is_valid());
       __ mov(temp.reg(), left_side.reg());
@@ -2394,8 +2802,22 @@
       __ test(temp.reg(), Immediate(kSmiTagMask));
       temp.Unuse();
       is_smi.Branch(zero, taken);
-      // When non-smi, call out to the compare stub.
-      CompareStub stub(cc, strict, nan_info);
+
+      // Inline the equality check if neither operand can be NaN.  If both
+      // objects are the same, they are equal.
+      if (nan_info == kCantBothBeNaN && cc == equal) {
+        __ cmp(left_side.reg(), Operand(right_side.reg()));
+        dest->true_target()->Branch(equal);
+      }
+
+      // Inline number comparison.
+      if (inline_number_compare) {
+        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+      }
+
+      // End of the in-line compare; call out to the compare stub.  Don't
+      // include number comparison in the stub if it was inlined.
+      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
       if (cc == equal) {
         __ test(answer.reg(), Operand(answer.reg()));
@@ -2418,6 +2840,148 @@
 }
 
 
+// Check that the comparison operand is a number.  Jump to the not_numbers
+// jump target, passing the left and right results, if the operand is not a
+// number.
+static void CheckComparisonOperand(MacroAssembler* masm_,
+                                   Result* operand,
+                                   Result* left_side,
+                                   Result* right_side,
+                                   JumpTarget* not_numbers) {
+  // Perform check if operand is not known to be a number.
+  if (!operand->type_info().IsNumber()) {
+    Label done;
+    __ test(operand->reg(), Immediate(kSmiTagMask));
+    __ j(zero, &done);
+    __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
+           Immediate(Factory::heap_number_map()));
+    not_numbers->Branch(not_equal, left_side, right_side, not_taken);
+    __ bind(&done);
+  }
+}
+
+
+// Load a comparison operand onto the FPU stack.  This assumes that the
+// operand has already been checked and is a number.
+static void LoadComparisonOperand(MacroAssembler* masm_,
+                                  Result* operand) {
+  Label done;
+  if (operand->type_info().IsDouble()) {
+    // Operand is known to be a heap number, just load it.
+    __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+  } else if (operand->type_info().IsSmi()) {
+    // Operand is known to be a smi. Convert it to double and keep the original
+    // smi.
+    __ SmiUntag(operand->reg());
+    __ push(operand->reg());
+    __ fild_s(Operand(esp, 0));
+    __ pop(operand->reg());
+    __ SmiTag(operand->reg());
+  } else {
+    // Operand type not known; check for smi, otherwise assume heap number.
+    Label smi;
+    __ test(operand->reg(), Immediate(kSmiTagMask));
+    __ j(zero, &smi);
+    __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+    __ jmp(&done);
+    __ bind(&smi);
+    __ SmiUntag(operand->reg());
+    __ push(operand->reg());
+    __ fild_s(Operand(esp, 0));
+    __ pop(operand->reg());
+    __ SmiTag(operand->reg());
+    __ jmp(&done);
+  }
+  __ bind(&done);
+}
+
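The smi case above converts by untagging in place, bouncing the value
through the stack for fild_s, and retagging so the register still holds a
valid smi afterwards. In C++ terms the conversion it performs is simply
(hedged sketch):

    #include <cstdint>

    // What the untag/fild_s/retag sequence computes: the 31-bit payload as
    // a double, with the tagged register value left unchanged.
    double SmiToDouble(intptr_t tagged_smi) {
      int32_t untagged = static_cast<int32_t>(tagged_smi / 2);  // SmiUntag
      return static_cast<double>(untagged);                     // fild_s
    }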
+
+// Load a comparison operand into an XMM register.  Jump to the not_numbers
+// jump target, passing the left and right results, if the operand is not a
+// number.
+static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
+                                      Result* operand,
+                                      XMMRegister reg,
+                                      Result* left_side,
+                                      Result* right_side,
+                                      JumpTarget* not_numbers) {
+  Label done;
+  if (operand->type_info().IsDouble()) {
+    // Operand is known to be a heap number, just load it.
+    __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+  } else if (operand->type_info().IsSmi()) {
+    // Operand is known to be a smi. Convert it to double and keep the original
+    // smi.
+    __ SmiUntag(operand->reg());
+    __ cvtsi2sd(reg, Operand(operand->reg()));
+    __ SmiTag(operand->reg());
+  } else {
+    // Operand type not known; check for smi or heap number.
+    Label smi;
+    __ test(operand->reg(), Immediate(kSmiTagMask));
+    __ j(zero, &smi);
+    if (!operand->type_info().IsNumber()) {
+      __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
+             Immediate(Factory::heap_number_map()));
+      not_numbers->Branch(not_equal, left_side, right_side, taken);
+    }
+    __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+    __ jmp(&done);
+
+    __ bind(&smi);
+    // Convert the smi to a double and keep the original smi.
+    __ SmiUntag(operand->reg());
+    __ cvtsi2sd(reg, Operand(operand->reg()));
+    __ SmiTag(operand->reg());
+    __ jmp(&done);
+  }
+  __ bind(&done);
+}
+
+
+void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
+                                                   Result* right_side,
+                                                   Condition cc,
+                                                   ControlDestination* dest) {
+  ASSERT(left_side->is_register());
+  ASSERT(right_side->is_register());
+
+  JumpTarget not_numbers;
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+
+    // Load the left and right operands into xmm0 and xmm1 and compare.
+    LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side,
+                              &not_numbers);
+    LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
+                              &not_numbers);
+    __ comisd(xmm0, xmm1);
+  } else {
+    Label check_right, compare;
+
+    // Make sure that both comparison operands are numbers.
+    CheckComparisonOperand(masm_, left_side, left_side, right_side,
+                           &not_numbers);
+    CheckComparisonOperand(masm_, right_side, left_side, right_side,
+                           &not_numbers);
+
+    // Load the right and left operands onto the FPU stack and compare.
+    LoadComparisonOperand(masm_, right_side);
+    LoadComparisonOperand(masm_, left_side);
+    __ FCmp();
+  }
+
+  // Bail out if a NaN is involved.
+  not_numbers.Branch(parity_even, left_side, right_side, not_taken);
+
+  // Split to destination targets based on comparison.
+  left_side->Unuse();
+  right_side->Unuse();
+  dest->true_target()->Branch(DoubleCondition(cc));
+  dest->false_target()->Jump();
+
+  not_numbers.Bind(left_side, right_side);
+}
+
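Taken together, the code GenerateInlineNumberComparison emits behaves like
this C++ at runtime (a sketch with invented names; kNotNumbers stands for
the not_numbers target that falls back to the compare stub):

    #include <cassert>
    #include <cmath>

    enum Outcome { kTrue, kFalse, kNotNumbers };

    // cc == less is shown; DoubleCondition maps it to 'below' on the flags.
    Outcome InlineNumberLess(bool both_numbers, double left, double right) {
      if (!both_numbers) return kNotNumbers;  // CheckComparisonOperand
      if (std::isnan(left) || std::isnan(right)) {
        return kNotNumbers;                   // the parity_even branch
      }
      return left < right ? kTrue : kFalse;   // dest->true/false targets
    }

    int main() {
      assert(InlineNumberLess(true, 1.5, 2.0) == kTrue);
      assert(InlineNumberLess(true, NAN, 2.0) == kNotNumbers);  // NaN: stub
      assert(InlineNumberLess(false, 0.0, 0.0) == kNotNumbers);
      return 0;
    }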
+
 // Call the function just below TOS on the stack with the given
 // arguments. The receiver is the TOS.
 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
@@ -3318,6 +3882,26 @@
 }
 
 
+void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
+  ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
+  if (slot->type() == Slot::LOCAL) {
+    frame_->SetTypeForLocalAt(slot->index(), info);
+  } else {
+    frame_->SetTypeForParamAt(slot->index(), info);
+  }
+  if (FLAG_debug_code && info.IsSmi()) {
+    if (slot->type() == Slot::LOCAL) {
+      frame_->PushLocalAt(slot->index());
+    } else {
+      frame_->PushParameterAt(slot->index());
+    }
+    Result var = frame_->Pop();
+    var.ToRegister();
+    __ AbortIfNotSmi(var.reg());
+  }
+}
+
+
 void CodeGenerator::VisitForStatement(ForStatement* node) {
   ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ ForStatement");
@@ -3410,6 +3994,17 @@
   }
 
   CheckStack();  // TODO(1222600): ignore if body contains calls.
+
+  // We know that the loop index is a smi if it is not modified in the
+  // loop body and it is checked against a constant limit in the loop
+  // condition.  In this case, we reset the static type information of the
+  // loop index to smi before compiling the body, the update expression, and
+  // the bottom check of the loop condition.
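+  // For example, in "for (var i = 0; i < 100; i++) { ... }" where the body
+  // never writes to i, the index stays a smi for the whole loop.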
+  if (node->is_fast_smi_loop()) {
+    // Set number type of the loop variable to smi.
+    SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
+  }
+
   Visit(node->body());
 
   // If there is an update expression, compile it if necessary.
@@ -3429,6 +4024,13 @@
     }
   }
 
+  // Set the type of the loop variable to smi before compiling the test
+  // expression if we are in a fast smi loop condition.
+  if (node->is_fast_smi_loop() && has_valid_frame()) {
+    // Set number type of the loop variable to smi.
+    SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
+  }
+
   // Based on the condition analysis, compile the backward jump as
   // necessary.
   switch (info) {
@@ -4049,9 +4651,8 @@
 }
 
 
-Result CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
-  ASSERT(boilerplate->IsBoilerplate());
-
+Result CodeGenerator::InstantiateFunction(
+    Handle<SharedFunctionInfo> function_info) {
   // The inevitable call will sync frame elements to memory anyway, so
   // we do it eagerly to allow us to push the arguments directly into
   // place.
@@ -4059,15 +4660,15 @@
 
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
-  if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+  if (scope()->is_function_scope() && function_info->num_literals() == 0) {
     FastNewClosureStub stub;
-    frame()->EmitPush(Immediate(boilerplate));
+    frame()->EmitPush(Immediate(function_info));
     return frame()->CallStub(&stub, 1);
   } else {
-    // Call the runtime to instantiate the function boilerplate
-    // object.
+    // Call the runtime to instantiate the function based on the
+    // shared function info.
     frame()->EmitPush(esi);
-    frame()->EmitPush(Immediate(boilerplate));
+    frame()->EmitPush(Immediate(function_info));
     return frame()->CallRuntime(Runtime::kNewClosure, 2);
   }
 }
@@ -4075,27 +4676,29 @@
 
 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
   Comment cmnt(masm_, "[ FunctionLiteral");
-
-  // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script(), this);
+  ASSERT(!in_safe_int32_mode());
+  // Build the function info and instantiate it.
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) return;
-  Result result = InstantiateBoilerplate(boilerplate);
+  Result result = InstantiateFunction(function_info);
   frame()->Push(&result);
 }
 
 
-void CodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
-  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
-  Result result = InstantiateBoilerplate(node->boilerplate());
+void CodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
+  ASSERT(!in_safe_int32_mode());
+  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
+  Result result = InstantiateFunction(node->shared_function_info());
   frame()->Push(&result);
 }
 
 
 void CodeGenerator::VisitConditional(Conditional* node) {
   Comment cmnt(masm_, "[ Conditional");
+  ASSERT(!in_safe_int32_mode());
   JumpTarget then;
   JumpTarget else_;
   JumpTarget exit;
@@ -4266,6 +4869,7 @@
     Slot* slot,
     TypeofState typeof_state,
     JumpTarget* slow) {
+  ASSERT(!in_safe_int32_mode());
   // Check that no extension objects have been created by calls to
   // eval from the current scope to the global scope.
   Register context = esi;
@@ -4434,10 +5038,20 @@
 }
 
 
-void CodeGenerator::VisitSlot(Slot* node) {
+void CodeGenerator::VisitSlot(Slot* slot) {
   Comment cmnt(masm_, "[ Slot");
-  Result result = LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
-  frame()->Push(&result);
+  if (in_safe_int32_mode()) {
+    if (slot->type() == Slot::LOCAL && !slot->is_arguments()) {
+      frame()->UntaggedPushLocalAt(slot->index());
+    } else if (slot->type() == Slot::PARAMETER) {
+      frame()->UntaggedPushParameterAt(slot->index());
+    } else {
+      UNREACHABLE();
+    }
+  } else {
+    Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+    frame()->Push(&result);
+  }
 }
 
 
@@ -4449,6 +5063,7 @@
     Visit(expr);
   } else {
     ASSERT(var->is_global());
+    ASSERT(!in_safe_int32_mode());
     Reference ref(this, node);
     ref.GetValue();
   }
@@ -4457,7 +5072,11 @@
 
 void CodeGenerator::VisitLiteral(Literal* node) {
   Comment cmnt(masm_, "[ Literal");
-  frame_->Push(node->handle());
+  if (in_safe_int32_mode()) {
+    frame_->PushUntaggedElement(node->handle());
+  } else {
+    frame_->Push(node->handle());
+  }
 }
 
 
@@ -4531,6 +5150,7 @@
 
 
 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+  ASSERT(!in_safe_int32_mode());
   Comment cmnt(masm_, "[ RegExp Literal");
 
   // Retrieve the literals array and check the allocated entry.  Begin
@@ -4567,6 +5187,7 @@
 
 
 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+  ASSERT(!in_safe_int32_mode());
   Comment cmnt(masm_, "[ ObjectLiteral");
 
   // Load a writable copy of the function of this activation in a
@@ -4585,11 +5206,13 @@
   frame_->Push(Smi::FromInt(node->literal_index()));
   // Constant properties.
   frame_->Push(node->constant_properties());
+  // Should the object literal have fast elements?
+  frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
   Result clone;
   if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   }
   frame_->Push(&clone);
 
@@ -4649,6 +5272,7 @@
 
 
 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+  ASSERT(!in_safe_int32_mode());
   Comment cmnt(masm_, "[ ArrayLiteral");
 
   // Load a writable copy of the function of this activation in a
@@ -4720,6 +5344,7 @@
 
 
 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+  ASSERT(!in_safe_int32_mode());
   ASSERT(!in_spilled_code());
   // Call runtime routine to allocate the catch extension object and
   // assign the exception value to the catch variable.
@@ -4744,17 +5369,23 @@
 
   // Evaluate the right-hand side.
   if (node->is_compound()) {
+    // For a compound assignment the right-hand side is a binary operation
+    // between the current property value and the actual right-hand side.
     Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
     frame()->Push(&result);
     Load(node->value());
 
+    // Perform the binary operation.
     bool overwrite_value =
         (node->value()->AsBinaryOperation() != NULL &&
          node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
-    GenericBinaryOperation(node->binary_op(),
-                           node->type(),
+    // Construct the implicit binary operation.
+    BinaryOperation expr(node, node->binary_op(), node->target(),
+                         node->value());
+    GenericBinaryOperation(&expr,
                            overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
   } else {
+    // For non-compound assignment just load the right-hand side.
     Load(node->value());
   }
 
@@ -4777,7 +5408,9 @@
   Property* prop = node->target()->AsProperty();
   ASSERT(var == NULL || (prop == NULL && var->is_global()));
 
-  // Initialize name and evaluate the receiver subexpression if necessary.
+  // Initialize name and evaluate the receiver sub-expression if necessary. If
+  // the receiver is trivial it is not placed on the stack at this point, but
+  // loaded whenever actually needed.
   Handle<String> name;
   bool is_trivial_receiver = false;
   if (var != NULL) {
@@ -4791,10 +5424,13 @@
     if (!is_trivial_receiver) Load(prop->obj());
   }
 
+  // Change to slow case in the beginning of an initialization block to
+  // avoid the quadratic behavior of repeatedly adding fast properties.
   if (node->starts_initialization_block()) {
+    // Initialization block consists of assignments of the form expr.x = ..., so
+    // this will never be an assignment to a variable, so there must be a
+    // receiver object.
     ASSERT_EQ(NULL, var);
-    // Change to slow case in the beginning of an initialization block to
-    // avoid the quadratic behavior of repeatedly adding fast properties.
     if (is_trivial_receiver) {
       frame()->Push(prop->obj());
     } else {
@@ -4803,14 +5439,21 @@
     Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
   }
 
+  // Change to fast case at the end of an initialization block. To prepare for
+  // that add an extra copy of the receiver to the frame, so that it can be
+  // converted back to fast case after the assignment.
   if (node->ends_initialization_block() && !is_trivial_receiver) {
-    // Add an extra copy of the receiver to the frame, so that it can be
-    // converted back to fast case after the assignment.
     frame()->Dup();
   }
 
+  // Stack layout:
+  // [tos]   : receiver (only materialized if non-trivial)
+  // [tos+1] : receiver if at the end of an initialization block
+
   // Evaluate the right-hand side.
   if (node->is_compound()) {
+    // For a compound assignment the right-hand side is a binary operation
+    // between the current property value and the actual right-hand side.
     if (is_trivial_receiver) {
       frame()->Push(prop->obj());
     } else if (var != NULL) {
@@ -4828,13 +5471,21 @@
     bool overwrite_value =
         (node->value()->AsBinaryOperation() != NULL &&
          node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
-    GenericBinaryOperation(node->binary_op(),
-                           node->type(),
+    // Construct the implicit binary operation.
+    BinaryOperation expr(node, node->binary_op(), node->target(),
+                         node->value());
+    GenericBinaryOperation(&expr,
                            overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
   } else {
+    // For non-compound assignment just load the right-hand side.
     Load(node->value());
   }
 
+  // Stack layout:
+  // [tos]   : value
+  // [tos+1] : receiver (only materialized if non-trivial)
+  // [tos+2] : receiver if at the end of an initialization block
+
   // Perform the assignment.  It is safe to ignore constants here.
   ASSERT(var == NULL || var->mode() != Variable::CONST);
   ASSERT_NE(Token::INIT_CONST, node->op());
@@ -4848,6 +5499,10 @@
   Result answer = EmitNamedStore(name, is_contextual);
   frame()->Push(&answer);
 
+  // Stack layout:
+  // [tos]   : result
+  // [tos+1] : receiver if at the end of an initialization block
+
   if (node->ends_initialization_block()) {
     ASSERT_EQ(NULL, var);
     // The argument to the runtime call is the receiver.
@@ -4864,6 +5519,9 @@
     Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
   }
 
+  // Stack layout:
+  // [tos]   : result
+
   ASSERT_EQ(frame()->height(), original_height + 1);
 }
 
@@ -4872,54 +5530,76 @@
 #ifdef DEBUG
   int original_height = frame()->height();
 #endif
-  Comment cmnt(masm_, "[ Named Property Assignment");
+  Comment cmnt(masm_, "[ Keyed Property Assignment");
   Property* prop = node->target()->AsProperty();
   ASSERT_NOT_NULL(prop);
 
   // Evaluate the receiver subexpression.
   Load(prop->obj());
 
+  // Change to slow case in the beginning of an initialization block to
+  // avoid the quadratic behavior of repeatedly adding fast properties.
   if (node->starts_initialization_block()) {
-    // Change to slow case in the beginning of an initialization block to
-    // avoid the quadratic behavior of repeatedly adding fast properties.
     frame_->Dup();
     Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
   }
 
+  // Change to fast case at the end of an initialization block. To prepare for
+  // that add an extra copy of the receiver to the frame, so that it can be
+  // converted back to fast case after the assignment.
   if (node->ends_initialization_block()) {
-    // Add an extra copy of the receiver to the frame, so that it can be
-    // converted back to fast case after the assignment.
     frame_->Dup();
   }
 
   // Evaluate the key subexpression.
   Load(prop->key());
 
+  // Stack layout:
+  // [tos]   : key
+  // [tos+1] : receiver
+  // [tos+2] : receiver if at the end of an initialization block
+
   // Evaluate the right-hand side.
   if (node->is_compound()) {
-    // Duplicate receiver and key.
+    // For a compound assignment the right-hand side is a binary operation
+    // between the current property value and the actual right-hand side.
+    // Duplicate receiver and key for loading the current property value.
     frame()->PushElementAt(1);
     frame()->PushElementAt(1);
     Result value = EmitKeyedLoad();
     frame()->Push(&value);
     Load(node->value());
 
+    // Perform the binary operation.
     bool overwrite_value =
         (node->value()->AsBinaryOperation() != NULL &&
          node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
-    GenericBinaryOperation(node->binary_op(),
-                           node->type(),
+    BinaryOperation expr(node, node->binary_op(), node->target(),
+                         node->value());
+    GenericBinaryOperation(&expr,
                            overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
   } else {
+    // For non-compound assignment just load the right-hand side.
     Load(node->value());
   }
 
+  // Stack layout:
+  // [tos]   : value
+  // [tos+1] : key
+  // [tos+2] : receiver
+  // [tos+3] : receiver if at the end of an initialization block
+
   // Perform the assignment.  It is safe to ignore constants here.
   ASSERT(node->op() != Token::INIT_CONST);
   CodeForSourcePosition(node->position());
   Result answer = EmitKeyedStore(prop->key()->type());
   frame()->Push(&answer);
 
+  // Stack layout:
+  // [tos]   : result
+  // [tos+1] : receiver if at the end of an initialization block
+
+  // Change to fast case at the end of an initialization block.
   if (node->ends_initialization_block()) {
     // The argument to the runtime call is the extra copy of the receiver,
     // which is below the value of the assignment.  Swap the receiver and
@@ -4931,11 +5611,15 @@
     Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
   }
 
+  // Stack layout:
+  // [tos]   : result
+
   ASSERT(frame()->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitAssignment(Assignment* node) {
+  ASSERT(!in_safe_int32_mode());
 #ifdef DEBUG
   int original_height = frame()->height();
 #endif
@@ -4971,6 +5655,7 @@
 
 
 void CodeGenerator::VisitThrow(Throw* node) {
+  ASSERT(!in_safe_int32_mode());
   Comment cmnt(masm_, "[ Throw");
   Load(node->exception());
   Result result = frame_->CallRuntime(Runtime::kThrow, 1);
@@ -4979,6 +5664,7 @@
 
 
 void CodeGenerator::VisitProperty(Property* node) {
+  ASSERT(!in_safe_int32_mode());
   Comment cmnt(masm_, "[ Property");
   Reference property(this, node);
   property.GetValue();
@@ -4986,6 +5672,7 @@
 
 
 void CodeGenerator::VisitCall(Call* node) {
+  ASSERT(!in_safe_int32_mode());
   Comment cmnt(masm_, "[ Call");
 
   Expression* function = node->expression();
@@ -5201,6 +5888,7 @@
 
 
 void CodeGenerator::VisitCallNew(CallNew* node) {
+  ASSERT(!in_safe_int32_mode());
   Comment cmnt(masm_, "[ CallNew");
 
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -5270,7 +5958,7 @@
   Result value = frame_->Pop();
   value.ToRegister();
   ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask | 0x80000000));
+  __ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask));
   value.Unuse();
   destination()->Split(zero);
 }
@@ -5286,43 +5974,11 @@
   Comment(masm_, "[ GenerateFastCharCodeAt");
   ASSERT(args->length() == 2);
 
-  Label slow_case;
-  Label end;
-  Label not_a_flat_string;
-  Label try_again_with_new_string;
-  Label ascii_string;
-  Label got_char_code;
-
   Load(args->at(0));
   Load(args->at(1));
   Result index = frame_->Pop();
   Result object = frame_->Pop();
 
-  // Get register ecx to use as shift amount later.
-  Result shift_amount;
-  if (object.is_register() && object.reg().is(ecx)) {
-    Result fresh = allocator_->Allocate();
-    shift_amount = object;
-    object = fresh;
-    __ mov(object.reg(), ecx);
-  }
-  if (index.is_register() && index.reg().is(ecx)) {
-    Result fresh = allocator_->Allocate();
-    shift_amount = index;
-    index = fresh;
-    __ mov(index.reg(), ecx);
-  }
-  // There could be references to ecx in the frame. Allocating will
-  // spill them, otherwise spill explicitly.
-  if (shift_amount.is_valid()) {
-    frame_->Spill(ecx);
-  } else {
-    shift_amount = allocator()->Allocate(ecx);
-  }
-  ASSERT(shift_amount.is_register());
-  ASSERT(shift_amount.reg().is(ecx));
-  ASSERT(allocator_->count(ecx) == 1);
-
   // We will mutate the index register and possibly the object register.
   // The case where they are somehow the same register is handled
   // because we only mutate them in the case where the receiver is a
@@ -5332,93 +5988,58 @@
   frame_->Spill(object.reg());
   frame_->Spill(index.reg());
 
-  // We need a single extra temporary register.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
+  // We need two extra registers.
+  Result result = allocator()->Allocate();
+  ASSERT(result.is_valid());
+  Result scratch = allocator()->Allocate();
+  ASSERT(scratch.is_valid());
 
   // There is no virtual frame effect from here up to the final result
   // push.
-
-  // If the receiver is a smi trigger the slow case.
-  ASSERT(kSmiTag == 0);
-  __ test(object.reg(), Immediate(kSmiTagMask));
-  __ j(zero, &slow_case);
-
-  // If the index is negative or non-smi trigger the slow case.
-  ASSERT(kSmiTag == 0);
-  __ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
-  __ j(not_zero, &slow_case);
-  // Untag the index.
-  __ SmiUntag(index.reg());
-
-  __ bind(&try_again_with_new_string);
-  // Fetch the instance type of the receiver into ecx.
-  __ mov(ecx, FieldOperand(object.reg(), HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  // If the receiver is not a string trigger the slow case.
-  __ test(ecx, Immediate(kIsNotStringMask));
-  __ j(not_zero, &slow_case);
-
-  // Fetch the length field into the temporary register.
-  __ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
-  // Check for index out of range.
-  __ cmp(index.reg(), Operand(temp.reg()));
-  __ j(greater_equal, &slow_case);
-  // Reload the instance type (into the temp register this time)..
-  __ mov(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
-  __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
-
-  // We need special handling for non-flat strings.
-  ASSERT(kSeqStringTag == 0);
-  __ test(temp.reg(), Immediate(kStringRepresentationMask));
-  __ j(not_zero, &not_a_flat_string);
-  // Check for 1-byte or 2-byte string.
-  __ test(temp.reg(), Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string);
-
-  // 2-byte string.
-  // Load the 2-byte character code into the temp register.
-  __ movzx_w(temp.reg(), FieldOperand(object.reg(),
-                                      index.reg(),
-                                      times_2,
-                                      SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code);
-
-  // ASCII string.
-  __ bind(&ascii_string);
-  // Load the byte into the temp register.
-  __ movzx_b(temp.reg(), FieldOperand(object.reg(),
-                                      index.reg(),
-                                      times_1,
-                                      SeqAsciiString::kHeaderSize));
-  __ bind(&got_char_code);
-  __ SmiTag(temp.reg());
-  __ jmp(&end);
-
-  // Handle non-flat strings.
-  __ bind(&not_a_flat_string);
-  __ and_(temp.reg(), kStringRepresentationMask);
-  __ cmp(temp.reg(), kConsStringTag);
-  __ j(not_equal, &slow_case);
-
-  // ConsString.
-  // Check that the right hand side is the empty string (ie if this is really a
-  // flat string in a cons string).  If that is not the case we would rather go
-  // to the runtime system now, to flatten the string.
-  __ mov(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
-  __ cmp(Operand(temp.reg()), Factory::empty_string());
-  __ j(not_equal, &slow_case);
-  // Get the first of the two strings.
-  __ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
-  __ jmp(&try_again_with_new_string);
+  Label slow_case;
+  Label exit;
+  StringHelper::GenerateFastCharCodeAt(masm_,
+                                       object.reg(),
+                                       index.reg(),
+                                       scratch.reg(),
+                                       result.reg(),
+                                       &slow_case,
+                                       &slow_case,
+                                       &slow_case,
+                                       &slow_case);
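+  // (The helper takes four separate bailout labels - presumably covering a
+  // non-string receiver, a non-smi index, an out-of-range index, and an
+  // unflattened string - all routed to the same slow case here.)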
+  __ jmp(&exit);
 
   __ bind(&slow_case);
   // Move the undefined value into the result register, which will
   // trigger the slow case.
-  __ Set(temp.reg(), Immediate(Factory::undefined_value()));
+  __ Set(result.reg(), Immediate(Factory::undefined_value()));
 
-  __ bind(&end);
-  frame_->Push(&temp);
+  __ bind(&exit);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+  Comment(masm_, "[ GenerateCharFromCode");
+  ASSERT(args->length() == 1);
+
+  Load(args->at(0));
+
+  Result code = frame_->Pop();
+  code.ToRegister();
+  ASSERT(code.is_valid());
+
+  // StringHelper::GenerateCharFromCode may do a runtime call.
+  frame_->SpillAll();
+
+  Result result = allocator()->Allocate();
+  ASSERT(result.is_valid());
+
+  StringHelper::GenerateCharFromCode(masm_,
+                                     code.reg(),
+                                     result.reg(),
+                                     CALL_FUNCTION);
+  frame_->Push(&result);
 }
 
 
@@ -5554,12 +6175,30 @@
 
 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
-  // ArgumentsAccessStub takes the parameter count as an input argument
-  // in register eax.  Create a constant result for it.
-  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
-  // Call the shared stub to get to the arguments.length.
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
-  Result result = frame_->CallStub(&stub, &count);
+
+  Result fp = allocator_->Allocate();
+  Result result = allocator_->Allocate();
+  ASSERT(fp.is_valid() && result.is_valid());
+
+  Label exit;
+
+  // Get the number of formal parameters.
+  __ Set(result.reg(), Immediate(Smi::FromInt(scope()->num_parameters())));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &exit);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ mov(result.reg(),
+         Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
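+  // (An adaptor frame exists when the caller passed a different number of
+  // arguments than the callee declares; its length slot holds the actual
+  // argument count as a smi.)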
+
+  __ bind(&exit);
+  result.set_type_info(TypeInfo::Smi());
+  if (FLAG_debug_code) __ AbortIfNotSmi(result.reg());
   frame_->Push(&result);
 }
 
@@ -5695,7 +6334,7 @@
 }
 
 
-void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in edx and the formal
@@ -5738,25 +6377,55 @@
 }
 
 
-void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateRandomHeapNumber(
+    ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
   frame_->SpillAll();
 
-  // Make sure the frame is aligned like the OS expects.
-  static const int kFrameAlignment = OS::ActivationFrameAlignment();
-  if (kFrameAlignment > 0) {
-    ASSERT(IsPowerOf2(kFrameAlignment));
-    __ mov(edi, Operand(esp));  // Save in callee-saved register.
-    __ and_(esp, -kFrameAlignment);
-  }
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
 
-  // Call V8::RandomPositiveSmi().
-  __ call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
+  __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
 
-  // Restore stack pointer from callee-saved register edi.
-  if (kFrameAlignment > 0) {
-    __ mov(esp, Operand(edi));
+  __ bind(&slow_allocate_heapnumber);
+  // To allocate a heap number, and ensure that it is not a smi, we
+  // call the runtime function Runtime::kNumberUnaryMinus on 0, returning
+  // the double -0.0.  A new, distinct heap number is returned each time.
+  __ push(Immediate(Smi::FromInt(0)));
+  __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+  __ mov(edi, eax);
+
+  __ bind(&heapnumber_allocated);
+
+  __ PrepareCallCFunction(0, ebx);
+  __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+  // Convert 32 random bits in eax to 0.(32 random bits) in a double
+  // by computing:
+  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
+  // This is implemented on both SSE2 and FPU.
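+  // With the double exponent 0x413 (unbiased 20) and the 32 random bits r
+  // in the low mantissa bits, the value is 2^20 * (1 + r * 2^-52), so
+  // subtracting 1.0 x 2^20 leaves r * 2^-32, which lies in [0, 1).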
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope fscope(SSE2);
+    __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
+    __ movd(xmm1, Operand(ebx));
+    __ movd(xmm0, Operand(eax));
+    __ cvtss2sd(xmm1, xmm1);
+    __ pxor(xmm0, xmm1);
+    __ subsd(xmm0, xmm1);
+    __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
+  } else {
+    // 0x4130000000000000 is 1.0 x 2^20 as a double.
+    __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
+           Immediate(0x41300000));
+    __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
+    __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
+    __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
+    __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
+    __ fsubp(1);
+    __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
   }
+  __ mov(eax, edi);
 
   Result result = allocator_->Allocate(eax);
   frame_->Push(&result);
@@ -5801,7 +6470,7 @@
 
 
 void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 4);
+  ASSERT_EQ(4, args->length());
 
   // Load the arguments on the stack and call the stub.
   Load(args->at(0));
@@ -5814,6 +6483,175 @@
 }
 
 
+void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
+  // No stub. This intrinsic is only called from a few places in regexp.js.
+  const int kMaxInlineLength = 100;
+  ASSERT_EQ(3, args->length());
+  Load(args->at(0));  // Size of array, smi.
+  Load(args->at(1));  // "index" property value.
+  Load(args->at(2));  // "input" property value.
+  {
+    VirtualFrame::SpilledScope spilled_scope;
+
+    Label slowcase;
+    Label done;
+    __ mov(ebx, Operand(esp, kPointerSize * 2));
+    __ test(ebx, Immediate(kSmiTagMask));
+    __ j(not_zero, &slowcase);
+    __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
+    __ j(above, &slowcase);
+    // Smi-tagging is equivalent to multiplying by 2.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize == 1);
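+    // (A smi n is stored as n << 1, so scaling the smi length by half the
+    // pointer size below yields n * kPointerSize bytes of elements.)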
+    // Allocate RegExpResult followed by FixedArray with size in ebx.
+    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+    // Elements:  [Map][Length][..elements..]
+    __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+                          times_half_pointer_size,
+                          ebx,  // In: Number of elements (times 2, being a smi)
+                          eax,  // Out: Start of allocation (tagged).
+                          ecx,  // Out: End of allocation.
+                          edx,  // Scratch register
+                          &slowcase,
+                          TAG_OBJECT);
+    // eax: Start of allocated area, object-tagged.
+
+    // Set JSArray map to global.regexp_result_map().
+    // Set empty properties FixedArray.
+    // Set elements to point to FixedArray allocated right after the JSArray.
+    // Interleave operations for better latency.
+    __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
+    __ mov(ecx, Immediate(Factory::empty_fixed_array()));
+    __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+    __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
+    __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
+    __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
+    __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
+
+    // Set input, index and length fields from arguments.
+    __ pop(FieldOperand(eax, JSRegExpResult::kInputOffset));
+    __ pop(FieldOperand(eax, JSRegExpResult::kIndexOffset));
+    __ pop(ecx);
+    __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
+
+    // Fill out the elements FixedArray.
+    // eax: JSArray.
+    // ebx: FixedArray.
+    // ecx: Number of elements in array, as smi.
+
+    // Set map.
+    __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
+           Immediate(Factory::fixed_array_map()));
+    // Set length.
+    __ SmiUntag(ecx);
+    __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
+    // Fill contents of fixed-array with the-hole.
+    __ mov(edx, Immediate(Factory::the_hole_value()));
+    __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
+    // Fill fixed array elements with hole.
+    // eax: JSArray.
+    // ecx: Number of elements to fill.
+    // ebx: Start of elements in FixedArray.
+    // edx: the hole.
+    Label loop;
+    __ test(ecx, Operand(ecx));
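+    // The first iteration of the loop below takes its flags from this test;
+    // later iterations take them from the sub instruction.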
+    __ bind(&loop);
+    __ j(less_equal, &done);  // Jump if ecx is negative or zero.
+    __ sub(Operand(ecx), Immediate(1));
+    __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
+    __ jmp(&loop);
+
+    __ bind(&slowcase);
+    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+
+    __ bind(&done);
+  }
+  frame_->Forget(3);
+  frame_->Push(eax);
+}
+
+
+class DeferredSearchCache: public DeferredCode {
+ public:
+  DeferredSearchCache(Register dst, Register cache, Register key)
+      : dst_(dst), cache_(cache), key_(key) {
+    set_comment("[ DeferredSearchCache");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_, cache_, key_;
+};
+
+
+void DeferredSearchCache::Generate() {
+  __ push(cache_);
+  __ push(key_);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+  if (!dst_.is(eax)) {
+    __ mov(dst_, eax);
+  }
+}
+
+
+void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      Top::global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    frame_->Push(Factory::undefined_value());
+    return;
+  }
+
+  Load(args->at(1));
+  Result key = frame_->Pop();
+  key.ToRegister();
+
+  Result cache = allocator()->Allocate();
+  ASSERT(cache.is_valid());
+  __ mov(cache.reg(), ContextOperand(esi, Context::GLOBAL_INDEX));
+  __ mov(cache.reg(),
+         FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
+  __ mov(cache.reg(),
+         ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ mov(cache.reg(),
+         FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
+
+  Result tmp = allocator()->Allocate();
+  ASSERT(tmp.is_valid());
+
+  DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
+                                                          cache.reg(),
+                                                          key.reg());
+
+  const int kFingerOffset =
+      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
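+  // The cache keeps (key, value) pairs in consecutive elements, and the
+  // finger is a smi index of the most recently hit key; the matching value
+  // is read from the next element (kPointerSize further on) below.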
+  // tmp.reg() now holds finger offset as a smi.
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ mov(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
+  __ cmp(key.reg(), FieldOperand(cache.reg(),
+                                 tmp.reg(),  // as smi
+                                 times_half_pointer_size,
+                                 FixedArray::kHeaderSize));
+  deferred->Branch(not_equal);
+
+  __ mov(tmp.reg(), FieldOperand(cache.reg(),
+                                 tmp.reg(),  // as smi
+                                 times_half_pointer_size,
+                                 kPointerSize + FixedArray::kHeaderSize));
+
+  deferred->BindExit();
+  frame_->Push(&tmp);
+}
+
+
 void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
 
@@ -5825,6 +6663,322 @@
 }
 
 
+class DeferredSwapElements: public DeferredCode {
+ public:
+  DeferredSwapElements(Register object, Register index1, Register index2)
+      : object_(object), index1_(index1), index2_(index2) {
+    set_comment("[ DeferredSwapElements");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register object_, index1_, index2_;
+};
+
+
+void DeferredSwapElements::Generate() {
+  __ push(object_);
+  __ push(index1_);
+  __ push(index2_);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+}
+
+
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+  // Note: this code assumes that the indices passed are within the
+  // elements' bounds and refer to valid (non-hole) values.
+  Comment cmnt(masm_, "[ GenerateSwapElements");
+
+  ASSERT_EQ(3, args->length());
+
+  Load(args->at(0));
+  Load(args->at(1));
+  Load(args->at(2));
+
+  Result index2 = frame_->Pop();
+  index2.ToRegister();
+
+  Result index1 = frame_->Pop();
+  index1.ToRegister();
+
+  Result object = frame_->Pop();
+  object.ToRegister();
+
+  Result tmp1 = allocator()->Allocate();
+  tmp1.ToRegister();
+  Result tmp2 = allocator()->Allocate();
+  tmp2.ToRegister();
+
+  frame_->Spill(object.reg());
+  frame_->Spill(index1.reg());
+  frame_->Spill(index2.reg());
+
+  DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
+                                                            index1.reg(),
+                                                            index2.reg());
+
+  // Fetch the map and check if array is in fast case.
+  // Check that object doesn't require security checks and
+  // has no indexed interceptor.
+  __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
+  deferred->Branch(less);
+  __ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
+  __ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+  deferred->Branch(not_zero);
+
+  // Check the object's elements are in fast case.
+  __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
+  __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  deferred->Branch(not_equal);
+
+  // Smi-tagging is equivalent to multiplying by 2.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  // Check that both indices are smis.
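+  // (Smis have a 0 tag bit, so or-ing the two words and testing the tag
+  // mask once checks both indices at the same time.)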
+  __ mov(tmp2.reg(), index1.reg());
+  __ or_(tmp2.reg(), Operand(index2.reg()));
+  __ test(tmp2.reg(), Immediate(kSmiTagMask));
+  deferred->Branch(not_zero);
+
+  // Bring addresses into index1 and index2.
+  __ lea(index1.reg(), FieldOperand(tmp1.reg(),
+                                    index1.reg(),
+                                    times_half_pointer_size,  // index1 is Smi
+                                    FixedArray::kHeaderSize));
+  __ lea(index2.reg(), FieldOperand(tmp1.reg(),
+                                    index2.reg(),
+                                    times_half_pointer_size,  // index2 is Smi
+                                    FixedArray::kHeaderSize));
+
+  // Swap elements.
+  __ mov(object.reg(), Operand(index1.reg(), 0));
+  __ mov(tmp2.reg(),   Operand(index2.reg(), 0));
+  __ mov(Operand(index2.reg(), 0), object.reg());
+  __ mov(Operand(index1.reg(), 0), tmp2.reg());
+
+  Label done;
+  __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
+  // Possible optimization: do a check that both values are Smis
+  // (or them and test against Smi mask.)
+
+  __ mov(tmp2.reg(), tmp1.reg());
+  RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
+  __ CallStub(&recordWrite1);
+
+  RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
+  __ CallStub(&recordWrite2);
+
+  __ bind(&done);
+
+  deferred->BindExit();
+  frame_->Push(Factory::undefined_value());
+}
+
+
+void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateCallFunction");
+
+  ASSERT(args->length() >= 2);
+
+  int n_args = args->length() - 2;  // for receiver and function.
+  Load(args->at(0));  // receiver
+  for (int i = 0; i < n_args; i++) {
+    Load(args->at(i + 1));
+  }
+  Load(args->at(n_args + 1));  // function
+  Result result = frame_->CallJSFunction(n_args);
+  frame_->Push(&result);
+}
+
+
+// Generates the Math.pow method. Only handles special cases and
+// branches to the runtime system for everything else. Please note
+// that this function assumes that the callsite has executed ToNumber
+// on both arguments.
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+  Load(args->at(0));
+  Load(args->at(1));
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
+    frame_->Push(&res);
+  } else {
+    CpuFeatures::Scope use_sse2(SSE2);
+    Label allocate_return;
+    // Load the two operands while leaving the values on the frame.
+    frame()->Dup();
+    Result exponent = frame()->Pop();
+    exponent.ToRegister();
+    frame()->Spill(exponent.reg());
+    frame()->PushElementAt(1);
+    Result base = frame()->Pop();
+    base.ToRegister();
+    frame()->Spill(base.reg());
+
+    Result answer = allocator()->Allocate();
+    ASSERT(answer.is_valid());
+    ASSERT(!exponent.reg().is(base.reg()));
+    JumpTarget call_runtime;
+
+    // Save 1 in xmm3 - we need this several times later on.
+    __ mov(answer.reg(), Immediate(1));
+    __ cvtsi2sd(xmm3, Operand(answer.reg()));
+
+    Label exponent_nonsmi;
+    Label base_nonsmi;
+    // If the exponent is a heap number go to that specific case.
+    __ test(exponent.reg(), Immediate(kSmiTagMask));
+    __ j(not_zero, &exponent_nonsmi);
+    __ test(base.reg(), Immediate(kSmiTagMask));
+    __ j(not_zero, &base_nonsmi);
+
+    // Optimized version when y is an integer.
+    Label powi;
+    __ SmiUntag(base.reg());
+    __ cvtsi2sd(xmm0, Operand(base.reg()));
+    __ jmp(&powi);
+    // The exponent is a smi and the base is a heap number.
+    __ bind(&base_nonsmi);
+    __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    call_runtime.Branch(not_equal);
+
+    __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+
+    // Optimized version of pow if y is an integer.
+    __ bind(&powi);
+    __ SmiUntag(exponent.reg());
+
+    // Save exponent in base as we need to check if exponent is negative later.
+    // We know that base and exponent are in different registers.
+    __ mov(base.reg(), exponent.reg());
+
+    // Get absolute value of exponent.
+    Label no_neg;
+    __ cmp(exponent.reg(), 0);
+    __ j(greater_equal, &no_neg);
+    __ neg(exponent.reg());
+    __ bind(&no_neg);
+
+    // Load xmm1 with 1.
+    __ movsd(xmm1, xmm3);
+    Label while_true;
+    Label no_multiply;
+
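+    // Exponentiation by squaring: each iteration shifts the lowest exponent
+    // bit into the carry flag and, if it was set, folds the current power of
+    // the base (xmm0) into the result (xmm1); xmm0 is then squared.  The
+    // invariant is xmm1 * xmm0^exponent == base^|original exponent|.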
+    __ bind(&while_true);
+    __ shr(exponent.reg(), 1);
+    __ j(not_carry, &no_multiply);
+    __ mulsd(xmm1, xmm0);
+    __ bind(&no_multiply);
+    __ test(exponent.reg(), Operand(exponent.reg()));
+    __ mulsd(xmm0, xmm0);
+    __ j(not_zero, &while_true);
+
+    // The base register now holds the original exponent - if the exponent
+    // is negative, return 1/result.
+    __ test(base.reg(), Operand(base.reg()));
+    __ j(positive, &allocate_return);
+    // Special case if xmm1 has reached infinity.
+    __ mov(answer.reg(), Immediate(0x7FB00000));
+    __ movd(xmm0, Operand(answer.reg()));
+    __ cvtss2sd(xmm0, xmm0);
+    __ ucomisd(xmm0, xmm1);
+    call_runtime.Branch(equal);
+    __ divsd(xmm3, xmm1);
+    __ movsd(xmm1, xmm3);
+    __ jmp(&allocate_return);
+
+    // The exponent (or both operands) is a heap number - in any case we now
+    // work on doubles.
+    __ bind(&exponent_nonsmi);
+    __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    call_runtime.Branch(not_equal);
+    __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
+    // Test if the exponent is NaN.
+    __ ucomisd(xmm1, xmm1);
+    call_runtime.Branch(parity_even);
+
+    Label base_not_smi;
+    Label handle_special_cases;
+    __ test(base.reg(), Immediate(kSmiTagMask));
+    __ j(not_zero, &base_not_smi);
+    __ SmiUntag(base.reg());
+    __ cvtsi2sd(xmm0, Operand(base.reg()));
+    __ jmp(&handle_special_cases);
+    __ bind(&base_not_smi);
+    __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    call_runtime.Branch(not_equal);
+    __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
+    __ and_(answer.reg(), HeapNumber::kExponentMask);
+    __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask));
+    // base is NaN or +/-Infinity
+    call_runtime.Branch(greater_equal);
+    __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+
+    // base is in xmm0 and exponent is in xmm1.
+    __ bind(&handle_special_cases);
+    Label not_minus_half;
+    // Test for -0.5.
+    // Load xmm2 with -0.5.
+    __ mov(answer.reg(), Immediate(0xBF000000));
+    __ movd(xmm2, Operand(answer.reg()));
+    __ cvtss2sd(xmm2, xmm2);
+    // xmm2 now has -0.5.
+    __ ucomisd(xmm2, xmm1);
+    __ j(not_equal, &not_minus_half);
+
+    // Calculates reciprocal of square root.
+    // Note that 1/sqrt(x) = sqrt(1/x).
+    __ divsd(xmm3, xmm0);
+    __ movsd(xmm1, xmm3);
+    __ sqrtsd(xmm1, xmm1);
+    __ jmp(&allocate_return);
+
+    // Test for 0.5.
+    __ bind(&not_minus_half);
+    // Load xmm2 with 0.5.
+    // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+    __ addsd(xmm2, xmm3);
+    // xmm2 now has 0.5.
+    __ comisd(xmm2, xmm1);
+    call_runtime.Branch(not_equal);
+    // Calculates square root.
+    __ movsd(xmm1, xmm0);
+    __ sqrtsd(xmm1, xmm1);
+
+    JumpTarget done;
+    Label failure, success;
+    __ bind(&allocate_return);
+    // Make a copy of the frame to enable us to handle allocation
+    // failure after the JumpTarget jump.
+    VirtualFrame* clone = new VirtualFrame(frame());
+    __ AllocateHeapNumber(answer.reg(), exponent.reg(),
+                          base.reg(), &failure);
+    __ movdbl(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
+    // Remove the two original values from the frame - we only need those
+    // in the case where we branch to runtime.
+    frame()->Drop(2);
+    exponent.Unuse();
+    base.Unuse();
+    done.Jump(&answer);
+    // Use the copy of the original frame as our current frame.
+    RegisterFile empty_regs;
+    SetFrame(clone, &empty_regs);
+    // If we experience an allocation failure we branch to runtime.
+    __ bind(&failure);
+    call_runtime.Bind();
+    answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
+
+    done.Bind(&answer);
+    frame()->Push(&answer);
+  }
+}
+
+
 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
@@ -5843,7 +6997,65 @@
 }
 
 
+// Generates the Math.sqrt method. Please note - this function assumes that
+// the callsite has executed ToNumber on the argument.
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+  Load(args->at(0));
+
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
+    frame()->Push(&result);
+  } else {
+    CpuFeatures::Scope use_sse2(SSE2);
+    // Leave original value on the frame if we need to call runtime.
+    frame()->Dup();
+    Result result = frame()->Pop();
+    result.ToRegister();
+    frame()->Spill(result.reg());
+    Label runtime;
+    Label non_smi;
+    Label load_done;
+    JumpTarget end;
+
+    __ test(result.reg(), Immediate(kSmiTagMask));
+    __ j(not_zero, &non_smi);
+    __ SmiUntag(result.reg());
+    __ cvtsi2sd(xmm0, Operand(result.reg()));
+    __ jmp(&load_done);
+    __ bind(&non_smi);
+    __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    __ j(not_equal, &runtime);
+    __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
+
+    __ bind(&load_done);
+    __ sqrtsd(xmm0, xmm0);
+    // A copy of the virtual frame to allow us to go to runtime after the
+    // JumpTarget jump.
+    Result scratch = allocator()->Allocate();
+    VirtualFrame* clone = new VirtualFrame(frame());
+    __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime);
+
+    __ movdbl(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
+    frame()->Drop(1);
+    scratch.Unuse();
+    end.Jump(&result);
+    // We only branch to runtime if we have an allocation error.
+    // Use the copy of the original frame as our current frame.
+    RegisterFile empty_regs;
+    SetFrame(clone, &empty_regs);
+    __ bind(&runtime);
+    result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
+
+    end.Bind(&result);
+    frame()->Push(&result);
+  }
+}
+
+
 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+  ASSERT(!in_safe_int32_mode());
   if (CheckForInlineRuntimeCall(node)) {
     return;
   }
@@ -5970,64 +7182,123 @@
     }
 
   } else {
-    Load(node->expression());
-    bool overwrite =
-        (node->expression()->AsBinaryOperation() != NULL &&
-         node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
-    switch (op) {
-      case Token::SUB: {
-        GenericUnaryOpStub stub(Token::SUB, overwrite);
-        Result operand = frame_->Pop();
-        Result answer = frame_->CallStub(&stub, &operand);
-        frame_->Push(&answer);
-        break;
+    if (in_safe_int32_mode()) {
+      Visit(node->expression());
+      Result value = frame_->Pop();
+      ASSERT(value.is_untagged_int32());
+      // Registers containing an int32 value are not multiply used.
+      ASSERT(!value.is_register() || !frame_->is_used(value.reg()));
+      value.ToRegister();
+      switch (op) {
+        case Token::SUB: {
+          __ neg(value.reg());
+          if (node->no_negative_zero()) {
+            // -MIN_INT is MIN_INT with the overflow flag set.
+            unsafe_bailout_->Branch(overflow);
+          } else {
+            // MIN_INT and 0 both have bad negations.  They both have 31 zeros.
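+            // (After neg, a result of 0 came from 0, i.e. a negative zero,
+            // and 0x80000000 came from MIN_INT; both have all-zero low 31
+            // bits, so one test catches them.)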
+            __ test(value.reg(), Immediate(0x7FFFFFFF));
+            unsafe_bailout_->Branch(zero);
+          }
+          break;
+        }
+        case Token::BIT_NOT: {
+          __ not_(value.reg());
+          break;
+        }
+        case Token::ADD: {
+          // Unary plus has no effect on int32 values.
+          break;
+        }
+        default:
+          UNREACHABLE();
+          break;
       }
+      frame_->Push(&value);
+    } else {
+      Load(node->expression());
+      bool overwrite =
+          (node->expression()->AsBinaryOperation() != NULL &&
+           node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
+      switch (op) {
+        case Token::NOT:
+        case Token::DELETE:
+        case Token::TYPEOF:
+          UNREACHABLE();  // handled above
+          break;
 
-      case Token::BIT_NOT: {
-        // Smi check.
-        JumpTarget smi_label;
-        JumpTarget continue_label;
-        Result operand = frame_->Pop();
-        operand.ToRegister();
-        __ test(operand.reg(), Immediate(kSmiTagMask));
-        smi_label.Branch(zero, &operand, taken);
+        case Token::SUB: {
+          GenericUnaryOpStub stub(Token::SUB, overwrite);
+          Result operand = frame_->Pop();
+          Result answer = frame_->CallStub(&stub, &operand);
+          answer.set_type_info(TypeInfo::Number());
+          frame_->Push(&answer);
+          break;
+        }
+        case Token::BIT_NOT: {
+          // Smi check.
+          JumpTarget smi_label;
+          JumpTarget continue_label;
+          Result operand = frame_->Pop();
+          TypeInfo operand_info = operand.type_info();
+          operand.ToRegister();
+          if (operand_info.IsSmi()) {
+            if (FLAG_debug_code) __ AbortIfNotSmi(operand.reg());
+            frame_->Spill(operand.reg());
+            // Set smi tag bit. It will be reset by the not operation.
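+            // (For a smi w = 2v, ~(w | 1) = ~(2v + 1) = -2v - 2 = 2 * ~v,
+            // i.e. exactly the smi encoding of ~v.)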
+            __ lea(operand.reg(), Operand(operand.reg(), kSmiTagMask));
+            __ not_(operand.reg());
+            Result answer = operand;
+            answer.set_type_info(TypeInfo::Smi());
+            frame_->Push(&answer);
+          } else {
+            __ test(operand.reg(), Immediate(kSmiTagMask));
+            smi_label.Branch(zero, &operand, taken);
 
-        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
-        Result answer = frame_->CallStub(&stub, &operand);
-        continue_label.Jump(&answer);
+            GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+            Result answer = frame_->CallStub(&stub, &operand);
+            continue_label.Jump(&answer);
 
-        smi_label.Bind(&answer);
-        answer.ToRegister();
-        frame_->Spill(answer.reg());
-        __ not_(answer.reg());
-        __ and_(answer.reg(), ~kSmiTagMask);  // Remove inverted smi-tag.
+            smi_label.Bind(&answer);
+            answer.ToRegister();
+            frame_->Spill(answer.reg());
+            // Set smi tag bit. It will be reset by the not operation.
+            __ lea(answer.reg(), Operand(answer.reg(), kSmiTagMask));
+            __ not_(answer.reg());
 
-        continue_label.Bind(&answer);
-        frame_->Push(&answer);
-        break;
-      }
+            continue_label.Bind(&answer);
+            answer.set_type_info(TypeInfo::Integer32());
+            frame_->Push(&answer);
+          }
+          break;
+        }
+        case Token::ADD: {
+          // Smi check.
+          JumpTarget continue_label;
+          Result operand = frame_->Pop();
+          TypeInfo operand_info = operand.type_info();
+          operand.ToRegister();
+          __ test(operand.reg(), Immediate(kSmiTagMask));
+          continue_label.Branch(zero, &operand, taken);
 
-      case Token::ADD: {
-        // Smi check.
-        JumpTarget continue_label;
-        Result operand = frame_->Pop();
-        operand.ToRegister();
-        __ test(operand.reg(), Immediate(kSmiTagMask));
-        continue_label.Branch(zero, &operand, taken);
-
-        frame_->Push(&operand);
-        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
+          frame_->Push(&operand);
+          Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
                                               CALL_FUNCTION, 1);
 
-        continue_label.Bind(&answer);
-        frame_->Push(&answer);
-        break;
+          continue_label.Bind(&answer);
+          if (operand_info.IsSmi()) {
+            answer.set_type_info(TypeInfo::Smi());
+          } else if (operand_info.IsInteger32()) {
+            answer.set_type_info(TypeInfo::Integer32());
+          } else {
+            answer.set_type_info(TypeInfo::Number());
+          }
+          frame_->Push(&answer);
+          break;
+        }
+        default:
+          UNREACHABLE();
       }
-
-      default:
-        // NOT, DELETE, TYPEOF, and VOID are handled outside the
-        // switch.
-        UNREACHABLE();
     }
   }
 }
@@ -6039,8 +7310,10 @@
 // specialized add or subtract stub.  The result is left in dst.
 class DeferredPrefixCountOperation: public DeferredCode {
  public:
-  DeferredPrefixCountOperation(Register dst, bool is_increment)
-      : dst_(dst), is_increment_(is_increment) {
+  DeferredPrefixCountOperation(Register dst,
+                               bool is_increment,
+                               TypeInfo input_type)
+      : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
     set_comment("[ DeferredCountOperation");
   }
 
@@ -6049,6 +7322,7 @@
  private:
   Register dst_;
   bool is_increment_;
+  TypeInfo input_type_;
 };
 
 
@@ -6059,15 +7333,21 @@
   } else {
     __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
   }
-  __ push(dst_);
-  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(1)));
-  if (is_increment_) {
-    __ CallRuntime(Runtime::kNumberAdd, 2);
+  Register left;
+  if (input_type_.IsNumber()) {
+    left = dst_;
   } else {
-    __ CallRuntime(Runtime::kNumberSub, 2);
+    __ push(dst_);
+    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+    left = eax;
   }
+
+  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+                           NO_OVERWRITE,
+                           NO_GENERIC_BINARY_FLAGS,
+                           TypeInfo::Number());
+  stub.GenerateCall(masm_, left, Smi::FromInt(1));
+
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -6079,8 +7359,14 @@
 // The result is left in dst.
 class DeferredPostfixCountOperation: public DeferredCode {
  public:
-  DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
-      : dst_(dst), old_(old), is_increment_(is_increment) {
+  DeferredPostfixCountOperation(Register dst,
+                                Register old,
+                                bool is_increment,
+                                TypeInfo input_type)
+      : dst_(dst),
+        old_(old),
+        is_increment_(is_increment),
+        input_type_(input_type) {
     set_comment("[ DeferredCountOperation");
   }
 
@@ -6090,6 +7376,7 @@
   Register dst_;
   Register old_;
   bool is_increment_;
+  TypeInfo input_type_;
 };
 
 
@@ -6100,26 +7387,30 @@
   } else {
     __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
   }
-  __ push(dst_);
-  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-
-  // Save the result of ToNumber to use as the old value.
-  __ push(eax);
-
-  // Call the runtime for the addition or subtraction.
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(1)));
-  if (is_increment_) {
-    __ CallRuntime(Runtime::kNumberAdd, 2);
+  Register left;
+  if (input_type_.IsNumber()) {
+    __ push(dst_);  // Save the input to use as the old value.
+    left = dst_;
   } else {
-    __ CallRuntime(Runtime::kNumberSub, 2);
+    __ push(dst_);
+    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+    __ push(eax);  // Save the result of ToNumber to use as the old value.
+    left = eax;
   }
+
+  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+                           NO_OVERWRITE,
+                           NO_GENERIC_BINARY_FLAGS,
+                           TypeInfo::Number());
+  stub.GenerateCall(masm_, left, Smi::FromInt(1));
+
   if (!dst_.is(eax)) __ mov(dst_, eax);
   __ pop(old_);
 }
 
 
 void CodeGenerator::VisitCountOperation(CountOperation* node) {
+  ASSERT(!in_safe_int32_mode());
   Comment cmnt(masm_, "[ CountOperation");
 
   bool is_postfix = node->is_postfix();
@@ -6154,31 +7445,35 @@
       old_value = allocator_->Allocate();
       ASSERT(old_value.is_valid());
       __ mov(old_value.reg(), new_value.reg());
+
+      // The return value for postfix operations is ToNumber(input).
+      // Keep more precise type info if the input is some kind of
+      // number already. If the input is not a number we have to wait
+      // for the deferred code to convert it.
+      if (new_value.type_info().IsNumber()) {
+        old_value.set_type_info(new_value.type_info());
+      }
     }
+
     // Ensure the new value is writable.
     frame_->Spill(new_value.reg());
 
-    // In order to combine the overflow and the smi tag check, we need
-    // to be able to allocate a byte register.  We attempt to do so
-    // without spilling.  If we fail, we will generate separate overflow
-    // and smi tag checks.
-    //
-    // We allocate and clear the temporary byte register before
-    // performing the count operation since clearing the register using
-    // xor will clear the overflow flag.
-    Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
-    if (tmp.is_valid()) {
-      __ Set(tmp.reg(), Immediate(0));
-    }
-
-    DeferredCode* deferred = NULL;
-    if (is_postfix) {
-      deferred = new DeferredPostfixCountOperation(new_value.reg(),
-                                                   old_value.reg(),
-                                                   is_increment);
+    Result tmp;
+    if (new_value.is_smi()) {
+      if (FLAG_debug_code) __ AbortIfNotSmi(new_value.reg());
     } else {
-      deferred = new DeferredPrefixCountOperation(new_value.reg(),
-                                                  is_increment);
+      // We don't know statically if the input is a smi.
+      // In order to combine the overflow and the smi tag check, we need
+      // to be able to allocate a byte register.  We attempt to do so
+      // without spilling.  If we fail, we will generate separate overflow
+      // and smi tag checks.
+      // We allocate and clear a temporary byte register before performing
+      // the count operation since clearing the register using xor will clear
+      // the overflow flag.
+      tmp = allocator_->AllocateByteRegisterWithoutSpilling();
+      if (tmp.is_valid()) {
+        __ Set(tmp.reg(), Immediate(0));
+      }
     }
 
     if (is_increment) {
@@ -6187,25 +7482,57 @@
       __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
     }
 
-    // If the count operation didn't overflow and the result is a valid
-    // smi, we're done. Otherwise, we jump to the deferred slow-case
-    // code.
-    if (tmp.is_valid()) {
+    DeferredCode* deferred = NULL;
+    if (is_postfix) {
+      deferred = new DeferredPostfixCountOperation(new_value.reg(),
+                                                   old_value.reg(),
+                                                   is_increment,
+                                                   new_value.type_info());
+    } else {
+      deferred = new DeferredPrefixCountOperation(new_value.reg(),
+                                                  is_increment,
+                                                  new_value.type_info());
+    }
+
+    if (new_value.is_smi()) {
+      // In case we have a smi as input just check for overflow.
+      deferred->Branch(overflow);
+    } else {
+      // If the count operation didn't overflow and the result is a valid
+      // smi, we're done. Otherwise, we jump to the deferred slow-case
+      // code.
       // We combine the overflow and the smi tag check if we could
       // successfully allocate a temporary byte register.
-      __ setcc(overflow, tmp.reg());
-      __ or_(Operand(tmp.reg()), new_value.reg());
-      __ test(tmp.reg(), Immediate(kSmiTagMask));
-      tmp.Unuse();
-      deferred->Branch(not_zero);
-    } else {
-      // Otherwise we test separately for overflow and smi tag.
-      deferred->Branch(overflow);
-      __ test(new_value.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      if (tmp.is_valid()) {
+        __ setcc(overflow, tmp.reg());
+        __ or_(Operand(tmp.reg()), new_value.reg());
+        __ test(tmp.reg(), Immediate(kSmiTagMask));
+        tmp.Unuse();
+        deferred->Branch(not_zero);
+      } else {
+        // Otherwise we test separately for overflow and smi tag.
+        deferred->Branch(overflow);
+        __ test(new_value.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(not_zero);
+      }
     }
     deferred->BindExit();
 
+    // Postfix count operations return their input converted to
+    // number. The case when the input is already a number is covered
+    // above in the allocation code for old_value.
+    if (is_postfix && !new_value.type_info().IsNumber()) {
+      old_value.set_type_info(TypeInfo::Number());
+    }
+
+    // The result of ++ or -- is an Integer32 if the
+    // input is a smi. Otherwise it is a number.
+    if (new_value.is_smi()) {
+      new_value.set_type_info(TypeInfo::Integer32());
+    } else {
+      new_value.set_type_info(TypeInfo::Number());
+    }
+
     // Postfix: store the old value in the allocated slot under the
     // reference.
     if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
@@ -6220,10 +7547,224 @@
 }
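
The setcc/or/test sequence in the fast path folds two failure conditions into a single branch. In C++ terms, assuming the ia32 smi layout (kSmiTag == 0, kSmiTagMask == 1):

    #include <cstdint>

    // 'overflow' models the CPU flag captured by setcc into the byte register.
    bool NeedsDeferredCount(uint32_t value_after_count, bool overflow) {
      uint32_t tmp = overflow ? 1u : 0u;  // setcc(overflow, tmp)
      tmp |= value_after_count;           // or_(tmp, new_value)
      return (tmp & 1u) != 0;             // test(tmp, kSmiTagMask) -> not_zero
    }
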
 
 
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-  Comment cmnt(masm_, "[ BinaryOperation");
+void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) {
   Token::Value op = node->op();
+  Comment cmnt(masm_, "[ Int32BinaryOperation");
+  ASSERT(in_safe_int32_mode());
+  ASSERT(safe_int32_mode_enabled());
+  ASSERT(FLAG_safe_int32_compiler);
 
+  if (op == Token::COMMA) {
+    // Discard left value.
+    frame_->Nip(1);
+    return;
+  }
+
+  Result right = frame_->Pop();
+  Result left = frame_->Pop();
+
+  ASSERT(right.is_untagged_int32());
+  ASSERT(left.is_untagged_int32());
+  // Registers containing an int32 value are not multiply used.
+  ASSERT(!left.is_register() || !frame_->is_used(left.reg()));
+  ASSERT(!right.is_register() || !frame_->is_used(right.reg()));
+
+  switch (op) {
+    case Token::COMMA:
+    case Token::OR:
+    case Token::AND:
+      UNREACHABLE();
+      break;
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+      if (left.is_constant() || right.is_constant()) {
+        int32_t value;  // Put constant in value, non-constant in left.
+        // Constants are known to be int32 values, from static analysis,
+        // or else will be converted to int32 by implicit ECMA [[ToInt32]].
+        if (left.is_constant()) {
+          ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
+          value = NumberToInt32(*left.handle());
+          left = right;
+        } else {
+          ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
+          value = NumberToInt32(*right.handle());
+        }
+
+        left.ToRegister();
+        if (op == Token::BIT_OR) {
+          __ or_(Operand(left.reg()), Immediate(value));
+        } else if (op == Token::BIT_XOR) {
+          __ xor_(Operand(left.reg()), Immediate(value));
+        } else {
+          ASSERT(op == Token::BIT_AND);
+          __ and_(Operand(left.reg()), Immediate(value));
+        }
+      } else {
+        ASSERT(left.is_register());
+        ASSERT(right.is_register());
+        if (op == Token::BIT_OR) {
+          __ or_(left.reg(), Operand(right.reg()));
+        } else if (op == Token::BIT_XOR) {
+          __ xor_(left.reg(), Operand(right.reg()));
+        } else {
+          ASSERT(op == Token::BIT_AND);
+          __ and_(left.reg(), Operand(right.reg()));
+        }
+      }
+      frame_->Push(&left);
+      right.Unuse();
+      break;
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      bool test_shr_overflow = false;
+      left.ToRegister();
+      if (right.is_constant()) {
+        ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
+        int shift_amount = NumberToInt32(*right.handle()) & 0x1F;
+        if (op == Token::SAR) {
+          __ sar(left.reg(), shift_amount);
+        } else if (op == Token::SHL) {
+          __ shl(left.reg(), shift_amount);
+        } else {
+          ASSERT(op == Token::SHR);
+          __ shr(left.reg(), shift_amount);
+          if (shift_amount == 0) test_shr_overflow = true;
+        }
+      } else {
+        // Move right to ecx.
+        if (left.is_register() && left.reg().is(ecx)) {
+          right.ToRegister();
+          __ xchg(left.reg(), right.reg());
+          left = right;  // Left is unused here; the copy of right is unused by Push.
+        } else {
+          right.ToRegister(ecx);
+          left.ToRegister();
+        }
+        if (op == Token::SAR) {
+          __ sar_cl(left.reg());
+        } else if (op == Token::SHL) {
+          __ shl_cl(left.reg());
+        } else {
+          ASSERT(op == Token::SHR);
+          __ shr_cl(left.reg());
+          test_shr_overflow = true;
+        }
+      }
+      {
+        Register left_reg = left.reg();
+        frame_->Push(&left);
+        right.Unuse();
+        if (test_shr_overflow && !node->to_int32()) {
+          // Uint32 results with top bit set are not Int32 values.
+          // If they will be forced to Int32, skip the test.
+          // Test is needed because shr with shift amount 0 does not set flags.
+          __ test(left_reg, Operand(left_reg));
+          unsafe_bailout_->Branch(sign);
+        }
+      }
+      break;
+    }
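
The sign test after SHR exists because ECMA's unsigned shift produces a Uint32, which is a valid untagged Int32 only when the top bit is clear. A sketch of the check, assuming the untagged-int32 representation used by the safe-int32 path:

    #include <cstdint>

    uint32_t ShrWithBailout(uint32_t left, uint32_t shift, bool* bailout) {
      uint32_t result = left >> (shift & 0x1F);  // ECMA masks the shift count
      // Corresponds to the 'sign' branch above; skipped when the consumer
      // applies ToInt32 anyway (node->to_int32()).
      *bailout = (result & 0x80000000u) != 0;
      return result;
    }
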
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+      if ((left.is_constant() && op != Token::SUB) || right.is_constant()) {
+        int32_t value;  // Put constant in value, non-constant in left.
+        if (right.is_constant()) {
+          ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
+          value = NumberToInt32(*right.handle());
+        } else {
+          ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
+          value = NumberToInt32(*left.handle());
+          left = right;
+        }
+
+        left.ToRegister();
+        if (op == Token::ADD) {
+          __ add(Operand(left.reg()), Immediate(value));
+        } else if (op == Token::SUB) {
+          __ sub(Operand(left.reg()), Immediate(value));
+        } else {
+          ASSERT(op == Token::MUL);
+          __ imul(left.reg(), left.reg(), value);
+        }
+      } else {
+        left.ToRegister();
+        ASSERT(left.is_register());
+        ASSERT(right.is_register());
+        if (op == Token::ADD) {
+          __ add(left.reg(), Operand(right.reg()));
+        } else if (op == Token::SUB) {
+          __ sub(left.reg(), Operand(right.reg()));
+        } else {
+          ASSERT(op == Token::MUL);
+          // We have statically verified that a negative zero can be ignored.
+          __ imul(left.reg(), Operand(right.reg()));
+        }
+      }
+      right.Unuse();
+      frame_->Push(&left);
+      if (!node->to_int32()) {
+        // If the result of ADD, SUB, or MUL feeds a ToInt32 conversion,
+        // overflow is harmless and this bailout is skipped entirely.
+        unsafe_bailout_->Branch(overflow);
+      }
+      break;
+    case Token::DIV:
+    case Token::MOD: {
+      if (right.is_register() && (right.reg().is(eax) || right.reg().is(edx))) {
+        if (left.is_register() && left.reg().is(edi)) {
+          right.ToRegister(ebx);
+        } else {
+          right.ToRegister(edi);
+        }
+      }
+      left.ToRegister(eax);
+      Result edx_reg = allocator_->Allocate(edx);
+      right.ToRegister();
+      // The results are unused here because BreakTarget::Branch cannot handle
+      // live results.
+      Register right_reg = right.reg();
+      left.Unuse();
+      right.Unuse();
+      edx_reg.Unuse();
+      __ cmp(right_reg, 0);
+      // Ensure divisor is positive: no chance of non-int32 or -0 result.
+      unsafe_bailout_->Branch(less_equal);
+      __ cdq();  // Sign-extend eax into edx:eax
+      __ idiv(right_reg);
+      if (op == Token::MOD) {
+        // Negative zero can arise from a negative dividend with a zero
+        // remainder.
+        if (!node->no_negative_zero()) {
+          Label not_negative_zero;
+          __ test(edx, Operand(edx));
+          __ j(not_zero, &not_negative_zero);
+          __ test(eax, Operand(eax));
+          unsafe_bailout_->Branch(negative);
+          __ bind(&not_negative_zero);
+        }
+        Result edx_result(edx, TypeInfo::Integer32());
+        edx_result.set_untagged_int32(true);
+        frame_->Push(&edx_result);
+      } else {
+        ASSERT(op == Token::DIV);
+        __ test(edx, Operand(edx));
+        unsafe_bailout_->Branch(not_equal);
+        Result eax_result(eax, TypeInfo::Integer32());
+        eax_result.set_untagged_int32(true);
+        frame_->Push(&eax_result);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
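
For MOD, the extra check guards against negative zero, which has no untagged int32 representation; it appears exactly when the remainder is zero but the dividend is negative. A sketch of the JavaScript semantics involved (not V8 code):

    #include <cmath>

    // ECMA's % keeps the sign of the dividend, so e.g. JsMod(-4.0, 2.0)
    // returns -0.0; std::fmod implements the same rule.
    double JsMod(double dividend, double divisor) {
      return std::fmod(dividend, divisor);
    }
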
+
+
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
   // According to ECMA-262 section 11.11, page 58, the binary logical
   // operators must yield the result of one of the two expressions
   // before any ToBoolean() conversions. This means that the value
@@ -6233,7 +7774,8 @@
   // control flow), we force the right hand side to do the same. This
   // is necessary because we assume that if we get control flow on the
   // last path out of an expression we got it on all paths.
-  if (op == Token::AND) {
+  if (node->op() == Token::AND) {
+    ASSERT(!in_safe_int32_mode());
     JumpTarget is_true;
     ControlDestination dest(&is_true, destination()->false_target(), true);
     LoadCondition(node->left(), &dest, false);
@@ -6296,7 +7838,9 @@
       exit.Bind();
     }
 
-  } else if (op == Token::OR) {
+  } else {
+    ASSERT(node->op() == Token::OR);
+    ASSERT(!in_safe_int32_mode());
     JumpTarget is_false;
     ControlDestination dest(destination()->true_target(), &is_false, false);
     LoadCondition(node->left(), &dest, false);
@@ -6357,7 +7901,19 @@
       // Exit (always with a materialized value).
       exit.Bind();
     }
+  }
+}
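
As the ECMA-262 reference above notes, && and || yield one of the operand values, never a coerced boolean. The semantics in miniature (Value and to_boolean are stand-ins):

    template <typename Value>
    Value LogicalAnd(Value left, Value right, bool (*to_boolean)(const Value&)) {
      return to_boolean(left) ? right : left;  // '0 && x' yields 0, not false
    }

    template <typename Value>
    Value LogicalOr(Value left, Value right, bool (*to_boolean)(const Value&)) {
      return to_boolean(left) ? left : right;  // 'x || y' yields x when truthy
    }
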
 
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+  Comment cmnt(masm_, "[ BinaryOperation");
+
+  if (node->op() == Token::AND || node->op() == Token::OR) {
+    GenerateLogicalBooleanOperation(node);
+  } else if (in_safe_int32_mode()) {
+    Visit(node->left());
+    Visit(node->right());
+    Int32BinaryOperation(node);
   } else {
     // NOTE: The code below assumes that the slow cases (calls to runtime)
     // never return a constant/immutable object.
@@ -6370,19 +7926,28 @@
       overwrite_mode = OVERWRITE_RIGHT;
     }
 
-    Load(node->left());
-    Load(node->right());
-    GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
+    if (node->left()->IsTrivial()) {
+      Load(node->right());
+      Result right = frame_->Pop();
+      frame_->Push(node->left());
+      frame_->Push(&right);
+    } else {
+      Load(node->left());
+      Load(node->right());
+    }
+    GenericBinaryOperation(node, overwrite_mode);
   }
 }
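
Loading the right operand first is safe only because a trivial left operand (a literal or side-effect-free slot) cannot observe the reordering; the frame ends up in the same [left, right] shape either way. A toy model of the frame manipulation:

    #include <vector>

    using Value = int;  // stand-in for a frame element

    void LoadOperands(std::vector<Value>* frame, Value left, Value right,
                      bool left_is_trivial) {
      if (left_is_trivial) {
        frame->push_back(right);   // Load(node->right())
        Value r = frame->back();   // Result right = frame_->Pop()
        frame->pop_back();
        frame->push_back(left);    // frame_->Push(node->left())
        frame->push_back(r);       // frame_->Push(&right)
      } else {
        frame->push_back(left);
        frame->push_back(right);
      }
      // Either branch leaves the frame as [..., left, right].
    }
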
 
 
 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+  ASSERT(!in_safe_int32_mode());
   frame_->PushFunction();
 }
 
 
 void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+  ASSERT(!in_safe_int32_mode());
   Comment cmnt(masm_, "[ CompareOperation");
 
   bool left_already_loaded = false;
@@ -6571,8 +8136,20 @@
     default:
       UNREACHABLE();
   }
-  if (!left_already_loaded) Load(left);
-  Load(right);
+
+  if (left->IsTrivial()) {
+    if (!left_already_loaded) {
+      Load(right);
+      Result right_result = frame_->Pop();
+      frame_->Push(left);
+      frame_->Push(&right_result);
+    } else {
+      Load(right);
+    }
+  } else {
+    if (!left_already_loaded) Load(left);
+    Load(right);
+  }
   Comparison(node, cc, strict, destination());
 }
 
@@ -6702,8 +8279,12 @@
  public:
   DeferredReferenceSetKeyedValue(Register value,
                                  Register key,
-                                 Register receiver)
-      : value_(value), key_(key), receiver_(receiver) {
+                                 Register receiver,
+                                 Register scratch)
+      : value_(value),
+        key_(key),
+        receiver_(receiver),
+        scratch_(scratch) {
     set_comment("[ DeferredReferenceSetKeyedValue");
   }
 
@@ -6715,17 +8296,65 @@
   Register value_;
   Register key_;
   Register receiver_;
+  Register scratch_;
   Label patch_site_;
 };
 
 
 void DeferredReferenceSetKeyedValue::Generate() {
   __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
-  // Push receiver and key arguments on the stack.
-  __ push(receiver_);
-  __ push(key_);
-  // Move value argument to eax as expected by the IC stub.
-  if (!value_.is(eax)) __ mov(eax, value_);
+  // Move value_ to eax, key_ to ecx, and receiver_ to edx.
+  Register old_value = value_;
+
+  // First, move value to eax.
+  if (!value_.is(eax)) {
+    if (key_.is(eax)) {
+      // Move key_ out of eax, preferably to ecx.
+      if (!value_.is(ecx) && !receiver_.is(ecx)) {
+        __ mov(ecx, key_);
+        key_ = ecx;
+      } else {
+        __ mov(scratch_, key_);
+        key_ = scratch_;
+      }
+    }
+    if (receiver_.is(eax)) {
+      // Move receiver_ out of eax, preferably to edx.
+      if (!value_.is(edx) && !key_.is(edx)) {
+        __ mov(edx, receiver_);
+        receiver_ = edx;
+      } else {
+        // Both moves to scratch are from eax; no valid path performs both.
+        __ mov(scratch_, receiver_);
+        receiver_ = scratch_;
+      }
+    }
+    __ mov(eax, value_);
+    value_ = eax;
+  }
+
+  // Now value_ is in eax.  Move the other two to the right positions.
+  // We do not update the variables key_ and receiver_ to ecx and edx.
+  if (key_.is(ecx)) {
+    if (!receiver_.is(edx)) {
+      __ mov(edx, receiver_);
+    }
+  } else if (key_.is(edx)) {
+    if (receiver_.is(ecx)) {
+      __ xchg(edx, ecx);
+    } else {
+      __ mov(ecx, key_);
+      if (!receiver_.is(edx)) {
+        __ mov(edx, receiver_);
+      }
+    }
+  } else {  // Key is not in edx or ecx.
+    if (!receiver_.is(edx)) {
+      __ mov(edx, receiver_);
+    }
+    __ mov(ecx, key_);
+  }
+
   // Call the IC stub.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
   __ call(ic, RelocInfo::CODE_TARGET);
@@ -6738,11 +8367,8 @@
   // Here we use masm_-> instead of the __ macro because this is the
   // instruction that gets patched and coverage code gets in the way.
   masm_->test(eax, Immediate(-delta_to_patch_site));
-  // Restore value (returned from store IC), key and receiver
-  // registers.
-  if (!value_.is(eax)) __ mov(value_, eax);
-  __ pop(key_);
-  __ pop(receiver_);
+  // Restore value (returned from store IC) register.
+  if (!old_value.is(eax)) __ mov(old_value, eax);
 }
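
The test instruction after the IC call is a marker, not a real test: its 32-bit immediate records the distance back to the inlined map-compare so the IC can locate and patch the inline code. Illustrative arithmetic only; the real offset bookkeeping lives in the IC machinery:

    #include <cstdint>

    // The immediate was emitted as -delta_to_patch_site, so negating it and
    // walking backwards recovers the patch site address.
    uintptr_t PatchSiteFromMarker(uintptr_t marker_address, int32_t immediate) {
      return marker_address - static_cast<uintptr_t>(-immediate);
    }
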
 
 
@@ -6866,8 +8492,12 @@
     deferred->Branch(not_equal);
 
     // Check that the key is a smi.
-    __ test(key.reg(), Immediate(kSmiTagMask));
-    deferred->Branch(not_zero);
+    if (!key.is_smi()) {
+      __ test(key.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
+    }
 
     // Get the elements array from the receiver and check that it
     // is not a dictionary.
@@ -6878,7 +8508,7 @@
     deferred->Branch(not_equal);
 
     // Shift the key to get the actual index value and check that
-    // it is within bounds.
+    // it is within bounds. Use unsigned comparison to handle negative keys.
     __ mov(result.reg(), key.reg());
     __ SmiUntag(result.reg());
     __ cmp(result.reg(),
@@ -6928,6 +8558,8 @@
 
     Result tmp = allocator_->Allocate();
     ASSERT(tmp.is_valid());
+    Result tmp2 = allocator_->Allocate();
+    ASSERT(tmp2.is_valid());
 
     // Determine whether the value is a constant before putting it in a
     // register.
@@ -6941,41 +8573,47 @@
     DeferredReferenceSetKeyedValue* deferred =
         new DeferredReferenceSetKeyedValue(result.reg(),
                                            key.reg(),
-                                           receiver.reg());
-
-    // Check that the value is a smi if it is not a constant.  We can skip
-    // the write barrier for smis and constants.
-    if (!value_is_constant) {
-      __ test(result.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
-    }
-
-    // Check that the key is a non-negative smi.
-    __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
-    deferred->Branch(not_zero);
+                                           receiver.reg(),
+                                           tmp.reg());
 
     // Check that the receiver is not a smi.
     __ test(receiver.reg(), Immediate(kSmiTagMask));
     deferred->Branch(zero);
 
+    // Check that the key is a smi.
+    if (!key.is_smi()) {
+      __ test(key.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
+    }
+
     // Check that the receiver is a JSArray.
-    __ mov(tmp.reg(),
-           FieldOperand(receiver.reg(), HeapObject::kMapOffset));
-    __ movzx_b(tmp.reg(),
-               FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
-    __ cmp(tmp.reg(), JS_ARRAY_TYPE);
+    __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
     deferred->Branch(not_equal);
 
     // Check that the key is within bounds.  Both the key and the length of
-    // the JSArray are smis.
+    // the JSArray are smis. Use unsigned comparison to handle negative keys.
     __ cmp(key.reg(),
            FieldOperand(receiver.reg(), JSArray::kLengthOffset));
-    deferred->Branch(greater_equal);
+    deferred->Branch(above_equal);
 
     // Get the elements array from the receiver and check that it is not a
     // dictionary.
     __ mov(tmp.reg(),
-           FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+           FieldOperand(receiver.reg(), JSArray::kElementsOffset));
+
+    // Check whether it is possible to omit the write barrier. If the elements
+    // array is in new space or the value written is a smi, we can safely
+    // update the elements array without updating the remembered set.
+    Label in_new_space;
+    __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
+    if (!value_is_constant) {
+      __ test(result.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    }
+
+    __ bind(&in_new_space);
     // Bind the deferred code patch site to be able to locate the fixed
     // array map comparison.  When debugging, we patch this comparison to
     // always fail so that we will hit the IC call in the deferred code
@@ -7001,7 +8639,6 @@
     // indicate that we have generated an inline version of the
     // keyed store.
     __ nop();
-    frame()->Drop(2);
   }
   ASSERT(frame()->height() == original_height - 3);
   return result;
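
The new-space check works because the remembered set only tracks old-to-new pointers: a store into a new-space elements array can never create one, and smi (or constant) values need no barrier at all. The decision in boolean form (a sketch, not the V8 predicate):

    bool NeedsWriteBarrier(bool elements_in_new_space,
                           bool value_is_smi_or_constant) {
      // Only heap-object stores into old-space backing stores can create an
      // old->new pointer that the remembered set must record.
      return !elements_in_new_space && !value_is_smi_or_constant;
    }
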
@@ -7012,6 +8649,40 @@
 #define __ ACCESS_MASM(masm)
 
 
+static void CheckTwoForSminess(MacroAssembler* masm,
+                               Register left, Register right, Register scratch,
+                               TypeInfo left_info, TypeInfo right_info,
+                               DeferredInlineBinaryOperation* deferred) {
+  if (left.is(right)) {
+    if (!left_info.IsSmi()) {
+      __ test(left, Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(left);
+    }
+  } else if (!left_info.IsSmi()) {
+    if (!right_info.IsSmi()) {
+      __ mov(scratch, left);
+      __ or_(scratch, Operand(right));
+      __ test(scratch, Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    } else {
+      __ test(left, Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+      if (FLAG_debug_code) __ AbortIfNotSmi(right);
+    }
+  } else {
+    if (FLAG_debug_code) __ AbortIfNotSmi(left);
+    if (!right_info.IsSmi()) {
+      __ test(right, Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(right);
+    }
+  }
+}
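
When neither operand's smi-ness is statically known, the helper above ors the two values so a single tag test covers both. The invariant it relies on (ia32 smi layout, kSmiTagMask == 1):

    #include <cstdint>

    bool BothSmis(uint32_t left, uint32_t right) {
      // A value is a smi iff its low bit is 0, so the or of two values has
      // a clear low bit iff both are smis.
      return ((left | right) & 1u) == 0;
    }
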
+
+
 Handle<String> Reference::GetName() {
   ASSERT(type_ == NAMED);
   Property* property = expression_->AsProperty();
@@ -7143,6 +8814,7 @@
       Comment cmnt(masm, "[ Store to keyed Property");
       Property* property = expression()->AsProperty();
       ASSERT(property != NULL);
+
       Result answer = cgen_->EmitKeyedStore(property->key()->type());
       cgen_->frame()->Push(&answer);
       set_unloaded();
@@ -7157,12 +8829,12 @@
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
-  // Clone the boilerplate in new space. Set the context to the
-  // current context in esi.
+  // Create a new closure from the given function info in new
+  // space. Set the context to the current context in esi.
   Label gc;
   __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
 
-  // Get the boilerplate function from the stack.
+  // Get the function info from the stack.
   __ mov(edx, Operand(esp, 1 * kPointerSize));
 
   // Compute the function map in the current global context and set that
@@ -7172,18 +8844,16 @@
   __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
   __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
 
-  // Clone the rest of the boilerplate fields. We don't have to update
-  // the write barrier because the allocated object is in new space.
-  for (int offset = kPointerSize;
-       offset < JSFunction::kSize;
-       offset += kPointerSize) {
-    if (offset == JSFunction::kContextOffset) {
-      __ mov(FieldOperand(eax, offset), esi);
-    } else {
-      __ mov(ebx, FieldOperand(edx, offset));
-      __ mov(FieldOperand(eax, offset), ebx);
-    }
-  }
+  // Initialize the rest of the function. We don't have to update the
+  // write barrier because the allocated object is in new space.
+  __ mov(ebx, Immediate(Factory::empty_fixed_array()));
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
+  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
+         Immediate(Factory::the_hole_value()));
+  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
+  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
+  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
 
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
@@ -7195,7 +8865,7 @@
   __ push(esi);
   __ push(edx);
   __ push(ecx);  // Restore return address.
-  __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+  __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
 }
 
 
@@ -7239,7 +8909,7 @@
 
   // Need to collect. Call into runtime system.
   __ bind(&gc);
-  __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
 }
 
 
@@ -7294,8 +8964,7 @@
   __ ret(3 * kPointerSize);
 
   __ bind(&slow_case);
-  ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
-  __ TailCallRuntime(runtime, 3, 1);
+  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
 }
 
 
@@ -7325,6 +8994,7 @@
   __ cmp(ecx, FIRST_NONSTRING_TYPE);
   __ j(above_equal, &not_string);
   __ mov(edx, FieldOperand(eax, String::kLengthOffset));
+  ASSERT(kSmiTag == 0);
   __ test(edx, Operand(edx));
   __ j(zero, &false_result);
   __ jmp(&true_result);
@@ -7512,6 +9182,26 @@
     __ mov(left, Operand(esp, 2 * kPointerSize));
   }
 
+  if (static_operands_type_.IsSmi()) {
+    if (FLAG_debug_code) {
+      __ AbortIfNotSmi(left);
+      __ AbortIfNotSmi(right);
+    }
+    if (op_ == Token::BIT_OR) {
+      __ or_(right, Operand(left));
+      GenerateReturn(masm);
+      return;
+    } else if (op_ == Token::BIT_AND) {
+      __ and_(right, Operand(left));
+      GenerateReturn(masm);
+      return;
+    } else if (op_ == Token::BIT_XOR) {
+      __ xor_(right, Operand(left));
+      GenerateReturn(masm);
+      return;
+    }
+  }
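
This smi fast path needs no untagging because or/and/xor commute with the tagging shift: with the tag bit 0, combining two tagged smis yields the correctly tagged result. For example:

    #include <cstdint>

    int32_t SmiFromInt(int32_t v) { return v << 1; }  // ia32 smi tagging

    int32_t SmiBitOr(int32_t a_tagged, int32_t b_tagged) {
      // (a << 1) | (b << 1) == (a | b) << 1, and likewise for & and ^.
      return a_tagged | b_tagged;
    }
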
+
   // 2. Prepare the smi check of both operands by oring them together.
   Comment smi_check_comment(masm, "-- Smi check arguments");
   Label not_smis;
@@ -7820,146 +9510,181 @@
   // Generate fast case smi code if requested. This flag is set when the fast
   // case smi code is not generated by the caller. Generating it here will speed
   // up common operations.
-  if (HasSmiCodeInStub()) {
+  if (ShouldGenerateSmiCode()) {
     GenerateSmiCode(masm, &call_runtime);
   } else if (op_ != Token::MOD) {  // MOD goes straight to runtime.
-    GenerateLoadArguments(masm);
+    if (!HasArgsInRegisters()) {
+      GenerateLoadArguments(masm);
+    }
   }
 
   // Floating point case.
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      if (CpuFeatures::IsSupported(SSE2)) {
-        CpuFeatures::Scope use_sse2(SSE2);
-        if (NumberInfo::IsNumber(operands_type_)) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(edx,
-                                "GenericBinaryOpStub operand not a number.");
-            __ AbortIfNotNumber(eax,
-                                "GenericBinaryOpStub operand not a number.");
-          }
-          FloatingPointHelper::LoadSSE2Operands(masm);
-        } else {
-          FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+  if (ShouldGenerateFPCode()) {
+    switch (op_) {
+      case Token::ADD:
+      case Token::SUB:
+      case Token::MUL:
+      case Token::DIV: {
+        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+            HasSmiCodeInStub()) {
+          // Execution reaches this point when the first non-smi argument occurs
+          // (and only if smi code is generated). This is the right moment to
+          // patch to HEAP_NUMBERS state. The transition is attempted only for
+          // the four basic operations. The stub stays in the DEFAULT state
+          // forever for all other operations (also if smi code is skipped).
+          GenerateTypeTransition(masm);
         }
 
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
-        }
-        GenerateHeapResultAllocation(masm, &call_runtime);
-        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        GenerateReturn(masm);
-      } else {  // SSE2 not available, use FPU.
-        if (NumberInfo::IsNumber(operands_type_)) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(edx,
-                                "GenericBinaryOpStub operand not a number.");
-            __ AbortIfNotNumber(eax,
-                                "GenericBinaryOpStub operand not a number.");
-          }
-        } else {
-          FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
-        }
-        FloatingPointHelper::LoadFloatOperands(
-            masm,
-            ecx,
-            FloatingPointHelper::ARGS_IN_REGISTERS);
-        switch (op_) {
-          case Token::ADD: __ faddp(1); break;
-          case Token::SUB: __ fsubp(1); break;
-          case Token::MUL: __ fmulp(1); break;
-          case Token::DIV: __ fdivp(1); break;
-          default: UNREACHABLE();
-        }
-        Label after_alloc_failure;
-        GenerateHeapResultAllocation(masm, &after_alloc_failure);
-        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        GenerateReturn(masm);
-        __ bind(&after_alloc_failure);
-        __ ffree();
-        __ jmp(&call_runtime);
-      }
-    }
-    case Token::MOD: {
-      // For MOD we go directly to runtime in the non-smi case.
-      break;
-    }
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR: {
-      Label non_smi_result;
-      FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
-      switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
-        case Token::SAR: __ sar_cl(eax); break;
-        case Token::SHL: __ shl_cl(eax); break;
-        case Token::SHR: __ shr_cl(eax); break;
-        default: UNREACHABLE();
-      }
-      if (op_ == Token::SHR) {
-        // Check if result is non-negative and fits in a smi.
-        __ test(eax, Immediate(0xc0000000));
-        __ j(not_zero, &call_runtime);
-      } else {
-        // Check if result fits in a smi.
-        __ cmp(eax, 0xc0000000);
-        __ j(negative, &non_smi_result);
-      }
-      // Tag smi result and return.
-      __ SmiTag(eax);
-      GenerateReturn(masm);
-
-      // All ops except SHR return a signed int32 that we load in a HeapNumber.
-      if (op_ != Token::SHR) {
-        __ bind(&non_smi_result);
-        // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
-        Label skip_allocation;
-        switch (mode_) {
-          case OVERWRITE_LEFT:
-          case OVERWRITE_RIGHT:
-            // If the operand was an object, we skip the
-            // allocation of a heap number.
-            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
-                                1 * kPointerSize : 2 * kPointerSize));
-            __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
-            // Fall through!
-          case NO_OVERWRITE:
-            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
-            __ bind(&skip_allocation);
-            break;
-          default: UNREACHABLE();
-        }
-        // Store the result in the HeapNumber and return.
+        Label not_floats;
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
+          if (static_operands_type_.IsNumber()) {
+            if (FLAG_debug_code) {
+              // Assert at runtime that inputs are only numbers.
+              __ AbortIfNotNumber(edx);
+              __ AbortIfNotNumber(eax);
+            }
+            if (static_operands_type_.IsSmi()) {
+              if (FLAG_debug_code) {
+                __ AbortIfNotSmi(edx);
+                __ AbortIfNotSmi(eax);
+              }
+              FloatingPointHelper::LoadSSE2Smis(masm, ecx);
+            } else {
+              FloatingPointHelper::LoadSSE2Operands(masm);
+            }
+          } else {
+            FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+          }
+
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          GenerateHeapResultAllocation(masm, &call_runtime);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        } else {
-          __ mov(Operand(esp, 1 * kPointerSize), ebx);
-          __ fild_s(Operand(esp, 1 * kPointerSize));
+          GenerateReturn(masm);
+        } else {  // SSE2 not available, use FPU.
+          if (static_operands_type_.IsNumber()) {
+            if (FLAG_debug_code) {
+              // Assert at runtime that inputs are only numbers.
+              __ AbortIfNotNumber(edx);
+              __ AbortIfNotNumber(eax);
+            }
+          } else {
+            FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+          }
+          FloatingPointHelper::LoadFloatOperands(
+              masm,
+              ecx,
+              FloatingPointHelper::ARGS_IN_REGISTERS);
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          Label after_alloc_failure;
+          GenerateHeapResultAllocation(masm, &after_alloc_failure);
           __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          GenerateReturn(masm);
+          __ bind(&after_alloc_failure);
+          __ ffree();
+          __ jmp(&call_runtime);
         }
-        GenerateReturn(masm);
+        __ bind(&not_floats);
+        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+            !HasSmiCodeInStub()) {
+          // Execution reaches this point when the first non-number argument
+          // occurs (and only if smi code is skipped from the stub, otherwise
+          // the patching has already been done earlier in this case branch).
+          // Try patching to STRINGS for ADD operation.
+          if (op_ == Token::ADD) {
+            GenerateTypeTransition(masm);
+          }
+        }
+        break;
       }
-      break;
+      case Token::MOD: {
+        // For MOD we go directly to runtime in the non-smi case.
+        break;
+      }
+      case Token::BIT_OR:
+      case Token::BIT_AND:
+      case Token::BIT_XOR:
+      case Token::SAR:
+      case Token::SHL:
+      case Token::SHR: {
+        Label non_smi_result;
+        FloatingPointHelper::LoadAsIntegers(masm,
+                                            static_operands_type_,
+                                            use_sse3_,
+                                            &call_runtime);
+        switch (op_) {
+          case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+          case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+          case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+          case Token::SAR: __ sar_cl(eax); break;
+          case Token::SHL: __ shl_cl(eax); break;
+          case Token::SHR: __ shr_cl(eax); break;
+          default: UNREACHABLE();
+        }
+        if (op_ == Token::SHR) {
+          // Check if result is non-negative and fits in a smi.
+          __ test(eax, Immediate(0xc0000000));
+          __ j(not_zero, &call_runtime);
+        } else {
+          // Check if result fits in a smi.
+          __ cmp(eax, 0xc0000000);
+          __ j(negative, &non_smi_result);
+        }
+        // Tag smi result and return.
+        __ SmiTag(eax);
+        GenerateReturn(masm);
+
+        // All ops except SHR return a signed int32 that we load in
+        // a HeapNumber.
+        if (op_ != Token::SHR) {
+          __ bind(&non_smi_result);
+          // Allocate a heap number if needed.
+          __ mov(ebx, Operand(eax));  // ebx: result
+          Label skip_allocation;
+          switch (mode_) {
+            case OVERWRITE_LEFT:
+            case OVERWRITE_RIGHT:
+              // If the operand was an object, we skip the
+              // allocation of a heap number.
+              __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                  1 * kPointerSize : 2 * kPointerSize));
+              __ test(eax, Immediate(kSmiTagMask));
+              __ j(not_zero, &skip_allocation, not_taken);
+              // Fall through!
+            case NO_OVERWRITE:
+              __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+              __ bind(&skip_allocation);
+              break;
+            default: UNREACHABLE();
+          }
+          // Store the result in the HeapNumber and return.
+          if (CpuFeatures::IsSupported(SSE2)) {
+            CpuFeatures::Scope use_sse2(SSE2);
+            __ cvtsi2sd(xmm0, Operand(ebx));
+            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+          } else {
+            __ mov(Operand(esp, 1 * kPointerSize), ebx);
+            __ fild_s(Operand(esp, 1 * kPointerSize));
+            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          }
+          GenerateReturn(masm);
+        }
+        break;
+      }
+      default: UNREACHABLE(); break;
     }
-    default: UNREACHABLE(); break;
   }
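
The 'cmp eax, 0xc0000000' trick above checks the 31-bit smi range in one instruction: 0xc0000000 is -2^30 as an int32, so the comparison computes eax + 2^30, whose sign bit is clear exactly for values in [-2^30, 2^30). Equivalently:

    #include <cstdint>

    bool FitsSmi(int32_t v) {
      // Shift the smi range [-2^30, 2^30) onto [0, 2^31); test the sign bit.
      return (static_cast<uint32_t>(v) + 0x40000000u) < 0x80000000u;
    }
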
 
   // If all else fails, use the runtime system to get the correct
@@ -7967,30 +9692,40 @@
   // stack in the correct order below the return address.
   __ bind(&call_runtime);
   if (HasArgsInRegisters()) {
-    __ pop(ecx);
-    if (HasArgsReversed()) {
-      __ push(eax);
-      __ push(edx);
-    } else {
-      __ push(edx);
-      __ push(eax);
-    }
-    __ push(ecx);
+    GenerateRegisterArgsPush(masm);
   }
+
   switch (op_) {
     case Token::ADD: {
       // Test for string arguments before calling runtime.
       Label not_strings, not_string1, string1, string1_smi2;
-      Result answer;
-      __ test(edx, Immediate(kSmiTagMask));
+
+      // If this stub has already generated FP-specific code, the arguments
+      // are already in edx and eax.
+      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+        GenerateLoadArguments(masm);
+      }
+
+      // Registers containing left and right operands respectively.
+      Register lhs, rhs;
+      if (HasArgsReversed()) {
+        lhs = eax;
+        rhs = edx;
+      } else {
+        lhs = edx;
+        rhs = eax;
+      }
+
+      // Test if first argument is a string.
+      __ test(lhs, Immediate(kSmiTagMask));
       __ j(zero, &not_string1);
-      __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
+      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
       __ j(above_equal, &not_string1);
 
       // First argument is a string, test second.
-      __ test(eax, Immediate(kSmiTagMask));
+      __ test(rhs, Immediate(kSmiTagMask));
       __ j(zero, &string1_smi2);
-      __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
+      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
       __ j(above_equal, &string1);
 
       // First and second argument are strings. Jump to the string add stub.
@@ -8001,36 +9736,26 @@
       // First argument is a string, second is a smi. Try to lookup the number
       // string for the smi in the number string cache.
       NumberToStringStub::GenerateLookupNumberStringCache(
-          masm, eax, edi, ebx, ecx, true, &string1);
+          masm, rhs, edi, ebx, ecx, true, &string1);
 
-      // Call the string add stub to make the result.
-      __ EnterInternalFrame();
-      __ push(edx);  // Original first argument.
-      __ push(edi);  // Number to string result for second argument.
-      __ CallStub(&string_add_stub);
-      __ LeaveInternalFrame();
-      __ ret(2 * kPointerSize);
+      // Replace second argument on stack and tailcall string add stub to make
+      // the result.
+      __ mov(Operand(esp, 1 * kPointerSize), edi);
+      __ TailCallStub(&string_add_stub);
 
+      // Only first argument is a string.
       __ bind(&string1);
-      __ InvokeBuiltin(
-          HasArgsReversed() ?
-              Builtins::STRING_ADD_RIGHT :
-              Builtins::STRING_ADD_LEFT,
-          JUMP_FUNCTION);
+      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
 
       // First argument was not a string, test second.
       __ bind(&not_string1);
-      __ test(eax, Immediate(kSmiTagMask));
+      __ test(rhs, Immediate(kSmiTagMask));
       __ j(zero, &not_strings);
-      __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
+      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
       __ j(above_equal, &not_strings);
 
       // Only second argument is a string.
-      __ InvokeBuiltin(
-          HasArgsReversed() ?
-              Builtins::STRING_ADD_LEFT :
-              Builtins::STRING_ADD_RIGHT,
-          JUMP_FUNCTION);
+      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
 
       __ bind(&not_strings);
       // Neither argument is a string.
@@ -8123,10 +9848,9 @@
 
 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
   // If arguments are not passed in registers read them from the stack.
-  if (!HasArgsInRegisters()) {
-    __ mov(eax, Operand(esp, 1 * kPointerSize));
-    __ mov(edx, Operand(esp, 2 * kPointerSize));
-  }
+  ASSERT(!HasArgsInRegisters());
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
 }
 
 
@@ -8141,6 +9865,75 @@
 }
 
 
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  ASSERT(HasArgsInRegisters());
+  __ pop(ecx);
+  if (HasArgsReversed()) {
+    __ push(eax);
+    __ push(edx);
+  } else {
+    __ push(edx);
+    __ push(eax);
+  }
+  __ push(ecx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  // Keep a copy of operands on the stack and make sure they are also in
+  // edx, eax.
+  if (HasArgsInRegisters()) {
+    GenerateRegisterArgsPush(masm);
+  } else {
+    GenerateLoadArguments(masm);
+  }
+
+  // Internal frame is necessary to handle exceptions properly.
+  __ EnterInternalFrame();
+
+  // Push arguments on stack if the stub expects them there.
+  if (!HasArgsInRegisters()) {
+    __ push(edx);
+    __ push(eax);
+  }
+  // Call the stub proper to get the result in eax.
+  __ call(&get_result);
+  __ LeaveInternalFrame();
+
+  __ pop(ecx);  // Return address.
+  // Left and right arguments are now on top.
+  // Push the operation result. The tail call to BinaryOp_Patch will
+  // return it to the original caller.
+  __ push(eax);
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+
+  __ push(ecx);  // Return address.
+
+  // Patch the caller to an appropriate specialized stub
+  // and return the operation result.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+      6,
+      1);
+
+  // The entry point for the result calculation is assumed to be immediately
+  // after this sequence.
+  __ bind(&get_result);
+}
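
Putting the transition comments together, the patching policy is a small state machine: DEFAULT stubs with a smi fast path move to HEAP_NUMBERS on the first non-smi for the four basic operations, and stubs without smi code try STRINGS for ADD on the first non-number. A hedged model (state names taken from the comments; the real encoding lives in BinaryOpIC):

    enum class OperandsState { DEFAULT, HEAP_NUMBERS, STRINGS };

    OperandsState NextState(OperandsState current, bool has_smi_code,
                            bool is_basic_arith, bool is_add) {
      if (current != OperandsState::DEFAULT) return current;  // patch once
      if (has_smi_code && is_basic_arith) return OperandsState::HEAP_NUMBERS;
      if (!has_smi_code && is_add) return OperandsState::STRINGS;
      return OperandsState::DEFAULT;
    }
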
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  GenericBinaryOpStub stub(key, type_info);
+  return stub.GetCode();
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Input on stack:
   // esp[4]: argument (should be number).
@@ -8205,8 +9998,7 @@
   __ j(zero, &runtime_call_clear_stack);
 #ifdef DEBUG
   // Check that the layout of cache elements match expectations.
-  {  // NOLINT - doesn't like a single brace on a line.
-    TranscendentalCache::Element test_elem[2];
+  { TranscendentalCache::Element test_elem[2];
     char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
     char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
     char* elem_in0  = reinterpret_cast<char*>(&(test_elem[0].in[0]));
@@ -8247,7 +10039,7 @@
   __ bind(&runtime_call_clear_stack);
   __ fstp(0);
   __ bind(&runtime_call);
-  __ TailCallRuntime(ExternalReference(RuntimeFunction()), 1, 1);
+  __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
 }
 
 
@@ -8353,24 +10145,29 @@
 // trashed registers.
 void IntegerConvert(MacroAssembler* masm,
                     Register source,
+                    TypeInfo type_info,
                     bool use_sse3,
                     Label* conversion_failure) {
   ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
   Label done, right_exponent, normal_exponent;
   Register scratch = ebx;
   Register scratch2 = edi;
-  // Get exponent word.
-  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
-  // Get exponent alone in scratch2.
-  __ mov(scratch2, scratch);
-  __ and_(scratch2, HeapNumber::kExponentMask);
+  if (!type_info.IsInteger32() || !use_sse3) {
+    // Get exponent word.
+    __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+    // Get exponent alone in scratch2.
+    __ mov(scratch2, scratch);
+    __ and_(scratch2, HeapNumber::kExponentMask);
+  }
   if (use_sse3) {
     CpuFeatures::Scope scope(SSE3);
-    // Check whether the exponent is too big for a 64 bit signed integer.
-    static const uint32_t kTooBigExponent =
-        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
-    __ j(greater_equal, conversion_failure);
+    if (!type_info.IsInteger32()) {
+      // Check whether the exponent is too big for a 64 bit signed integer.
+      static const uint32_t kTooBigExponent =
+          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+      __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+      __ j(greater_equal, conversion_failure);
+    }
     // Load x87 register with heap number.
     __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
     // Reserve space for 64 bit answer.
@@ -8484,16 +10281,70 @@
 
 // Input: edx, eax are the left and right objects of a bit op.
 // Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
-                                         bool use_sse3,
-                                         Label* conversion_failure) {
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
+                                                TypeInfo type_info,
+                                                bool use_sse3,
+                                                Label* conversion_failure) {
   // Check float operands.
   Label arg1_is_object, check_undefined_arg1;
   Label arg2_is_object, check_undefined_arg2;
   Label load_arg2, done;
 
+  if (!type_info.IsDouble()) {
+    if (!type_info.IsSmi()) {
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(not_zero, &arg1_is_object);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(edx);
+    }
+    __ SmiUntag(edx);
+    __ jmp(&load_arg2);
+  }
+
+  __ bind(&arg1_is_object);
+
+  // Get the untagged integer version of the edx heap number in ecx.
+  IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
+  __ mov(edx, ecx);
+
+  // Here edx has the untagged integer, eax has a Smi or a heap number.
+  __ bind(&load_arg2);
+  if (!type_info.IsDouble()) {
+    // Test if arg2 is a Smi.
+    if (!type_info.IsSmi()) {
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(not_zero, &arg2_is_object);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(eax);
+    }
+    __ SmiUntag(eax);
+    __ mov(ecx, eax);
+    __ jmp(&done);
+  }
+
+  __ bind(&arg2_is_object);
+
+  // Get the untagged integer version of the eax heap number in ecx.
+  IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
+  __ bind(&done);
+  __ mov(eax, edx);
+}
+
+
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
+                                                 bool use_sse3,
+                                                 Label* conversion_failure) {
+  // Check float operands.
+  Label arg1_is_object, check_undefined_arg1;
+  Label arg2_is_object, check_undefined_arg2;
+  Label load_arg2, done;
+
+  // Test if arg1 is a Smi.
   __ test(edx, Immediate(kSmiTagMask));
   __ j(not_zero, &arg1_is_object);
+
   __ SmiUntag(edx);
   __ jmp(&load_arg2);
 
@@ -8508,15 +10359,22 @@
   __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
   __ cmp(ebx, Factory::heap_number_map());
   __ j(not_equal, &check_undefined_arg1);
+
   // Get the untagged integer version of the edx heap number in ecx.
-  IntegerConvert(masm, edx, use_sse3, conversion_failure);
+  IntegerConvert(masm,
+                 edx,
+                 TypeInfo::Unknown(),
+                 use_sse3,
+                 conversion_failure);
   __ mov(edx, ecx);
 
   // Here edx has the untagged integer, eax has a Smi or a heap number.
   __ bind(&load_arg2);
+
   // Test if arg2 is a Smi.
   __ test(eax, Immediate(kSmiTagMask));
   __ j(not_zero, &arg2_is_object);
+
   __ SmiUntag(eax);
   __ mov(ecx, eax);
   __ jmp(&done);
@@ -8532,13 +10390,30 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ cmp(ebx, Factory::heap_number_map());
   __ j(not_equal, &check_undefined_arg2);
+
   // Get the untagged integer version of the eax heap number in ecx.
-  IntegerConvert(masm, eax, use_sse3, conversion_failure);
+  IntegerConvert(masm,
+                 eax,
+                 TypeInfo::Unknown(),
+                 use_sse3,
+                 conversion_failure);
   __ bind(&done);
   __ mov(eax, edx);
 }
 
 
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+                                         TypeInfo type_info,
+                                         bool use_sse3,
+                                         Label* conversion_failure) {
+  if (type_info.IsNumber()) {
+    LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
+  } else {
+    LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
+  }
+}
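
All of these integer loaders ultimately implement ECMA's ToInt32: truncate toward zero, reduce modulo 2^32, and reinterpret as signed. A self-contained reference version:

    #include <cmath>
    #include <cstdint>

    int32_t ToInt32Reference(double d) {
      if (!std::isfinite(d) || d == 0.0) return 0;  // NaN, +/-Inf, +/-0 -> 0
      double two32 = 4294967296.0;                  // 2^32
      double m = std::fmod(std::trunc(d), two32);   // truncate, then mod 2^32
      if (m < 0) m += two32;                        // into [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }
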
+
+
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
   Label load_smi, done;
@@ -8775,7 +10650,11 @@
     __ j(not_equal, &slow, not_taken);
 
     // Convert the heap number in eax to an untagged integer in ecx.
-    IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), &slow);
+    IntegerConvert(masm,
+                   eax,
+                   TypeInfo::Unknown(),
+                   CpuFeatures::IsSupported(SSE3),
+                   &slow);
 
     // Do the bitwise operation and check if the result fits in a smi.
     Label try_float;
@@ -8833,30 +10712,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
-  // Check if the calling frame is an arguments adaptor frame.
-  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame and return it.
-  // Otherwise nothing to do: The number of formal parameters has already been
-  // passed in register eax by calling function. Just return it.
-  if (CpuFeatures::IsSupported(CMOV)) {
-    CpuFeatures::Scope use_cmov(CMOV);
-    __ cmov(equal, eax,
-            Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  } else {
-    Label exit;
-    __ j(not_equal, &exit);
-    __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ bind(&exit);
-  }
-  __ ret(0);
-}
-
-
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The key is in edx and the parameter count is in eax.
 
@@ -8911,7 +10766,7 @@
   __ pop(ebx);  // Return address.
   __ push(edx);
   __ push(ebx);
-  __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
+  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
 }
 
 
@@ -9012,7 +10867,7 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
 }
 
 
@@ -9020,11 +10875,11 @@
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time, or if regexp entry in generated code is turned off by a runtime
   // switch or at compilation.
-#ifndef V8_NATIVE_REGEXP
-  __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
-#else  // V8_NATIVE_REGEXP
+#ifdef V8_INTERPRETED_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else  // V8_INTERPRETED_REGEXP
   if (!FLAG_regexp_entry_native) {
-    __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
     return;
   }
 
@@ -9096,16 +10951,16 @@
   // Get the length of the string to ebx.
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
 
-  // ebx: Length of subject string
+  // ebx: Length of subject string as a smi
   // ecx: RegExp data (FixedArray)
   // edx: Number of capture registers
-  // Check that the third argument is a positive smi.
   // Check that the third argument is a positive smi less than the subject
-  // string length. A negative value will be greater (usigned comparison).
+  // string length. A negative value will be greater (unsigned comparison).
   __ mov(eax, Operand(esp, kPreviousIndexOffset));
-  __ SmiUntag(eax);
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &runtime);
   __ cmp(eax, Operand(ebx));
-  __ j(above, &runtime);
+  __ j(above_equal, &runtime);
 
   // ecx: RegExp data (FixedArray)
   // edx: Number of capture registers
@@ -9148,9 +11003,8 @@
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  __ mov(edx, ebx);
-  __ and_(edx, kStringRepresentationMask);
-  __ cmp(edx, kConsStringTag);
+  __ and_(ebx, kStringRepresentationMask);
+  __ cmp(ebx, kConsStringTag);
   __ j(not_equal, &runtime);
   __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
   __ cmp(Operand(edx), Factory::empty_string());
@@ -9169,7 +11023,8 @@
   // ecx: RegExp data (FixedArray)
   // Check that the irregexp code has been generated for an ascii string. If
   // it has, the field contains a code object; otherwise it contains the hole.
-  __ cmp(ebx, kStringTag | kSeqStringTag | kTwoByteStringTag);
+  const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
+  __ cmp(ebx, kSeqTwoByteString);
   __ j(equal, &seq_two_byte_string);
   if (FLAG_debug_code) {
     __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
@@ -9207,48 +11062,52 @@
   // All checks done. Now push arguments for native regexp code.
   __ IncrementCounter(&Counters::regexp_entry_native, 1);
 
+  static const int kRegExpExecuteArguments = 7;
+  __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
+
   // Argument 7: Indicate that this is a direct call from JavaScript.
-  __ push(Immediate(1));
+  __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
 
   // Argument 6: Start (high end) of backtracking stack memory area.
   __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
   __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
-  __ push(ecx);
+  __ mov(Operand(esp, 5 * kPointerSize), ecx);
 
   // Argument 5: static offsets vector buffer.
-  __ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
+  __ mov(Operand(esp, 4 * kPointerSize),
+         Immediate(ExternalReference::address_of_static_offsets_vector()));
 
   // Argument 4: End of string data
   // Argument 3: Start of string data
-  Label push_two_byte, push_rest;
+  Label setup_two_byte, setup_rest;
   __ test(edi, Operand(edi));
   __ mov(edi, FieldOperand(eax, String::kLengthOffset));
-  __ j(zero, &push_two_byte);
+  __ j(zero, &setup_two_byte);
+  __ SmiUntag(edi);
   __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
-  __ push(ecx);  // Argument 4.
+  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
   __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
-  __ push(ecx);  // Argument 3.
-  __ jmp(&push_rest);
+  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
+  __ jmp(&setup_rest);
 
-  __ bind(&push_two_byte);
-  __ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
-  __ push(ecx);  // Argument 4.
+  __ bind(&setup_two_byte);
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);  // edi is a smi (value * 2).
+  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
+  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
   __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
-  __ push(ecx);  // Argument 3.
+  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
 
-  __ bind(&push_rest);
+  __ bind(&setup_rest);
 
   // Argument 2: Previous index.
-  __ push(ebx);
+  __ mov(Operand(esp, 1 * kPointerSize), ebx);
 
   // Argument 1: Subject string.
-  __ push(eax);
+  __ mov(Operand(esp, 0 * kPointerSize), eax);
 
   // Locate the code entry and call it.
   __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ call(Operand(edx));
-  // Remove arguments.
-  __ add(Operand(esp), Immediate(7 * kPointerSize));
+  __ CallCFunction(edx, kRegExpExecuteArguments);
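The seven mov instructions above replace the old push sequence: PrepareCallCFunction reserves an aligned frame once and each argument is stored into a fixed slot, so esp keeps the alignment the C ABI expects at the call. A minimal sketch of the slot layout on ia32 (kPointerSize == 4); the parameter names are illustrative, not the real irregexp entry signature:

    // Sketch only: slot layout for the 7-argument C call (ia32).
    typedef int (*RegExpCodeEntry)(
        void* subject,               // esp + 0:  subject string
        int previous_index,          // esp + 4:  start position
        const void* input_start,     // esp + 8:  start of string data
        const void* input_end,       // esp + 12: end of string data
        int* static_offsets_vector,  // esp + 16: capture offsets buffer
        void* stack_high,            // esp + 20: top of backtracking stack
        int direct_call);            // esp + 24: 1 when called from JS code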
 
   // Check the result.
   Label success;
@@ -9263,7 +11122,7 @@
   // Result must now be exception. If there is no pending exception already a
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
-  // TODO(592) Rerunning the RegExp to get the stack overflow exception.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   ExternalReference pending_exception(Top::k_pending_exception_address);
   __ mov(eax,
          Operand::StaticVariable(ExternalReference::the_hole_value_location()));
@@ -9314,7 +11173,6 @@
   // ecx: offsets vector
   // edx: number of capture registers
   Label next_capture, done;
-  __ mov(eax, Operand(esp, kPreviousIndexOffset));
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
@@ -9322,15 +11180,7 @@
   __ j(negative, &done);
   // Read the value from the static offsets vector buffer.
   __ mov(edi, Operand(ecx, edx, times_int_size, 0));
-  // Perform explicit shift
-  ASSERT_EQ(0, kSmiTag);
-  __ shl(edi, kSmiTagSize);
-  // Add previous index (from its stack slot) if value is not negative.
-  Label capture_negative;
-  // Carry flag set by shift above.
-  __ j(negative, &capture_negative, not_taken);
-  __ add(edi, Operand(eax));  // Add previous index (adding smi to smi).
-  __ bind(&capture_negative);
+  __ SmiTag(edi);
   // Store the smi value in the last match info.
   __ mov(FieldOperand(ebx,
                       edx,
@@ -9346,8 +11196,8 @@
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
-#endif  // V8_NATIVE_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif  // V8_INTERPRETED_REGEXP
 }
 
 
@@ -9358,14 +11208,6 @@
                                                          Register scratch2,
                                                          bool object_is_smi,
                                                          Label* not_found) {
-  // Currently only lookup for smis. Check for smi if object is not known to be
-  // a smi.
-  if (!object_is_smi) {
-    ASSERT(kSmiTag == 0);
-    __ test(object, Immediate(kSmiTagMask));
-    __ j(not_zero, not_found);
-  }
-
   // Use of registers. Register result is used as a temporary.
   Register number_string_cache = result;
   Register mask = scratch1;
@@ -9381,23 +11223,74 @@
   __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
   __ shr(mask, 1);  // Divide length by two (length is not a smi).
   __ sub(Operand(mask), Immediate(1));  // Make mask.
+
   // Calculate the entry in the number string cache. The hash value in the
-  // number string cache for smis is just the smi value.
-  __ mov(scratch, object);
-  __ SmiUntag(scratch);
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Label smi_hash_calculated;
+  Label load_result_from_cache;
+  if (object_is_smi) {
+    __ mov(scratch, object);
+    __ SmiUntag(scratch);
+  } else {
+    Label not_smi, hash_calculated;
+    ASSERT(kSmiTag == 0);
+    __ test(object, Immediate(kSmiTagMask));
+    __ j(not_zero, &not_smi);
+    __ mov(scratch, object);
+    __ SmiUntag(scratch);
+    __ jmp(&smi_hash_calculated);
+    __ bind(&not_smi);
+    __ cmp(FieldOperand(object, HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    __ j(not_equal, not_found);
+    ASSERT_EQ(8, kDoubleSize);
+    __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+    __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+    // Object is heap number and hash is now in scratch. Calculate cache index.
+    __ and_(scratch, Operand(mask));
+    Register index = scratch;
+    Register probe = mask;
+    __ mov(probe,
+           FieldOperand(number_string_cache,
+                        index,
+                        times_twice_pointer_size,
+                        FixedArray::kHeaderSize));
+    __ test(probe, Immediate(kSmiTagMask));
+    __ j(zero, not_found);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope fscope(SSE2);
+      __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+      __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+      __ comisd(xmm0, xmm1);
+    } else {
+      __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+      __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+      __ FCmp();
+    }
+    __ j(parity_even, not_found);  // Bail out if NaN is involved.
+    __ j(not_equal, not_found);  // The cache did not contain this value.
+    __ jmp(&load_result_from_cache);
+  }
+
+  __ bind(&smi_hash_calculated);
+  // Object is smi and hash is now in scratch. Calculate cache index.
   __ and_(scratch, Operand(mask));
+  Register index = scratch;
   // Check if the entry is the smi we are looking for.
   __ cmp(object,
          FieldOperand(number_string_cache,
-                      scratch,
+                      index,
                       times_twice_pointer_size,
                       FixedArray::kHeaderSize));
   __ j(not_equal, not_found);
 
   // Get the result from the cache.
+  __ bind(&load_result_from_cache);
   __ mov(result,
          FieldOperand(number_string_cache,
-                      scratch,
+                      index,
                       times_twice_pointer_size,
                       FixedArray::kHeaderSize + kPointerSize));
   __ IncrementCounter(&Counters::number_to_string_native, 1);
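The inline hash above mirrors Heap::GetNumberStringCache: smis hash to their own value, heap numbers to the xor of their two machine words. A minimal standalone sketch of the index computation (function name is illustrative):

    #include <cstdint>
    #include <cstring>

    // Sketch only: cache index for a heap-number key, as computed inline.
    uint32_t NumberStringCacheIndex(double value, uint32_t mask) {
      uint32_t words[2];
      std::memcpy(words, &value, sizeof(words));  // low word, then high word
      return (words[0] ^ words[1]) & mask;        // xor halves, mask to capacity
    }
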
@@ -9415,7 +11308,21 @@
 
   __ bind(&runtime);
   // Handle number to string in the runtime system if not found in the cache.
-  __ TailCallRuntime(ExternalReference(Runtime::kNumberToString), 1, 1);
+  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  masm->RecordWriteHelper(object_, addr_, scratch_);
+  masm->ret(0);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+  ASSERT(cc != equal);
+  ASSERT((cc == less) || (cc == less_equal)
+      || (cc == greater) || (cc == greater_equal));
+  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
 }
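NegativeComparisonResult captures the rule that any ordered comparison involving NaN (or undefined) must come out false: the stub answers with whichever of LESS or GREATER fails the condition being tested. A small self-checking sketch, assuming the conventional -1/0/1 encoding of LESS/EQUAL/GREATER consumed by the COMPARE builtin:

    #include <cassert>

    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };  // assumed encoding

    int main() {
      // "NaN < x" / "NaN <= x": the stub returns GREATER, satisfying neither.
      assert(!(GREATER < 0) && !(GREATER <= 0));
      // "NaN > x" / "NaN >= x": the stub returns LESS, satisfying neither.
      assert(!(LESS > 0) && !(LESS >= 0));
      return 0;
    }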
 
 
@@ -9425,56 +11332,80 @@
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
 
-  if (cc_ == equal) {  // Both strict and non-strict.
-    Label slow;  // Fallthrough label.
-    // Equality is almost reflexive (everything but NaN), so start by testing
-    // for "identity and not NaN".
-    {
-      Label not_identical;
-      __ cmp(eax, Operand(edx));
-      __ j(not_equal, &not_identical);
-      // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
-      // so we do the second best thing - test it ourselves.
+  // Identical objects can be compared fast, but there are some tricky cases
+  // for NaN and undefined.
+  {
+    Label not_identical;
+    __ cmp(eax, Operand(edx));
+    __ j(not_equal, &not_identical);
 
-      if (never_nan_nan_) {
-        __ Set(eax, Immediate(0));
-        __ ret(0);
-      } else {
-        Label return_equal;
-        Label heap_number;
-        // If it's not a heap number, then return equal.
-        __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-               Immediate(Factory::heap_number_map()));
-        __ j(equal, &heap_number);
-        __ bind(&return_equal);
-        __ Set(eax, Immediate(0));
-        __ ret(0);
+    if (cc_ != equal) {
+      // Check for undefined.  undefined OP undefined is false even though
+      // undefined == undefined.
+      Label check_for_nan;
+      __ cmp(edx, Factory::undefined_value());
+      __ j(not_equal, &check_for_nan);
+      __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+      __ ret(0);
+      __ bind(&check_for_nan);
+    }
 
-        __ bind(&heap_number);
-        // It is a heap number, so return non-equal if it's NaN and equal if
-        // it's not NaN.
-        // The representation of NaN values has all exponent bits (52..62) set,
-        // and not all mantissa bits (0..51) clear.
-        // We only accept QNaNs, which have bit 51 set.
-        // Read top bits of double representation (second word of value).
+    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+    // so we do the second best thing - test it ourselves.
+    // Note: if cc_ != equal, never_nan_nan_ is not used.
+    if (never_nan_nan_ && (cc_ == equal)) {
+      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+      __ ret(0);
+    } else {
+      Label return_equal;
+      Label heap_number;
+      // If it's not a heap number, then return equal.
+      __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+             Immediate(Factory::heap_number_map()));
+      __ j(equal, &heap_number);
+      __ bind(&return_equal);
+      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+      __ ret(0);
 
-        // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
-        // all bits in the mask are set. We only need to check the word
-        // that contains the exponent and high bit of the mantissa.
-        ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
-        __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
-        __ xor_(eax, Operand(eax));
-        // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
-        // bits.
-        __ add(edx, Operand(edx));
-        __ cmp(edx, kQuietNaNHighBitsMask << 1);
+      __ bind(&heap_number);
+      // It is a heap number, so return non-equal if it's NaN and equal if
+      // it's not NaN.
+      // The representation of NaN values has all exponent bits (52..62) set,
+      // and not all mantissa bits (0..51) clear.
+      // We only accept QNaNs, which have bit 51 set.
+      // Read top bits of double representation (second word of value).
+
+      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+      // all bits in the mask are set. We only need to check the word
+      // that contains the exponent and high bit of the mantissa.
+      ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+      __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+      __ xor_(eax, Operand(eax));
+      // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+      // bits.
+      __ add(edx, Operand(edx));
+      __ cmp(edx, kQuietNaNHighBitsMask << 1);
+      if (cc_ == equal) {
+        ASSERT_NE(1, EQUAL);
         __ setcc(above_equal, eax);
         __ ret(0);
+      } else {
+        Label nan;
+        __ j(above_equal, &nan);
+        __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+        __ ret(0);
+        __ bind(&nan);
+        __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+        __ ret(0);
       }
-
-      __ bind(&not_identical);
     }
 
+    __ bind(&not_identical);
+  }
+
+  if (cc_ == equal) {  // Both strict and non-strict.
+    Label slow;  // Fallthrough label.
+
     // If we're doing a strict equality comparison, we don't have to do
     // type conversion, so we generate code to do fast comparison for objects
     // and oddballs. Non-smi numbers and strings still go through the usual
@@ -9560,63 +11491,70 @@
   __ push(edx);
   __ push(ecx);
 
-  // Inlined floating point compare.
-  // Call builtin if operands are not floating point or smi.
-  Label check_for_symbols;
-  Label unordered;
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    CpuFeatures::Scope use_cmov(CMOV);
+  // Generate the number comparison code.
+  if (include_number_compare_) {
+    Label non_number_comparison;
+    Label unordered;
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope use_sse2(SSE2);
+      CpuFeatures::Scope use_cmov(CMOV);
 
-    FloatingPointHelper::LoadSSE2Operands(masm, &check_for_symbols);
-    __ comisd(xmm0, xmm1);
+      FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+      __ comisd(xmm0, xmm1);
 
-    // Jump to builtin for NaN.
-    __ j(parity_even, &unordered, not_taken);
-    __ mov(eax, 0);  // equal
-    __ mov(ecx, Immediate(Smi::FromInt(1)));
-    __ cmov(above, eax, Operand(ecx));
-    __ mov(ecx, Immediate(Smi::FromInt(-1)));
-    __ cmov(below, eax, Operand(ecx));
-    __ ret(2 * kPointerSize);
-  } else {
-    FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
-    FloatingPointHelper::LoadFloatOperands(masm, ecx);
-    __ FCmp();
+      // Don't base result on EFLAGS when a NaN is involved.
+      __ j(parity_even, &unordered, not_taken);
+      // Return a result of -1, 0, or 1, based on EFLAGS.
+      __ mov(eax, 0);  // equal
+      __ mov(ecx, Immediate(Smi::FromInt(1)));
+      __ cmov(above, eax, Operand(ecx));
+      __ mov(ecx, Immediate(Smi::FromInt(-1)));
+      __ cmov(below, eax, Operand(ecx));
+      __ ret(2 * kPointerSize);
+    } else {
+      FloatingPointHelper::CheckFloatOperands(
+          masm, &non_number_comparison, ebx);
+      FloatingPointHelper::LoadFloatOperands(masm, ecx);
+      __ FCmp();
 
-    // Jump to builtin for NaN.
-    __ j(parity_even, &unordered, not_taken);
+      // Don't base result on EFLAGS when a NaN is involved.
+      __ j(parity_even, &unordered, not_taken);
 
-    Label below_lbl, above_lbl;
-    // Return a result of -1, 0, or 1, to indicate result of comparison.
-    __ j(below, &below_lbl, not_taken);
-    __ j(above, &above_lbl, not_taken);
+      Label below_label, above_label;
+      // Return a result of -1, 0, or 1, based on EFLAGS. In all cases remove
+      // two arguments from the stack as they have been pushed in preparation
+      // for a possible runtime call.
+      __ j(below, &below_label, not_taken);
+      __ j(above, &above_label, not_taken);
 
-    __ xor_(eax, Operand(eax));  // equal
-    // Both arguments were pushed in case a runtime call was needed.
-    __ ret(2 * kPointerSize);
+      __ xor_(eax, Operand(eax));
+      __ ret(2 * kPointerSize);
 
-    __ bind(&below_lbl);
-    __ mov(eax, Immediate(Smi::FromInt(-1)));
-    __ ret(2 * kPointerSize);
+      __ bind(&below_label);
+      __ mov(eax, Immediate(Smi::FromInt(-1)));
+      __ ret(2 * kPointerSize);
 
-    __ bind(&above_lbl);
-    __ mov(eax, Immediate(Smi::FromInt(1)));
+      __ bind(&above_label);
+      __ mov(eax, Immediate(Smi::FromInt(1)));
+      __ ret(2 * kPointerSize);
+    }
+
+    // If one of the numbers was NaN, then the result is always false.
+    // The cc is never not-equal.
+    __ bind(&unordered);
+    ASSERT(cc_ != not_equal);
+    if (cc_ == less || cc_ == less_equal) {
+      __ mov(eax, Immediate(Smi::FromInt(1)));
+    } else {
+      __ mov(eax, Immediate(Smi::FromInt(-1)));
+    }
     __ ret(2 * kPointerSize);  // eax, edx were pushed
+
+    // The number comparison code did not provide a valid result.
+    __ bind(&non_number_comparison);
   }
-  // If one of the numbers was NaN, then the result is always false.
-  // The cc is never not-equal.
-  __ bind(&unordered);
-  ASSERT(cc_ != not_equal);
-  if (cc_ == less || cc_ == less_equal) {
-    __ mov(eax, Immediate(Smi::FromInt(1)));
-  } else {
-    __ mov(eax, Immediate(Smi::FromInt(-1)));
-  }
-  __ ret(2 * kPointerSize);  // eax, edx were pushed
 
   // Fast negative check for symbol-to-symbol equality.
-  __ bind(&check_for_symbols);
   Label check_for_strings;
   if (cc_ == equal) {
     BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
@@ -9657,14 +11595,7 @@
     builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
   } else {
     builtin = Builtins::COMPARE;
-    int ncr;  // NaN compare result
-    if (cc_ == less || cc_ == less_equal) {
-      ncr = GREATER;
-    } else {
-      ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
-      ncr = LESS;
-    }
-    __ push(Immediate(Smi::FromInt(ncr)));
+    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
   }
 
   // Restore return address on the stack.
@@ -9700,7 +11631,7 @@
   __ push(eax);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
+  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
 }
 
 
@@ -9797,7 +11728,7 @@
 // If true, a Handle<T> passed by value is passed and returned by
 // using the location_ field directly.  If false, it is passed and
 // returned as a pointer to a handle.
-#ifdef USING_MAC_ABI
+#ifdef USING_BSD_ABI
 static const bool kPassHandlesDirectly = true;
 #else
 static const bool kPassHandlesDirectly = false;
@@ -9861,9 +11792,7 @@
   __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
   __ ret(0);
   __ bind(&promote_scheduled_exception);
-  __ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException),
-                     0,
-                     1);
+  __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
 }
 
 
@@ -9872,7 +11801,8 @@
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
                               bool do_gc,
-                              bool always_allocate_scope) {
+                              bool always_allocate_scope,
+                              int /* alignment_skew */) {
   // eax: result parameter for PerformGC, if any
   // ebx: pointer to C function  (C callee-saved)
   // ebp: frame pointer  (restored after C call)
@@ -9882,7 +11812,17 @@
 
   // Result returned in eax, or eax+edx if result_size_ is 2.
 
+  // Check stack alignment.
+  if (FLAG_debug_code) {
+    __ CheckStackAlignment();
+  }
+
   if (do_gc) {
+    // Pass failure code returned from last attempt as first argument to
+    // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
+    // stack alignment is known to be correct. This function takes one argument
+    // which is passed on the stack, and we know that the stack has been
+    // prepared to pass at least one argument.
     __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
     __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
   }
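The direct call to PerformGC is safe only because the exit frame was set up with the required alignment already in place. A sketch of the invariant the debug-mode CheckStackAlignment asserts; the 16-byte figure is an assumption matching the OS X ia32 ABI, other ABIs may only require pointer alignment:

    #include <cassert>
    #include <cstdint>

    // Sketch only: esp must be frame-aligned at every outgoing C call.
    void CheckStackAlignmentModel(uintptr_t esp, uintptr_t activation_alignment) {
      if (activation_alignment > sizeof(void*)) {  // e.g. 16 on OS X ia32
        assert(esp % activation_alignment == 0);
      }
    }
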
@@ -10228,55 +12168,208 @@
 }
 
 
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
-  switch (cc_) {
-    case less: return "CompareStub_LT";
-    case greater: return "CompareStub_GT";
-    case less_equal: return "CompareStub_LE";
-    case greater_equal: return "CompareStub_GE";
-    case not_equal: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_NE_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_NO_NAN";
-        } else {
-          return "CompareStub_NE";
-        }
-      }
-    }
-    case equal: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_EQ_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_NO_NAN";
-        } else {
-          return "CompareStub_EQ";
-        }
-      }
-    }
-    default: return "CompareStub";
-  }
+int CompareStub::MinorKey() {
+  // Encode the four parameters in a unique 16 bit value. To avoid duplicate
+  // stubs, the never-NaN-NaN condition is only taken into account if the
+  // condition is equal.
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
+  return ConditionField::encode(static_cast<unsigned>(cc_))
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+         | IncludeNumberCompareField::encode(include_number_compare_);
 }
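The new key packs four fields instead of hand-rolled shifts. A sketch of the resulting 16-bit layout; the field order is an assumption based on reading the BitField uses in declaration order, only the 13-bit condition width is pinned down by the ASSERT above:

    #include <cassert>
    #include <cstdint>

    // Sketch only; field order assumed: bits 0-12 condition, 13 strict,
    // 14 never-NaN-NaN, 15 include-number-compare.
    uint16_t MakeMinorKey(unsigned cc, bool strict, bool never_nan_nan,
                          bool include_number_compare) {
      assert(cc < (1u << 13));
      return static_cast<uint16_t>(cc |
                                   (strict << 13) |
                                   (never_nan_nan << 14) |
                                   (include_number_compare << 15));
    }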
 
 
-int CompareStub::MinorKey() {
-  // Encode the three parameters in a unique 16 bit value.
-  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
-  int nnn_value = (never_nan_nan_ ? 2 : 0);
-  if (cc_ != equal) nnn_value = 0;  // Avoid duplicate stubs.
-  return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
+  switch (cc_) {
+    case less: cc_name = "LT"; break;
+    case greater: cc_name = "GT"; break;
+    case less_equal: cc_name = "LE"; break;
+    case greater_equal: cc_name = "GE"; break;
+    case equal: cc_name = "EQ"; break;
+    case not_equal: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
+  }
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == equal || cc_ == not_equal)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s",
+               cc_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name);
+  return name_;
+}
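As a worked example of the naming scheme, a strict equality stub with NaN tracking disabled and number comparison included formats as shown in this sketch:

    #include <cstdio>

    int main() {
      char name[100];
      std::snprintf(name, sizeof(name), "CompareStub_%s%s%s%s",
                    "EQ", "_STRICT", "_NO_NAN", "");
      std::puts(name);  // prints CompareStub_EQ_STRICT_NO_NAN
      return 0;
    }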
+
+
+void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
+                                          Register object,
+                                          Register index,
+                                          Register scratch,
+                                          Register result,
+                                          Label* receiver_not_string,
+                                          Label* index_not_smi,
+                                          Label* index_out_of_range,
+                                          Label* slow_case) {
+  Label not_a_flat_string;
+  Label try_again_with_new_string;
+  Label ascii_string;
+  Label got_char_code;
+
+  // If the receiver is a smi trigger the non-string case.
+  ASSERT(kSmiTag == 0);
+  __ test(object, Immediate(kSmiTagMask));
+  __ j(zero, receiver_not_string);
+
+  // Fetch the instance type of the receiver into result register.
+  __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
+  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the non-string case.
+  __ test(result, Immediate(kIsNotStringMask));
+  __ j(not_zero, receiver_not_string);
+
+  // If the index is non-smi trigger the non-smi case.
+  ASSERT(kSmiTag == 0);
+  __ test(index, Immediate(kSmiTagMask));
+  __ j(not_zero, index_not_smi);
+
+  // Check for index out of range.
+  __ cmp(index, FieldOperand(object, String::kLengthOffset));
+  __ j(above_equal, index_out_of_range);
+
+  __ bind(&try_again_with_new_string);
+  // ----------- S t a t e -------------
+  //  -- object  : string to access
+  //  -- result  : instance type of the string
+  //  -- index   : non-negative smi index < length
+  // -----------------------------------
+
+  // We need special handling for non-flat strings.
+  ASSERT(kSeqStringTag == 0);
+  __ test(result, Immediate(kStringRepresentationMask));
+  __ j(not_zero, &not_a_flat_string);
+
+  // Check for 1-byte or 2-byte string.
+  ASSERT(kAsciiStringTag != 0);
+  __ test(result, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string);
+
+  // 2-byte string.
+  // Load the 2-byte character code into the result register.
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);  // index is a smi (value * 2).
+  __ movzx_w(result, FieldOperand(object,
+                                  index, times_1,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&got_char_code);
+
+  // Handle non-flat strings.
+  __ bind(&not_a_flat_string);
+  __ and_(result, kStringRepresentationMask);
+  __ cmp(result, kConsStringTag);
+  __ j(not_equal, slow_case);
+
+  // ConsString.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ mov(result, FieldOperand(object, ConsString::kSecondOffset));
+  __ cmp(Operand(result), Factory::empty_string());
+  __ j(not_equal, slow_case);
+  // Get the first of the two strings and load its instance type.
+  __ mov(object, FieldOperand(object, ConsString::kFirstOffset));
+  __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
+  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+  __ jmp(&try_again_with_new_string);
+
+  // ASCII string.
+  __ bind(&ascii_string);
+  // Put untagged index into scratch register.
+  __ mov(scratch, index);
+  __ SmiUntag(scratch);
+
+  // Load the byte into the result register.
+  __ movzx_b(result, FieldOperand(object,
+                                  scratch, times_1,
+                                  SeqAsciiString::kHeaderSize));
+  __ bind(&got_char_code);
+  __ SmiTag(result);
+}
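The retry loop above handles the one non-flat shape the stub accepts: a cons string whose second half is the empty string. A plain C++ model of that dispatch (illustrative types, not V8 heap objects); an empty optional stands in for the index_out_of_range and slow_case exits:

    #include <optional>
    #include <string>

    struct ConsStr;
    struct Str {                      // flat when cons == nullptr
      std::string flat;
      const ConsStr* cons = nullptr;
    };
    struct ConsStr { const Str* first; const Str* second; };

    std::optional<int> FastCharCodeAt(const Str* s, size_t index) {
      while (s->cons != nullptr) {
        const Str* second = s->cons->second;
        if (second->cons != nullptr || !second->flat.empty())
          return std::nullopt;        // not really flat: defer to the runtime
        s = s->cons->first;           // unwrap and try again
      }
      if (index >= s->flat.size()) return std::nullopt;  // out of range
      return static_cast<unsigned char>(s->flat[index]);
    }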
+
+
+void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
+                                        Register code,
+                                        Register result,
+                                        InvokeFlag flag) {
+  ASSERT(!code.is(result));
+
+  Label slow_case;
+  Label exit;
+
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiShiftSize == 0);
+  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+  __ test(code,
+          Immediate(kSmiTagMask |
+                    ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+  __ j(not_zero, &slow_case, not_taken);
+
+  __ Set(result, Immediate(Factory::single_character_string_cache()));
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiTagSize == 1);
+  ASSERT(kSmiShiftSize == 0);
+  // At this point the code register contains a smi-tagged ascii char code.
+  __ mov(result, FieldOperand(result,
+                              code, times_half_pointer_size,
+                              FixedArray::kHeaderSize));
+  __ cmp(result, Factory::undefined_value());
+  __ j(equal, &slow_case, not_taken);
+  __ jmp(&exit);
+
+  __ bind(&slow_case);
+  if (flag == CALL_FUNCTION) {
+    __ push(code);
+    __ CallRuntime(Runtime::kCharFromCode, 1);
+    if (!result.is(eax)) {
+      __ mov(result, eax);
+    }
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    ASSERT(result.is(eax));
+    __ pop(eax);  // Save return address.
+    __ push(code);
+    __ push(eax);  // Restore return address.
+    __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
+  }
+
+  __ bind(&exit);
+  if (flag == JUMP_FUNCTION) {
+    ASSERT(result.is(eax));
+    __ ret(0);
+  }
 }
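The fast case above is a single lookup in the single character string cache, indexed by the char code; undefined marks a miss. A plain C++ model (illustrative containers, not V8 heap objects):

    #include <array>
    #include <optional>
    #include <string>

    constexpr int kMaxAsciiCharCode = 127;

    // Sketch only: nullopt models Factory::undefined_value(), i.e. "call the
    // runtime".
    std::optional<std::string> CharFromCodeFast(
        int code,
        const std::array<std::optional<std::string>,
                         kMaxAsciiCharCode + 1>& cache) {
      if (code < 0 || code > kMaxAsciiCharCode) return std::nullopt;  // slow case
      return cache[code];
    }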
 
 
@@ -10307,6 +12400,7 @@
   // Check if either of the strings are empty. In that case return the other.
   Label second_not_zero_length, both_not_zero_length;
   __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
+  ASSERT(kSmiTag == 0);
   __ test(ecx, Operand(ecx));
   __ j(not_zero, &second_not_zero_length);
   // Second string is empty, result is first string which is already in eax.
@@ -10314,6 +12408,7 @@
   __ ret(2 * kPointerSize);
   __ bind(&second_not_zero_length);
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+  ASSERT(kSmiTag == 0);
   __ test(ebx, Operand(ebx));
   __ j(not_zero, &both_not_zero_length);
   // First string is empty, result is second string which is in edx.
@@ -10323,16 +12418,19 @@
 
   // Both strings are non-empty.
   // eax: first string
-  // ebx: length of first string
-  // ecx: length of second string
+  // ebx: length of first string as a smi
+  // ecx: length of second string as a smi
   // edx: second string
   // Look at the length of the result of adding the two strings.
   Label string_add_flat_result, longer_than_two;
   __ bind(&both_not_zero_length);
   __ add(ebx, Operand(ecx));
+  ASSERT(Smi::kMaxValue == String::kMaxLength);
+  // Handle exceptionally long strings in the runtime system.
+  __ j(overflow, &string_add_runtime);
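Because both lengths are smi-tagged (value * 2 on ia32) and Smi::kMaxValue equals String::kMaxLength, the tagged add overflows the machine word exactly when the combined length would be unrepresentable, so the single jo doubles as the too-long check. A sketch of the arithmetic for non-negative lengths:

    #include <cassert>
    #include <cstdint>

    // Sketch only: tagged smi add overflow <=> untagged sum > Smi::kMaxValue.
    const int32_t kSmiMaxValue = (1 << 30) - 1;  // 31-bit smis on ia32

    bool TaggedAddOverflows(int32_t a, int32_t b) {  // a, b: untagged lengths
      int64_t tagged_sum = int64_t{2} * a + int64_t{2} * b;
      bool overflow = tagged_sum > INT32_MAX;
      assert(overflow == (int64_t{a} + b > kSmiMaxValue));
      return overflow;
    }
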
   // Use the runtime system when adding two one character strings, as it
   // contains optimizations for this specific case using the symbol table.
-  __ cmp(ebx, 2);
+  __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
   __ j(not_equal, &longer_than_two);
 
   // Check that both strings are non-external ascii strings.
@@ -10346,22 +12444,19 @@
   // Try to lookup two character string in symbol table. If it is not found
   // just allocate a new one.
   Label make_two_character_string, make_flat_ascii_string;
-  GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
-                                       &make_two_character_string);
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
+  __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
 
   __ bind(&make_two_character_string);
-  __ Set(ebx, Immediate(2));
+  __ Set(ebx, Immediate(Smi::FromInt(2)));
   __ jmp(&make_flat_ascii_string);
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ cmp(ebx, String::kMinNonFlatLength);
+  __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
   __ j(below, &string_add_flat_result);
-  // Handle exceptionally long strings in the runtime system.
-  ASSERT((String::kMaxLength & 0x80000000) == 0);
-  __ cmp(ebx, String::kMaxLength);
-  __ j(above, &string_add_runtime);
 
   // If result is not supposed to be flat allocate a cons string object. If both
   // strings are ascii the result is an ascii cons string.
@@ -10378,6 +12473,7 @@
   __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
+  if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
   __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
   __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
          Immediate(String::kEmptyHashField));
@@ -10394,7 +12490,7 @@
   // Handle creating a flat result. First check that both strings are not
   // external strings.
   // eax: first string
-  // ebx: length of resulting flat string
+  // ebx: length of resulting flat string as a smi
   // edx: second string
   __ bind(&string_add_flat_result);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -10409,7 +12505,7 @@
   __ j(equal, &string_add_runtime);
   // Now check if both strings are ascii strings.
   // eax: first string
-  // ebx: length of resulting flat string
+  // ebx: length of resulting flat string as a smi
   // edx: second string
   Label non_ascii_string_add_flat_result;
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -10424,7 +12520,8 @@
 
   __ bind(&make_flat_ascii_string);
   // Both strings are ascii strings. As they are short they are both flat.
-  // ebx: length of resulting flat string
+  // ebx: length of resulting flat string as a smi
+  __ SmiUntag(ebx);
   __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
   // eax: result string
   __ mov(ecx, eax);
@@ -10433,27 +12530,29 @@
   // Load first argument and locate first character.
   __ mov(edx, Operand(esp, 2 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+  __ SmiUntag(edi);
   __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: first character of result
   // edx: first char of first argument
   // edi: length of first argument
-  GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
   // Load second argument and locate first character.
   __ mov(edx, Operand(esp, 1 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+  __ SmiUntag(edi);
   __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: next character of result
   // edx: first char of second argument
   // edi: length of second argument
-  GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
 
   // Handle creating a flat two byte result.
   // eax: first string - known to be two byte
-  // ebx: length of resulting flat string
+  // ebx: length of resulting flat string as a smi
   // edx: second string
   __ bind(&non_ascii_string_add_flat_result);
   __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
@@ -10462,6 +12561,7 @@
   __ j(not_zero, &string_add_runtime);
   // Both strings are two byte strings. As they are short they are both
   // flat.
+  __ SmiUntag(ebx);
   __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
   // eax: result string
   __ mov(ecx, eax);
@@ -10471,37 +12571,39 @@
   // Load first argument and locate first character.
   __ mov(edx, Operand(esp, 2 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+  __ SmiUntag(edi);
   __ add(Operand(edx),
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: first character of result
   // edx: first char of first argument
   // edi: length of first argument
-  GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
   // Load second argument and locate first character.
   __ mov(edx, Operand(esp, 1 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+  __ SmiUntag(edi);
   __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: next character of result
   // edx: first char of second argument
   // edi: length of second argument
-  GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
 
   // Just jump to runtime to add the two strings.
   __ bind(&string_add_runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 }
 
 
-void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
-                                            Register dest,
-                                            Register src,
-                                            Register count,
-                                            Register scratch,
-                                            bool ascii) {
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+                                          Register dest,
+                                          Register src,
+                                          Register count,
+                                          Register scratch,
+                                          bool ascii) {
   Label loop;
   __ bind(&loop);
   // This loop just copies one character at a time, as it is only used for very
@@ -10522,12 +12624,12 @@
 }
 
 
-void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
-                                               Register dest,
-                                               Register src,
-                                               Register count,
-                                               Register scratch,
-                                               bool ascii) {
+void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
+                                             Register dest,
+                                             Register src,
+                                             Register count,
+                                             Register scratch,
+                                             bool ascii) {
   // Copy characters using rep movs of doublewords. Align destination on 4 byte
   // boundary before starting rep movs. Copy remaining characters after running
   // rep movs.
@@ -10556,6 +12658,7 @@
   // Copy from edi to esi using rep movs instruction.
   __ mov(scratch, count);
   __ sar(count, 2);  // Number of doublewords to copy.
+  __ cld();
   __ rep_movs();
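The added cld() pins the direction flag so rep movs copies forward, since the generated code cannot assume the flag's state on entry. The overall strategy, modeled in plain C++, copies whole doublewords first and then mops up the remaining bytes:

    #include <cstdint>
    #include <cstring>

    // Sketch only: the doubleword-then-tail copy the stub emits.
    void CopyCharsRep(uint8_t* dest, const uint8_t* src, size_t byte_count) {
      size_t dwords = byte_count >> 2;              // count of 4-byte moves
      std::memcpy(dest, src, dwords * 4);           // models rep movsd
      std::memcpy(dest + dwords * 4, src + dwords * 4,
                  byte_count & 3);                  // 0-3 trailing bytes
    }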
 
   // Find number of bytes left.
@@ -10581,13 +12684,13 @@
 }
 
 
-void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
-                                                          Register c1,
-                                                          Register c2,
-                                                          Register scratch1,
-                                                          Register scratch2,
-                                                          Register scratch3,
-                                                          Label* not_found) {
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                        Register c1,
+                                                        Register c2,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Label* not_found) {
   // Register scratch3 is the general scratch register in this function.
   Register scratch = scratch3;
 
@@ -10627,10 +12730,7 @@
 
   // Calculate capacity mask from the symbol table capacity.
   Register mask = scratch2;
-  static const int kCapacityOffset =
-      FixedArray::kHeaderSize +
-      SymbolTable::kCapacityIndex * kPointerSize;
-  __ mov(mask, FieldOperand(symbol_table, kCapacityOffset));
+  __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
   __ SmiUntag(mask);
   __ sub(Operand(mask), Immediate(1));
 
@@ -10655,23 +12755,20 @@
 
     // Load the entry from the symbol table.
     Register candidate = scratch;  // Scratch register contains candidate.
-    ASSERT_EQ(1, SymbolTableShape::kEntrySize);
-    static const int kFirstElementOffset =
-        FixedArray::kHeaderSize +
-        SymbolTable::kPrefixStartIndex * kPointerSize +
-        SymbolTableShape::kPrefixSize * kPointerSize;
+    ASSERT_EQ(1, SymbolTable::kEntrySize);
     __ mov(candidate,
            FieldOperand(symbol_table,
                         scratch,
                         times_pointer_size,
-                        kFirstElementOffset));
+                        SymbolTable::kElementsStartOffset));
 
     // If entry is undefined no string with this hash can be found.
     __ cmp(candidate, Factory::undefined_value());
     __ j(equal, not_found);
 
     // If length is not 2 the string is not a candidate.
-    __ cmp(FieldOperand(candidate, String::kLengthOffset), Immediate(2));
+    __ cmp(FieldOperand(candidate, String::kLengthOffset),
+           Immediate(Smi::FromInt(2)));
     __ j(not_equal, &next_probe[i]);
 
     // As we are out of registers save the mask on the stack and use that
@@ -10708,10 +12805,10 @@
 }
 
 
-void StringStubBase::GenerateHashInit(MacroAssembler* masm,
-                                      Register hash,
-                                      Register character,
-                                      Register scratch) {
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+                                    Register hash,
+                                    Register character,
+                                    Register scratch) {
   // hash = character + (character << 10);
   __ mov(hash, character);
   __ shl(hash, 10);
@@ -10723,10 +12820,10 @@
 }
 
 
-void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
-                                              Register hash,
-                                              Register character,
-                                              Register scratch) {
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+                                            Register hash,
+                                            Register character,
+                                            Register scratch) {
   // hash += character;
   __ add(hash, Operand(character));
   // hash += hash << 10;
@@ -10740,9 +12837,9 @@
 }
 
 
-void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
-                                         Register hash,
-                                         Register scratch) {
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+                                       Register hash,
+                                       Register scratch) {
   // hash += hash << 3;
   __ mov(scratch, hash);
   __ shl(scratch, 3);
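Taken together, GenerateHashInit, GenerateHashAddCharacter and GenerateHashGetHash implement a one-at-a-time style string hash. A scalar sketch of the whole pipeline; the two finalization steps after the visible "hash += hash << 3" are assumed from the runtime StringHasher, and its zero-value fix-up is omitted here:

    #include <cstdint>

    uint32_t HashSequentialString(const uint8_t* chars, int length) {
      uint32_t hash = 0;
      for (int i = 0; i < length; i++) {  // Init / AddCharacter steps
        hash += chars[i];
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      hash += hash << 3;                  // GetHash finalization
      hash ^= hash >> 11;                 // assumed from StringHasher
      hash += hash << 15;                 // assumed from StringHasher
      return hash;
    }
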
@@ -10816,9 +12913,9 @@
 
   // Try to lookup two character string in symbol table.
   Label make_two_character_string;
-  GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
-                                     &make_two_character_string);
-  __ ret(2 * kPointerSize);
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
+  __ ret(3 * kPointerSize);
 
   __ bind(&make_two_character_string);
   // Setup registers for allocating the two character string.
@@ -10856,7 +12953,7 @@
   // edx: original value of esi
   // edi: first character of result
   // esi: character of sub string start
-  GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
+  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
   __ mov(esi, edx);  // Restore esi.
   __ IncrementCounter(&Counters::sub_string_native, 1);
   __ ret(3 * kPointerSize);
@@ -10895,14 +12992,14 @@
   // edx: original value of esi
   // edi: first character of result
   // esi: character of sub string start
-  GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
+  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
   __ mov(esi, edx);  // Restore esi.
   __ IncrementCounter(&Counters::sub_string_native, 1);
   __ ret(3 * kPointerSize);
 
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
 }
 
 
@@ -10915,6 +13012,9 @@
   Label result_not_equal;
   Label result_greater;
   Label compare_lengths;
+
+  __ IncrementCounter(&Counters::string_compare_native, 1);
+
   // Find minimum length.
   Label left_shorter;
   __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
@@ -10937,6 +13037,7 @@
   // Change index to run from -min_length to -1 by adding min_length
   // to string start. This means that loop ends when index reaches zero,
   // which doesn't need an additional compare.
+  __ SmiUntag(min_length);
   __ lea(left,
          FieldOperand(left,
                       min_length, times_1,
@@ -11012,13 +13113,12 @@
   __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
 
   // Compare flat ascii strings.
-  __ IncrementCounter(&Counters::string_compare_native, 1);
   GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
 
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
 #undef __
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index b84a6bb..0d3fee5 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -28,6 +28,8 @@
 #ifndef V8_IA32_CODEGEN_IA32_H_
 #define V8_IA32_CODEGEN_IA32_H_
 
+#include "ic-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -337,13 +339,17 @@
   bool in_spilled_code() const { return in_spilled_code_; }
   void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
 
+  // If the name is an inline runtime function call return the number of
+  // expected arguments. Otherwise return -1.
+  static int InlineRuntimeCallArgumentsCount(Handle<String> name);
+
  private:
   // Construction/Destruction
   explicit CodeGenerator(MacroAssembler* masm);
 
   // Accessors
   inline bool is_eval();
-  Scope* scope();
+  inline Scope* scope();
 
   // Generating deferred code.
   void ProcessDeferred();
@@ -351,6 +357,24 @@
   // State
   ControlDestination* destination() const { return state_->destination(); }
 
+  // Control of side-effect-free int32 expression compilation.
+  bool in_safe_int32_mode() { return in_safe_int32_mode_; }
+  void set_in_safe_int32_mode(bool value) { in_safe_int32_mode_ = value; }
+  bool safe_int32_mode_enabled() {
+    return FLAG_safe_int32_compiler && safe_int32_mode_enabled_;
+  }
+  void set_safe_int32_mode_enabled(bool value) {
+    safe_int32_mode_enabled_ = value;
+  }
+  void set_unsafe_bailout(BreakTarget* unsafe_bailout) {
+    unsafe_bailout_ = unsafe_bailout;
+  }
+
+  // Take the Result that is an untagged int32, and convert it to a tagged
+  // Smi or HeapNumber.  Remove the untagged_int32 flag from the result.
+  void ConvertInt32ResultToNumber(Result* value);
+  void ConvertInt32ResultToSmi(Result* value);
+
   // Track loop nesting level.
   int loop_nesting() const { return loop_nesting_; }
   void IncrementLoopNesting() { loop_nesting_++; }
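The safe-int32 machinery compiles side-effect-free subexpressions on untagged 32-bit values and, on any unrepresentable result, jumps to unsafe_bailout_ so the whole expression can be redone by the generic (tagged) compiler. The contract, modeled in C++ with an empty optional standing in for the bailout:

    #include <cstdint>
    #include <optional>

    // Sketch only: add in untagged int32; nullopt means "take the bailout".
    std::optional<int32_t> SafeInt32Add(int32_t a, int32_t b) {
      int64_t wide = int64_t{a} + b;
      if (wide < INT32_MIN || wide > INT32_MAX)
        return std::nullopt;  // re-evaluate with tagged numbers instead
      return static_cast<int32_t>(wide);
    }
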
@@ -407,7 +431,7 @@
     return ContextOperand(esi, Context::GLOBAL_INDEX);
   }
 
-  void LoadCondition(Expression* x,
+  void LoadCondition(Expression* expr,
                      ControlDestination* destination,
                      bool force_control);
   void Load(Expression* expr);
@@ -419,6 +443,11 @@
   // temporarily while the code generator is being transformed.
   void LoadAndSpill(Expression* expression);
 
+  // Evaluate an expression and place its value on top of the frame,
+  // using, or not using, the side-effect-free expression compiler.
+  void LoadInSafeInt32Mode(Expression* expr, BreakTarget* unsafe_bailout);
+  void LoadWithSafeInt32ModeDisabled(Expression* expr);
+
   // Read a value from a slot and leave it on top of the expression stack.
   Result LoadFromSlot(Slot* slot, TypeofState typeof_state);
   Result LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
@@ -460,10 +489,11 @@
   // control destination.
   void ToBoolean(ControlDestination* destination);
 
-  void GenericBinaryOperation(
-      Token::Value op,
-      StaticType* type,
-      OverwriteMode overwrite_mode);
+  // Generate code that computes a shortcutting logical operation.
+  void GenerateLogicalBooleanOperation(BinaryOperation* node);
+
+  void GenericBinaryOperation(BinaryOperation* expr,
+                              OverwriteMode overwrite_mode);
 
   // If possible, combine two constant smi values using op to produce
   // a smi result, and push it on the virtual frame, all at compile time.
@@ -471,31 +501,40 @@
   bool FoldConstantSmis(Token::Value op, int left, int right);
 
   // Emit code to perform a binary operation on a constant
-  // smi and a likely smi.  Consumes the Result *operand.
-  Result ConstantSmiBinaryOperation(Token::Value op,
+  // smi and a likely smi.  Consumes the Result operand.
+  Result ConstantSmiBinaryOperation(BinaryOperation* expr,
                                     Result* operand,
                                     Handle<Object> constant_operand,
-                                    StaticType* type,
                                     bool reversed,
                                     OverwriteMode overwrite_mode);
 
   // Emit code to perform a binary operation on two likely smis.
   // The code to handle smi arguments is produced inline.
-  // Consumes the Results *left and *right.
-  Result LikelySmiBinaryOperation(Token::Value op,
+  // Consumes the Results left and right.
+  Result LikelySmiBinaryOperation(BinaryOperation* expr,
                                   Result* left,
                                   Result* right,
                                   OverwriteMode overwrite_mode);
 
+
+  // Emit code to perform a binary operation on two untagged int32 values.
+  // The values are on top of the frame, and the result is pushed on the frame.
+  void Int32BinaryOperation(BinaryOperation* node);
+
+
   void Comparison(AstNode* node,
                   Condition cc,
                   bool strict,
                   ControlDestination* destination);
+  void GenerateInlineNumberComparison(Result* left_side,
+                                      Result* right_side,
+                                      Condition cc,
+                                      ControlDestination* dest);
 
   // To prevent long attacker-controlled byte sequences, integer constants
   // from the JavaScript source are loaded in two parts if they are larger
-  // than 16 bits.
-  static const int kMaxSmiInlinedBits = 16;
+  // than 17 bits.
+  static const int kMaxSmiInlinedBits = 17;
   bool IsUnsafeSmi(Handle<Object> value);
   // Load an integer constant x into a register target or into the stack using
   // at most 16 bits of user-controlled data per assembly operation.
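A sketch of the mitigation this comment describes; the exact emission sequence is an assumption, the point being that no single emitted instruction embeds more than 16 attacker-chosen bits:

    #include <cstdint>

    struct SplitConstant { uint16_t high; uint16_t low; };

    // Sketch only; emitted as: mov reg, high ; shl reg, 16 ; or reg, low.
    SplitConstant SplitForSafeLoad(uint32_t value) {
      return { static_cast<uint16_t>(value >> 16),
               static_cast<uint16_t>(value & 0xFFFF) };
    }
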
@@ -520,6 +559,7 @@
   struct InlineRuntimeLUT {
     void (CodeGenerator::*method)(ZoneList<Expression*>*);
     const char* name;
+    int nargs;
   };
 
   static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
@@ -536,8 +576,8 @@
   // name/value pairs.
   void DeclareGlobals(Handle<FixedArray> pairs);
 
-  // Instantiate the function boilerplate.
-  Result InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+  // Instantiate the function based on the shared function info.
+  Result InstantiateFunction(Handle<SharedFunctionInfo> function_info);
 
   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);
@@ -553,7 +593,7 @@
 
   // Support for arguments.length and arguments[?].
   void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+  void GenerateArguments(ZoneList<Expression*>* args);
 
   // Support for accessing the class and value fields of an object.
   void GenerateClassOf(ZoneList<Expression*>* args);
@@ -563,6 +603,9 @@
   // Fast support for charCodeAt(n).
   void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
 
+  // Fast support for string.charAt(n) and string[n].
+  void GenerateCharFromCode(ZoneList<Expression*>* args);
+
   // Fast support for object equality testing.
   void GenerateObjectEquals(ZoneList<Expression*>* args);
 
@@ -571,7 +614,7 @@
   void GenerateGetFramePointer(ZoneList<Expression*>* args);
 
   // Fast support for Math.random().
-  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
 
   // Fast support for StringAdd.
   void GenerateStringAdd(ZoneList<Expression*>* args);
@@ -585,12 +628,27 @@
   // Support for direct calls from JavaScript to native RegExp code.
   void GenerateRegExpExec(ZoneList<Expression*>* args);
 
+  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+
+  // Support for fast native caches.
+  void GenerateGetFromCache(ZoneList<Expression*>* args);
+
   // Fast support for number to string.
   void GenerateNumberToString(ZoneList<Expression*>* args);
 
-  // Fast call to transcendental functions.
+  // Fast swapping of elements. Takes three expressions, the object and two
+  // indices. This should only be used if the indices are known to be
+  // non-negative and within bounds of the elements array at the call site.
+  void GenerateSwapElements(ZoneList<Expression*>* args);
+
+  // Fast call for custom callbacks.
+  void GenerateCallFunction(ZoneList<Expression*>* args);
+
+  // Fast call to math functions.
+  void GenerateMathPow(ZoneList<Expression*>* args);
   void GenerateMathSin(ZoneList<Expression*>* args);
   void GenerateMathCos(ZoneList<Expression*>* args);
+  void GenerateMathSqrt(ZoneList<Expression*>* args);
 
   // Simple condition analysis.
   enum ConditionAnalysis {
@@ -609,6 +667,8 @@
   void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
   void CodeForSourcePosition(int pos);
 
+  void SetTypeForStackSlot(Slot* slot, TypeInfo info);
+
 #ifdef DEBUG
   // True if the registers are valid for entry to a block.  There should
   // be no frame-external references to (non-reserved) registers.
@@ -627,10 +687,14 @@
   RegisterAllocator* allocator_;
   CodeGenState* state_;
   int loop_nesting_;
+  bool in_safe_int32_mode_;
+  bool safe_int32_mode_enabled_;
 
   // Jump targets.
   // The target of the return from the function.
   BreakTarget function_return_;
+  // The target of the bailout from a side-effect-free int32 subexpression.
+  BreakTarget* unsafe_bailout_;
 
   // True if the function return is shadowed (ie, jumping to the target
   // function_return_ does not jump to the true function return, but rather
@@ -687,18 +751,35 @@
   GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
                       GenericBinaryFlags flags,
-                      NumberInfo::Type operands_type = NumberInfo::kUnknown)
+                      TypeInfo operands_type)
       : op_(op),
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),
         args_reversed_(false),
-        name_(NULL),
-        operands_type_(operands_type) {
+        static_operands_type_(operands_type),
+        runtime_operands_type_(BinaryOpIC::DEFAULT),
+        name_(NULL) {
+    if (static_operands_type_.IsSmi()) {
+      mode_ = NO_OVERWRITE;
+    }
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
+  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        flags_(FlagBits::decode(key)),
+        args_in_registers_(ArgsInRegistersBits::decode(key)),
+        args_reversed_(ArgsReversedBits::decode(key)),
+        use_sse3_(SSE3Bits::decode(key)),
+        static_operands_type_(TypeInfo::ExpandedRepresentation(
+            StaticTypeInfoBits::decode(key))),
+        runtime_operands_type_(runtime_operands_type),
+        name_(NULL) {
+  }
+
   // Generate code to call the stub with the supplied arguments. This will add
   // code at the call site to prepare arguments either in registers or on the
   // stack together with the actual call.
@@ -718,44 +799,53 @@
   bool args_in_registers_;  // Arguments passed in registers not on the stack.
   bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;
+
+  // Number type information of operands, determined by code generator.
+  TypeInfo static_operands_type_;
+
+  // Operand type information determined at runtime.
+  BinaryOpIC::TypeInfo runtime_operands_type_;
+
   char* name_;
-  NumberInfo::Type operands_type_;  // Number type information of operands.
 
   const char* GetName();
 
 #ifdef DEBUG
   void Print() {
     PrintF("GenericBinaryOpStub %d (op %s), "
-           "(mode %d, flags %d, registers %d, reversed %d, number_info %s)\n",
+           "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
            MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
            static_cast<int>(flags_),
            static_cast<int>(args_in_registers_),
            static_cast<int>(args_reversed_),
-           NumberInfo::ToString(operands_type_));
+           static_operands_type_.ToString());
   }
 #endif
 
-  // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
+  // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class SSE3Bits: public BitField<bool, 9, 1> {};
   class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
   class ArgsReversedBits: public BitField<bool, 11, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
-  class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
+  class StaticTypeInfoBits: public BitField<int, 13, 3> {};
+  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
+    // Encode the parameters in a unique 18 bit value.
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
            | FlagBits::encode(flags_)
            | SSE3Bits::encode(use_sse3_)
            | ArgsInRegistersBits::encode(args_in_registers_)
            | ArgsReversedBits::encode(args_reversed_)
-           | NumberInfoBits::encode(operands_type_);
+           | StaticTypeInfoBits::encode(
+                 static_operands_type_.ThreeBitRepresentation())
+           | RuntimeTypeInfoBits::encode(runtime_operands_type_);
   }
 
   void Generate(MacroAssembler* masm);
@@ -763,6 +853,8 @@
   void GenerateLoadArguments(MacroAssembler* masm);
   void GenerateReturn(MacroAssembler* masm);
   void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+  void GenerateRegisterArgsPush(MacroAssembler* masm);
+  void GenerateTypeTransition(MacroAssembler* masm);
 
   bool ArgsInRegistersSupported() {
     return op_ == Token::ADD || op_ == Token::SUB
@@ -777,56 +869,106 @@
   bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
   bool HasArgsInRegisters() { return args_in_registers_; }
   bool HasArgsReversed() { return args_reversed_; }
+
+  bool ShouldGenerateSmiCode() {
+    return HasSmiCodeInStub() &&
+        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+        runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  bool ShouldGenerateFPCode() {
+    return runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return BinaryOpIC::ToState(runtime_operands_type_);
+  }
 };
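Note: the MinorKey encodings above lean on v8's BitField<T, shift, size>
helper. A minimal sketch of its assumed semantics (the real helper also
asserts the value fits its slot; BitFieldSketch is our name, not v8's):

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitFieldSketch {
      // Shift the (assumed in-range) value into its slot within the key.
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      // Mask the slot back out of a packed key.
      static T decode(uint32_t key) {
        return static_cast<T>((key >> shift) & ((1u << size) - 1));
      }
    };

    // Example: the OpBits slot above occupies bits 2..8.
    using OpBitsSketch = BitFieldSketch<int, 2, 7>;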
 
 
-class StringStubBase: public CodeStub {
+class StringHelper : public AllStatic {
  public:
+  // Generates fast code for getting a char code out of a string
+  // object at the given index. May bail out for four reasons (in the
+  // listed order):
+  //   * Receiver is not a string (receiver_not_string label).
+  //   * Index is not a smi (index_not_smi label).
+  //   * Index is out of range (index_out_of_range label).
+  //   * Some other reason (slow_case label). In this case it's
+  //     guaranteed that the above conditions are not violated,
+  //     e.g. it's safe to assume the receiver is a string and the
+  //     index is a non-negative smi < length.
+  // When successful, object, index, and scratch are clobbered.
+  // Otherwise, scratch and result are clobbered.
+  static void GenerateFastCharCodeAt(MacroAssembler* masm,
+                                     Register object,
+                                     Register index,
+                                     Register scratch,
+                                     Register result,
+                                     Label* receiver_not_string,
+                                     Label* index_not_smi,
+                                     Label* index_out_of_range,
+                                     Label* slow_case);
+
+  // Generates code for creating a one-char string from the given char
+  // code. May do a runtime call, so any register can be clobbered
+  // and, if the given invoke flag specifies a call, an internal frame
+  // is required. In tail call mode the result must be eax register.
+  static void GenerateCharFromCode(MacroAssembler* masm,
+                                   Register code,
+                                   Register result,
+                                   InvokeFlag flag);
+
   // Generate code for copying characters using a simple loop. This should only
   // be used in places where the number of characters is small and the
   // additional setup and checking in GenerateCopyCharactersREP adds too much
   // overhead. Copying of overlapping regions is not supported.
-  void GenerateCopyCharacters(MacroAssembler* masm,
-                              Register dest,
-                              Register src,
-                              Register count,
-                              Register scratch,
-                              bool ascii);
+  static void GenerateCopyCharacters(MacroAssembler* masm,
+                                     Register dest,
+                                     Register src,
+                                     Register count,
+                                     Register scratch,
+                                     bool ascii);
 
   // Generate code for copying characters using the rep movs instruction.
   // Copies ecx characters from esi to edi. Copying of overlapping regions is
   // not supported.
-  void GenerateCopyCharactersREP(MacroAssembler* masm,
-                                 Register dest,     // Must be edi.
-                                 Register src,      // Must be esi.
-                                 Register count,    // Must be ecx.
-                                 Register scratch,  // Neither of the above.
-                                 bool ascii);
+  static void GenerateCopyCharactersREP(MacroAssembler* masm,
+                                        Register dest,     // Must be edi.
+                                        Register src,      // Must be esi.
+                                        Register count,    // Must be ecx.
+                                        Register scratch,  // Neither of above.
+                                        bool ascii);
 
   // Probe the symbol table for a two character string. If the string is
   // not found by probing, a jump to the label not_found is performed. This
   // jump does not guarantee that the string is not in the symbol table. If
   // the string is found, the code falls through with the string in eax.
-  void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
-                                            Register c1,
-                                            Register c2,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Register scratch3,
-                                            Label* not_found);
+  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                   Register c1,
+                                                   Register c2,
+                                                   Register scratch1,
+                                                   Register scratch2,
+                                                   Register scratch3,
+                                                   Label* not_found);
 
   // Generate string hash.
-  void GenerateHashInit(MacroAssembler* masm,
-                        Register hash,
-                        Register character,
-                        Register scratch);
-  void GenerateHashAddCharacter(MacroAssembler* masm,
-                                Register hash,
-                                Register character,
-                                Register scratch);
-  void GenerateHashGetHash(MacroAssembler* masm,
-                           Register hash,
-                           Register scratch);
+  static void GenerateHashInit(MacroAssembler* masm,
+                               Register hash,
+                               Register character,
+                               Register scratch);
+  static void GenerateHashAddCharacter(MacroAssembler* masm,
+                                       Register hash,
+                                       Register character,
+                                       Register scratch);
+  static void GenerateHashGetHash(MacroAssembler* masm,
+                                  Register hash,
+                                  Register scratch);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
 };
 
 
@@ -837,7 +979,7 @@
 };
 
 
-class StringAddStub: public StringStubBase {
+class StringAddStub: public CodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) {
     string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -854,7 +996,7 @@
 };
 
 
-class SubStringStub: public StringStubBase {
+class SubStringStub: public CodeStub {
  public:
   SubStringStub() {}
 
@@ -866,7 +1008,7 @@
 };
 
 
-class StringCompareStub: public StringStubBase {
+class StringCompareStub: public CodeStub {
  public:
   explicit StringCompareStub() {
   }
@@ -921,6 +1063,42 @@
 };
 
 
+class RecordWriteStub : public CodeStub {
+ public:
+  RecordWriteStub(Register object, Register addr, Register scratch)
+      : object_(object), addr_(addr), scratch_(scratch) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register object_;
+  Register addr_;
+  Register scratch_;
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+           object_.code(), addr_.code(), scratch_.code());
+  }
+#endif
+
+  // Minor key encoding in 12 bits. 4 bits for each of the three
+  // registers (object, address and scratch): OOOOAAAASSSS.
+  class ScratchBits: public BitField<uint32_t, 0, 4> {};
+  class AddressBits: public BitField<uint32_t, 4, 4> {};
+  class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    // Encode the registers.
+    return ObjectBits::encode(object_.code()) |
+           AddressBits::encode(addr_.code()) |
+           ScratchBits::encode(scratch_.code());
+  }
+};
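Note: a worked instance of the OOOOAAAASSSS packing above, assuming the
standard ia32 register numbering (eax=0, ecx=1, edx=2, ..., edi=7):

    #include <cassert>
    #include <cstdint>

    int main() {
      // RecordWriteStub(object=edi, addr=edx, scratch=ecx): codes 7, 2, 1.
      uint32_t key = (7u << 8) | (2u << 4) | 1u;
      assert(key == 0x721);
    }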
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index a9e2626..d142b11 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -146,9 +146,10 @@
 void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for keyed IC load call (from ic-ia32.cc).
   // ----------- S t a t e -------------
-  //  No registers used on entry.
+  //  -- edx    : receiver
+  //  -- eax    : key
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, 0, false);
+  Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), false);
 }
 
 
@@ -156,10 +157,12 @@
   // Register state for keyed IC store call (from ic-ia32.cc).
   // ----------- S t a t e -------------
   //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
   // -----------------------------------
   // Register eax contains an object that needs to be pushed on the
   // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, eax.bit(), false);
+  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
 }
 
 
@@ -203,8 +206,58 @@
 }
 
 
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+  masm->ret(0);
+}
+
+// FrameDropper is a code replacement for a JavaScript frame with possibly
+// several frames above.
+// There are no calling conventions here, because it never actually gets
+// called; it only gets returned to.
+// Frame structure (conforms to the InternalFrame structure):
+//   -- JSFunction
+//   -- code
+//   -- SMI marker
+//   -- context
+//   -- frame base
+void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+  // We do not know our frame height, but set esp based on ebp.
+  __ lea(esp, Operand(ebp, -4 * kPointerSize));
+
+  __ pop(edi);  // function
+
+  // Skip code self-reference and marker.
+  __ add(Operand(esp), Immediate(2 * kPointerSize));
+
+  __ pop(esi);  // Context.
+  __ pop(ebp);
+
+  // Get function code.
+  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+
+  // Re-run JSFunction, edi is function, esi is context.
+  __ jmp(Operand(edx));
+}
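Note: the pops above imply this slot layout relative to ebp on ia32
(kPointerSize == 4); this is derived from the code, not stated in it, and
matches the five-slot kFrameDropperFrameSize defined below:

    // ebp - 16 : JSFunction            (popped into edi)
    // ebp - 12 : code self-reference   (skipped)
    // ebp -  8 : SMI marker            (skipped)
    // ebp -  4 : context               (popped into esi)
    // ebp -  0 : caller's frame base   (popped into ebp)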
+
 #undef __
 
+
+void Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
+                                   Handle<Code> code) {
+  ASSERT(bottom_js_frame->is_java_script());
+
+  Address fp = bottom_js_frame->fp();
+  Memory::Object_at(fp - 4 * kPointerSize) =
+      Memory::Object_at(fp - 2 * kPointerSize);  // Move edi (function).
+
+  Memory::Object_at(fp - 3 * kPointerSize) = *code;
+  Memory::Object_at(fp - 2 * kPointerSize) = Smi::FromInt(StackFrame::INTERNAL);
+}
+const int Debug::kFrameDropperFrameSize = 5;
+
+
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 } }  // namespace v8::internal
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index a085900..8d342e0 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -89,6 +89,7 @@
   {0x9E, "sahf", UNSET_OP_ORDER},
   {0x99, "cdq", UNSET_OP_ORDER},
   {0x9B, "fwait", UNSET_OP_ORDER},
+  {0xFC, "cld", UNSET_OP_ORDER},
   {-1, "", UNSET_OP_ORDER}
 };
 
@@ -1055,12 +1056,39 @@
           AppendToBuffer(",%s", NameOfCPURegister(regop));
         } else if (*data == 0x0F) {
           data++;
-          if (*data == 0x2F) {
+          if (*data == 0x38) {
+            data++;
+            if (*data == 0x17) {
+              data++;
+              int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+              AppendToBuffer("ptest %s,%s",
+                             NameOfXMMRegister(regop),
+                             NameOfXMMRegister(rm));
+              data++;
+            } else {
+              UnimplementedInstruction();
+            }
+          } else if (*data == 0x2E || *data == 0x2F) {
+            const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
             data++;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("comisd %s,%s",
-                           NameOfXMMRegister(regop),
+            if (mod == 0x3) {
+              AppendToBuffer("%s %s,%s", mnem,
+                             NameOfXMMRegister(regop),
+                             NameOfXMMRegister(rm));
+              data++;
+            } else {
+              AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+              data += PrintRightOperand(data);
+            }
+          } else if (*data == 0x50) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("movmskpd %s,%s",
+                           NameOfCPURegister(regop),
                            NameOfXMMRegister(rm));
             data++;
           } else if (*data == 0x57) {
@@ -1071,6 +1099,12 @@
                            NameOfXMMRegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (*data == 0x6E) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("movd %s,", NameOfXMMRegister(regop));
+            data += PrintRightOperand(data);
           } else if (*data == 0x6F) {
             data++;
             int mod, regop, rm;
@@ -1084,6 +1118,14 @@
             get_modrm(*data, &mod, &regop, &rm);
             data += PrintRightOperand(data);
             AppendToBuffer(",%s", NameOfXMMRegister(regop));
+          } else if (*data == 0xEF) {
+             data++;
+             int mod, regop, rm;
+             get_modrm(*data, &mod, &regop, &rm);
+             AppendToBuffer("pxor %s,%s",
+                            NameOfXMMRegister(regop),
+                            NameOfXMMRegister(rm));
+             data++;
           } else {
             UnimplementedInstruction();
           }
@@ -1170,6 +1212,8 @@
             const char* mnem = "?";
             switch (b2) {
               case 0x2A: mnem = "cvtsi2sd"; break;
+              case 0x2C: mnem = "cvttsd2si"; break;
+              case 0x51: mnem = "sqrtsd"; break;
               case 0x58: mnem = "addsd"; break;
               case 0x59: mnem = "mulsd"; break;
               case 0x5C: mnem = "subsd"; break;
@@ -1179,14 +1223,38 @@
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             if (b2 == 0x2A) {
-              AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
-              data += PrintRightOperand(data);
+              if (mod != 0x3) {
+                AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+                data += PrintRightOperand(data);
+              } else {
+                AppendToBuffer("%s %s,%s",
+                               mnem,
+                               NameOfXMMRegister(regop),
+                               NameOfCPURegister(rm));
+                data++;
+              }
+            } else if (b2 == 0x2C) {
+              if (mod != 0x3) {
+                AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+                data += PrintRightOperand(data);
+              } else {
+                AppendToBuffer("%s %s,%s",
+                               mnem,
+                               NameOfCPURegister(regop),
+                               NameOfXMMRegister(rm));
+                data++;
+              }
             } else {
-              AppendToBuffer("%s %s,%s",
-                             mnem,
-                             NameOfXMMRegister(regop),
-                             NameOfXMMRegister(rm));
-              data++;
+              if (mod != 0x3) {
+                AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+                data += PrintRightOperand(data);
+              } else {
+                AppendToBuffer("%s %s,%s",
+                               mnem,
+                               NameOfXMMRegister(regop),
+                               NameOfXMMRegister(rm));
+                data++;
+              }
             }
           }
         } else {
@@ -1199,6 +1267,14 @@
           if (*(data+2) == 0x2C) {
             data += 3;
             data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+          } else  if (*(data+2) == 0x5A) {
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("cvtss2sd %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else  if (*(data+2) == 0x6F) {
             data += 3;
             int mod, regop, rm;
@@ -1218,6 +1294,9 @@
         } else if (*(data+1) == 0xA5) {
           data += 2;
           AppendToBuffer("rep_movs");
+        } else if (*(data+1) == 0xAB) {
+          data += 2;
+          AppendToBuffer("rep_stos");
         } else {
           UnimplementedInstruction();
         }
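Note: for cross-reference, the mnemonics added above correspond to these
standard IA-32 opcode sequences: 66 0F 38 17 (ptest), 66 0F 2E (ucomisd),
66 0F 2F (comisd), 66 0F 50 (movmskpd), 66 0F 6E (movd), 66 0F EF (pxor),
F2 0F 2C (cvttsd2si), F2 0F 51 (sqrtsd), F3 0F 5A (cvtss2sd),
F3 AB (rep_stos), and FC (cld).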
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
index f1c2507..61e2b5e 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -195,9 +195,9 @@
 }
 
 
-void FastCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
+void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
 }
 
 
@@ -436,9 +436,6 @@
   AstLabeler labeler;
   labeler.Label(info);
 
-  LivenessAnalyzer analyzer;
-  analyzer.Analyze(info->function());
-
   CodeGenerator::MakeCodePrologue(info);
 
   const int kInitialBufferSize = 4 * KB;
@@ -621,6 +618,7 @@
 void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
   info_ = compilation_info;
+  Comment cmnt(masm_, "[ function compiled by fast code generator");
 
   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
@@ -766,8 +764,8 @@
 }
 
 
-void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FastCodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }
 
@@ -801,8 +799,8 @@
     Comment cmnt(masm(), ";; Global");
     if (FLAG_print_ir) {
       SmartPointer<char> name = expr->name()->ToCString();
-      PrintF("%d: t%d = Global(%s)  // last_use = %d\n", expr->num(),
-             expr->num(), *name, expr->var_def()->last_use()->num());
+      PrintF("%d: t%d = Global(%s)\n", expr->num(),
+             expr->num(), *name);
     }
     EmitGlobalVariableLoad(cell);
   }
@@ -856,9 +854,8 @@
     SmartPointer<char> name_string = name->ToCString();
     PrintF("%d: ", expr->num());
     if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
-    PrintF("Store(this, \"%s\", t%d)  // last_use(this) = %d\n", *name_string,
-           expr->value()->num(),
-           expr->var_def()->last_use()->num());
+    PrintF("Store(this, \"%s\", t%d)\n", *name_string,
+           expr->value()->num());
   }
 
   EmitThisPropertyStore(name);
@@ -881,9 +878,8 @@
     Comment cmnt(masm(), ";; Load from this");
     if (FLAG_print_ir) {
       SmartPointer<char> name_string = name->ToCString();
-      PrintF("%d: t%d = Load(this, \"%s\")  // last_use(this) = %d\n",
-             expr->num(), expr->num(), *name_string,
-             expr->var_def()->last_use()->num());
+      PrintF("%d: t%d = Load(this, \"%s\")\n",
+             expr->num(), expr->num(), *name_string);
     }
     EmitThisPropertyLoad(name);
   }
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 2394bed..e9838ad 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -32,6 +32,7 @@
 #include "debug.h"
 #include "full-codegen.h"
 #include "parser.h"
+#include "scopes.h"
 
 namespace v8 {
 namespace internal {
@@ -55,6 +56,7 @@
   ASSERT(info_ == NULL);
   info_ = info;
   SetFunctionPosition(function());
+  Comment cmnt(masm_, "[ function compiled by full code generator");
 
   if (mode == PRIMARY) {
     __ push(ebp);  // Caller's frame pointer.
@@ -740,23 +742,22 @@
       // We are declaring a function or constant that rewrites to a
       // property.  Use (keyed) IC to set the initial value.
       VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
-
       if (decl->fun() != NULL) {
+        VisitForValue(prop->key(), kStack);
         VisitForValue(decl->fun(), kAccumulator);
+        __ pop(ecx);
       } else {
+        VisitForValue(prop->key(), kAccumulator);
+        __ mov(ecx, result_register());
         __ mov(result_register(), Factory::the_hole_value());
       }
+      __ pop(edx);
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       __ call(ic, RelocInfo::CODE_TARGET);
       // Absence of a test eax instruction following the call
       // indicates that no part of the load was inlined.
       __ nop();
-
-      // Value in eax is ignored (declarations are statements).  Receiver
-      // and key on stack are discarded.
-      __ Drop(2);
     }
   }
 }
@@ -775,16 +776,15 @@
 void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
-  // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script(), this);
+  // Build the shared function info and instantiate the function based
+  // on it.
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(expr, script(), this);
   if (HasStackOverflow()) return;
 
-  ASSERT(boilerplate->IsBoilerplate());
-
   // Create a new closure.
   __ push(esi);
-  __ push(Immediate(boilerplate));
+  __ push(Immediate(function_info));
   __ CallRuntime(Runtime::kNewClosure, 2);
   Apply(context_, eax);
 }
@@ -900,10 +900,11 @@
   __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
   __ push(Immediate(expr->constant_properties()));
+  __ push(Immediate(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
   if (expr->depth() > 1) {
-    __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
-    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1129,7 +1130,8 @@
   __ push(result_register());
   GenericBinaryOpStub stub(op,
                            NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS);
+                           NO_GENERIC_BINARY_FLAGS,
+                           TypeInfo::Unknown());
   __ CallStub(&stub);
   Apply(context, eax);
 }
@@ -1250,6 +1252,12 @@
     __ pop(result_register());
   }
 
+  __ pop(ecx);
+  if (expr->ends_initialization_block()) {
+    __ mov(edx, Operand(esp, 0));  // Leave receiver on the stack for later.
+  } else {
+    __ pop(edx);
+  }
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
@@ -1260,15 +1268,14 @@
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
+    __ pop(edx);
     __ push(eax);  // Result of assignment, saved even if not needed.
-    // Receiver is under the key and value.
-    __ push(Operand(esp, 2 * kPointerSize));
+    __ push(edx);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
   }
 
-  // Receiver and key are still on stack.
-  DropAndApply(2, context_, eax);
+  Apply(context_, eax);
 }
 
 
@@ -1738,7 +1745,8 @@
   // Call stub for +1/-1.
   GenericBinaryOpStub stub(expr->binary_op(),
                            NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS);
+                           NO_GENERIC_BINARY_FLAGS,
+                           TypeInfo::Unknown());
   stub.GenerateCall(masm(), eax, Smi::FromInt(1));
   __ bind(&done);
 
@@ -1776,18 +1784,20 @@
       break;
     }
     case KEYED_PROPERTY: {
+      __ pop(ecx);
+      __ pop(edx);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       __ call(ic, RelocInfo::CODE_TARGET);
       // This nop signals to the IC that there is no inlined code at the call
       // site for it to patch.
       __ nop();
       if (expr->is_postfix()) {
-        __ Drop(2);  // Result is on the stack under the key and the receiver.
+        // Result is on the stack.
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
         }
       } else {
-        DropAndApply(2, context_, eax);
+        Apply(context_, eax);
       }
       break;
     }
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 0d79c54..bc7a33c 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -73,11 +73,10 @@
   // Check for the absence of an interceptor.
   // Load the map into r0.
   __ mov(r0, FieldOperand(receiver, JSObject::kMapOffset));
-  // Test the has_named_interceptor bit in the map.
-  __ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
-          Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
 
-  // Jump to miss if the interceptor bit is set.
+  // Bail out if the receiver has a named interceptor.
+  __ test(FieldOperand(r0, Map::kBitFieldOffset),
+          Immediate(1 << Map::kHasNamedInterceptor));
   __ j(not_zero, miss_label, not_taken);
 
   // Bail out if we have a JS global proxy object.
@@ -152,6 +151,103 @@
 }
 
 
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+                                         Label* miss,
+                                         Register elements,
+                                         Register key,
+                                         Register r0,
+                                         Register r1,
+                                         Register r2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver and is unchanged.
+  //
+  // key      - holds the smi key on entry and is unchanged if a branch is
+  //            performed to the miss label. If the load succeeds and we
+  //            fall through, key holds the result on exit.
+  //
+  // Scratch registers:
+  //
+  // r0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // r1 - used to hold the capacity mask of the dictionary.
+  //
+  // r2 - used for the index into the dictionary.
+  Label done;
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  __ mov(r1, r0);
+  __ not_(r0);
+  __ shl(r1, 15);
+  __ add(r0, Operand(r1));
+  // hash = hash ^ (hash >> 12);
+  __ mov(r1, r0);
+  __ shr(r1, 12);
+  __ xor_(r0, Operand(r1));
+  // hash = hash + (hash << 2);
+  __ lea(r0, Operand(r0, r0, times_4, 0));
+  // hash = hash ^ (hash >> 4);
+  __ mov(r1, r0);
+  __ shr(r1, 4);
+  __ xor_(r0, Operand(r1));
+  // hash = hash * 2057;
+  __ imul(r0, r0, 2057);
+  // hash = hash ^ (hash >> 16);
+  __ mov(r1, r0);
+  __ shr(r1, 16);
+  __ xor_(r0, Operand(r1));
+
+  // Compute capacity mask.
+  __ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
+  __ shr(r1, kSmiTagSize);  // convert smi to int
+  __ dec(r1);
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use r2 for index calculations and keep the hash intact in r0.
+    __ mov(r2, r0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      __ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
+    }
+    __ and_(r2, Operand(r1));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(NumberDictionary::kEntrySize == 3);
+    __ lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
+
+    // Check if the key matches.
+    __ cmp(key, FieldOperand(elements,
+                             r2,
+                             times_pointer_size,
+                             NumberDictionary::kElementsStartOffset));
+    if (i != (kProbes - 1)) {
+      __ j(equal, &done, taken);
+    } else {
+      __ j(not_equal, miss, not_taken);
+    }
+  }
+
+  __ bind(&done);
+  // Check that the value is a normal property.
+  const int kDetailsOffset =
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  ASSERT_EQ(NORMAL, 0);
+  __ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+          Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+  __ j(not_zero, miss);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset =
+      NumberDictionary::kElementsStartOffset + kPointerSize;
+  __ mov(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
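Note: the shift/xor/imul sequence above mirrors ComputeIntegerHash in utils.h,
as the comment says. A C++ rendering for cross-checking the assembly (a
sketch; ComputeIntegerHashSketch is our name, not v8's):

    #include <cstdint>

    static uint32_t ComputeIntegerHashSketch(uint32_t hash) {
      hash = ~hash + (hash << 15);  // not/shl/add
      hash = hash ^ (hash >> 12);   // shr/xor
      hash = hash + (hash << 2);    // lea(r0, [r0 + r0*4])
      hash = hash ^ (hash >> 4);    // shr/xor
      hash = hash * 2057;           // imul
      hash = hash ^ (hash >> 16);   // shr/xor
      return hash;
    }

The unrolled loop that follows then probes (hash + probe offset) & mask,
where mask is the dictionary capacity minus one.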
+
+
 // The offset from the inlined patch site to the start of the
 // inlined load instruction.  It is 7 bytes (test eax, imm) plus
 // 6 bytes (jne slow_label).
@@ -208,6 +304,7 @@
   // -----------------------------------
   Label slow, check_string, index_int, index_string;
   Label check_pixel_array, probe_dictionary;
+  Label check_number_dictionary;
 
   // Check that the object isn't a smi.
   __ test(edx, Immediate(kSmiTagMask));
@@ -256,7 +353,7 @@
   // ebx: untagged index
   // eax: key
   // ecx: elements
-  __ CheckMap(ecx, Factory::pixel_array_map(), &slow, true);
+  __ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
   __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
   __ j(above_equal, &slow);
   __ mov(eax, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
@@ -264,6 +361,32 @@
   __ SmiTag(eax);
   __ ret(0);
 
+  __ bind(&check_number_dictionary);
+  // Check whether the elements object is a number dictionary.
+  // edx: receiver
+  // ebx: untagged index
+  // eax: key
+  // ecx: elements
+  __ CheckMap(ecx, Factory::hash_table_map(), &slow, true);
+  Label slow_pop_receiver;
+  // Push receiver on the stack to free up a register for the dictionary
+  // probing.
+  __ push(edx);
+  GenerateNumberDictionaryLoad(masm,
+                               &slow_pop_receiver,
+                               ecx,
+                               eax,
+                               ebx,
+                               edx,
+                               edi);
+  // Pop receiver before returning.
+  __ pop(edx);
+  __ ret(0);
+
+  __ bind(&slow_pop_receiver);
+  // Pop the receiver from the stack and jump to runtime.
+  __ pop(edx);
+
   __ bind(&slow);
   // Slow case: jump to runtime.
   // edx: receiver
@@ -349,7 +472,7 @@
                          ecx,
                          edi,
                          DICTIONARY_CHECK_DONE);
-  __ mov(eax, Operand(ecx));
+  __ mov(eax, ecx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
 
@@ -368,39 +491,70 @@
 
 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- eax    : key (index)
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label miss, index_ok;
+  Label miss;
+  Label index_not_smi;
+  Label index_out_of_range;
+  Label slow_char_code;
+  Label got_char_code;
 
-  // Pop return address.
-  // Performing the load early is better in the common case.
-  __ pop(ebx);
+  Register receiver = edx;
+  Register index = eax;
+  Register code = ebx;
+  Register scratch = ecx;
 
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ test(ecx, Immediate(kIsNotStringMask));
-  __ j(not_zero, &miss);
+  StringHelper::GenerateFastCharCodeAt(masm,
+                                       receiver,
+                                       index,
+                                       scratch,
+                                       code,
+                                       &miss,  // When not a string.
+                                       &index_not_smi,
+                                       &index_out_of_range,
+                                       &slow_char_code);
+  // If we didn't bail out, code register contains smi tagged char
+  // code.
+  __ bind(&got_char_code);
+  StringHelper::GenerateCharFromCode(masm, code, eax, JUMP_FUNCTION);
+#ifdef DEBUG
+  __ Abort("Unexpected fall-through from char from code tail call");
+#endif
 
-  // Check if key is a smi or a heap number.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &index_ok);
-  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ cmp(ecx, Factory::heap_number_map());
-  __ j(not_equal, &miss);
+  // Check if key is a heap number.
+  __ bind(&index_not_smi);
+  __ CheckMap(index, Factory::heap_number_map(), &miss, true);
 
-  __ bind(&index_ok);
-  // Push receiver and key on the stack, and make a tail call.
-  __ push(edx);  // receiver
-  __ push(eax);  // key
-  __ push(ebx);  // return address
-  __ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_FUNCTION);
+  // Push receiver and key on the stack (now that we know they are a
+  // string and a number), and call runtime.
+  __ bind(&slow_char_code);
+  __ EnterInternalFrame();
+  __ push(receiver);
+  __ push(index);
+  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+  ASSERT(!code.is(eax));
+  __ mov(code, eax);
+  __ LeaveInternalFrame();
+
+  // Check if the runtime call returned NaN char code. If yes, return
+  // undefined. Otherwise, we can continue.
+  if (FLAG_debug_code) {
+    ASSERT(kSmiTag == 0);
+    __ test(code, Immediate(kSmiTagMask));
+    __ j(zero, &got_char_code);
+    __ mov(scratch, FieldOperand(code, HeapObject::kMapOffset));
+    __ cmp(scratch, Factory::heap_number_map());
+    __ Assert(equal, "StringCharCodeAt must return smi or heap number");
+  }
+  __ cmp(code, Factory::nan_value());
+  __ j(not_equal, &got_char_code);
+  __ bind(&index_out_of_range);
+  __ Set(eax, Immediate(Factory::undefined_value()));
+  __ ret(0);
 
   __ bind(&miss);
-  __ push(ebx);
   GenerateMiss(masm);
 }
 
@@ -592,8 +746,9 @@
   __ push(ecx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(
-        IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+  ExternalReference ref = ExternalReference(
+      IC_Utility(kKeyedLoadPropertyWithInterceptor));
+  __ TailCallExternalReference(ref, 2, 1);
 
   __ bind(&slow);
   GenerateMiss(masm);
@@ -603,54 +758,41 @@
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : key
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label slow, fast, array, extra, check_pixel_array;
 
-  // Get the receiver from the stack.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));  // 2 ~ return address, key
   // Check that the object isn't a smi.
   __ test(edx, Immediate(kSmiTagMask));
   __ j(zero, &slow, not_taken);
   // Get the map from the receiver.
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
+  __ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow, not_taken);
-  // Get the key from the stack.
-  __ mov(ebx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
   // Check that the key is a smi.
-  __ test(ebx, Immediate(kSmiTagMask));
+  __ test(ecx, Immediate(kSmiTagMask));
   __ j(not_zero, &slow, not_taken);
-  // Get the instance type from the map of the receiver.
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  // Check if the object is a JS array or not.
-  __ cmp(ecx, JS_ARRAY_TYPE);
+  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JS object.
-  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-  __ j(less, &slow, not_taken);
+  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+  __ j(below, &slow, not_taken);
 
   // Object case: Check key against length in the elements array.
   // eax: value
   // edx: JSObject
-  // ebx: index (as a smi)
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  // ecx: key (a smi)
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
-  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
-         Immediate(Factory::fixed_array_map()));
-  __ j(not_equal, &check_pixel_array, not_taken);
-  // Untag the key (for checking against untagged length in the fixed array).
-  __ mov(edx, Operand(ebx));
-  __ sar(edx, kSmiTagSize);  // untag the index and use it for the comparison
-  __ cmp(edx, FieldOperand(ecx, Array::kLengthOffset));
-  // eax: value
-  // ecx: FixedArray
-  // ebx: index (as a smi)
+  __ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
+  __ mov(ebx, Operand(ecx));
+  __ SmiUntag(ebx);
+  __ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
   __ j(below, &fast, taken);
 
   // Slow case: call runtime.
@@ -658,52 +800,51 @@
   GenerateRuntimeSetProperty(masm);
 
   // Check whether the elements is a pixel array.
-  // eax: value
-  // ecx: elements array
-  // ebx: index (as a smi)
   __ bind(&check_pixel_array);
-  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
-         Immediate(Factory::pixel_array_map()));
-  __ j(not_equal, &slow);
+  // eax: value
+  // ecx: key
+  // edx: receiver
+  // edi: elements array
+  __ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
   // Check that the value is a smi. If a conversion is needed call into the
   // runtime to convert and clamp.
   __ test(eax, Immediate(kSmiTagMask));
   __ j(not_zero, &slow);
-  __ sar(ebx, kSmiTagSize);  // Untag the index.
-  __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
+  __ mov(ebx, ecx);
+  __ SmiUntag(ebx);
+  __ cmp(ebx, FieldOperand(edi, PixelArray::kLengthOffset));
   __ j(above_equal, &slow);
-  __ mov(edx, eax);  // Save the value.
-  __ sar(eax, kSmiTagSize);  // Untag the value.
+  __ mov(ecx, eax);  // Save the value. Key is no longer needed.
+  __ SmiUntag(ecx);
   {  // Clamp the value to [0..255].
     Label done;
-    __ test(eax, Immediate(0xFFFFFF00));
+    __ test(ecx, Immediate(0xFFFFFF00));
     __ j(zero, &done);
-    __ setcc(negative, eax);  // 1 if negative, 0 if positive.
-    __ dec_b(eax);  // 0 if negative, 255 if positive.
+    __ setcc(negative, ecx);  // 1 if negative, 0 if positive.
+    __ dec_b(ecx);  // 0 if negative, 255 if positive.
     __ bind(&done);
   }
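Note: the setcc/dec_b pair above is a branch-light clamp. The same logic in
C++ (a sketch, not shared code):

    #include <cstdint>

    static uint8_t ClampToByteSketch(int32_t value) {
      if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);
      // setcc(negative) yields 1 for a negative value, 0 otherwise; dec_b
      // then wraps the byte: 1 - 1 == 0, 0 - 1 == 255.
      return value < 0 ? 0 : 255;
    }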
-  __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
-  __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
-  __ mov(eax, edx);  // Return the original value.
-  __ ret(0);
+  __ mov(edi, FieldOperand(edi, PixelArray::kExternalPointerOffset));
+  __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+  __ ret(0);  // Return value in eax.
 
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
   // element to the array by writing to array[array.length].
   __ bind(&extra);
   // eax: value
-  // edx: JSArray
-  // ecx: FixedArray
-  // ebx: index (as a smi)
-  // flags: compare (ebx, edx.length())
+  // edx: receiver, a JSArray
+  // ecx: key, a smi.
+  // edi: receiver->elements, a FixedArray
+  // flags: compare (ecx, edx.length())
   __ j(not_equal, &slow, not_taken);  // do not leave holes in the array
-  __ sar(ebx, kSmiTagSize);  // untag
-  __ cmp(ebx, FieldOperand(ecx, Array::kLengthOffset));
+  __ mov(ebx, ecx);
+  __ SmiUntag(ebx);  // untag
+  __ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
   __ j(above_equal, &slow, not_taken);
-  // Restore tag and increment.
-  __ lea(ebx, Operand(ebx, times_2, 1 << kSmiTagSize));
-  __ mov(FieldOperand(edx, JSArray::kLengthOffset), ebx);
-  __ sub(Operand(ebx), Immediate(1 << kSmiTagSize));  // decrement ebx again
+  // Add 1 to receiver->length, and go to fast array write.
+  __ add(FieldOperand(edx, JSArray::kLengthOffset),
+         Immediate(1 << kSmiTagSize));
   __ jmp(&fast);
 
   // Array case: Get the length and the elements array from the JS
@@ -711,28 +852,26 @@
   // length is always a smi.
   __ bind(&array);
   // eax: value
-  // edx: JSArray
-  // ebx: index (as a smi)
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
-         Immediate(Factory::fixed_array_map()));
-  __ j(not_equal, &check_pixel_array);
+  // edx: receiver, a JSArray
+  // ecx: key, a smi.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
-  __ cmp(ebx, FieldOperand(edx, JSArray::kLengthOffset));
+  __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
   __ j(above_equal, &extra, not_taken);
 
   // Fast case: Do the store.
   __ bind(&fast);
   // eax: value
-  // ecx: FixedArray
-  // ebx: index (as a smi)
-  __ mov(Operand(ecx, ebx, times_2, FixedArray::kHeaderSize - kHeapObjectTag),
-         eax);
+  // ecx: key (a smi)
+  // edx: receiver
+  // edi: FixedArray receiver->elements
+  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
   // Update write barrier for the elements array address.
   __ mov(edx, Operand(eax));
-  __ RecordWrite(ecx, 0, edx, ebx);
+  __ RecordWrite(edi, 0, edx, ecx);
   __ ret(0);
 }
 
@@ -741,92 +880,91 @@
                                          ExternalArrayType array_type) {
   // ----------- S t a t e -------------
   //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : key
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label slow, check_heap_number;
 
-  // Get the receiver from the stack.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
   // Check that the object isn't a smi.
   __ test(edx, Immediate(kSmiTagMask));
   __ j(zero, &slow);
   // Get the map from the receiver.
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
+  __ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow);
-  // Get the key from the stack.
-  __ mov(ebx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
   // Check that the key is a smi.
-  __ test(ebx, Immediate(kSmiTagMask));
+  __ test(ecx, Immediate(kSmiTagMask));
   __ j(not_zero, &slow);
   // Get the instance type from the map of the receiver.
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  // Check that the object is a JS object.
-  __ cmp(ecx, JS_OBJECT_TYPE);
+  __ CmpInstanceType(edi, JS_OBJECT_TYPE);
   __ j(not_equal, &slow);
 
   // Check that the elements array is the appropriate type of
   // ExternalArray.
   // eax: value
-  // edx: JSObject
-  // ebx: index (as a smi)
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
-  Handle<Map> map(Heap::MapForExternalArrayType(array_type));
-  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
-         Immediate(map));
-  __ j(not_equal, &slow);
+  // edx: receiver, a JSObject
+  // ecx: key, a smi
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
+              &slow, true);
 
   // Check that the index is in range.
-  __ sar(ebx, kSmiTagSize);  // Untag the index.
-  __ cmp(ebx, FieldOperand(ecx, ExternalArray::kLengthOffset));
+  __ mov(ebx, ecx);
+  __ SmiUntag(ebx);
+  __ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
   __ j(above_equal, &slow);
 
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
   // eax: value
-  // ecx: elements array
+  // edx: receiver
+  // ecx: key
+  // edi: elements array
   // ebx: untagged index
   __ test(eax, Immediate(kSmiTagMask));
   __ j(not_equal, &check_heap_number);
   // smi case
-  __ mov(edx, eax);  // Save the value.
-  __ sar(eax, kSmiTagSize);  // Untag the value.
-  __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+  __ mov(ecx, eax);  // Preserve the value in eax.  Key is no longer needed.
+  __ SmiUntag(ecx);
+  __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
   // edi: base pointer of external storage
   switch (array_type) {
     case kExternalByteArray:
     case kExternalUnsignedByteArray:
-      __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+      __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
       break;
     case kExternalShortArray:
     case kExternalUnsignedShortArray:
-      __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+      __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
       break;
     case kExternalIntArray:
     case kExternalUnsignedIntArray:
-      __ mov(Operand(ecx, ebx, times_4, 0), eax);
+      __ mov(Operand(edi, ebx, times_4, 0), ecx);
       break;
     case kExternalFloatArray:
       // Need to perform int-to-float conversion.
-      __ push(eax);
+      __ push(ecx);
       __ fild_s(Operand(esp, 0));
-      __ pop(eax);
-      __ fstp_s(Operand(ecx, ebx, times_4, 0));
+      __ pop(ecx);
+      __ fstp_s(Operand(edi, ebx, times_4, 0));
       break;
     default:
       UNREACHABLE();
       break;
   }
-  __ mov(eax, edx);  // Return the original value.
-  __ ret(0);
+  __ ret(0);  // Return the original value.
 
   __ bind(&check_heap_number);
+  // eax: value
+  // edx: receiver
+  // ecx: key
+  // edi: elements array
+  // ebx: untagged index
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          Immediate(Factory::heap_number_map()));
   __ j(not_equal, &slow);
@@ -835,14 +973,12 @@
   // +/-Infinity into integer arrays basically undefined. For more
   // reproducible behavior, convert these to zero.
   __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-  __ mov(edx, eax);  // Save the value.
-  __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+  __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
   // ebx: untagged index
-  // ecx: base pointer of external storage
+  // edi: base pointer of external storage
   // top of FPU stack: value
   if (array_type == kExternalFloatArray) {
-    __ fstp_s(Operand(ecx, ebx, times_4, 0));
-    __ mov(eax, edx);  // Return the original value.
+    __ fstp_s(Operand(edi, ebx, times_4, 0));
     __ ret(0);
   } else {
     // Need to perform float-to-int conversion.
@@ -852,29 +988,27 @@
     __ j(parity_even, &is_nan);
 
     if (array_type != kExternalUnsignedIntArray) {
-      __ push(eax);  // Make room on stack
+      __ push(ecx);  // Make room on stack
       __ fistp_s(Operand(esp, 0));
-      __ pop(eax);
+      __ pop(ecx);
     } else {
       // fistp stores values as signed integers.
       // To represent the entire range, we need to store as a 64-bit
       // int and discard the high 32 bits.
-      __ push(eax);  // Make room on stack
-      __ push(eax);  // Make room on stack
+      __ sub(Operand(esp), Immediate(2 * kPointerSize));
       __ fistp_d(Operand(esp, 0));
-      __ pop(eax);
-      __ mov(Operand(esp, 0), eax);
-      __ pop(eax);
+      __ pop(ecx);
+      __ add(Operand(esp), Immediate(kPointerSize));
     }
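Note: the fistp_d/pop pair above truncates through a 64-bit integer so that
unsigned values above INT32_MAX keep their low word. Equivalent C++ (a
sketch):

    #include <cstdint>

    static uint32_t DoubleToUint32Sketch(double value) {
      int64_t wide = static_cast<int64_t>(value);  // fistp_d: 64-bit store
      return static_cast<uint32_t>(wide);          // pop keeps the low word
    }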
-    // eax: untagged integer value
+    // ecx: untagged integer value
     switch (array_type) {
       case kExternalByteArray:
       case kExternalUnsignedByteArray:
-        __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+        __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
         break;
       case kExternalShortArray:
       case kExternalUnsignedShortArray:
-        __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+        __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
         break;
       case kExternalIntArray:
       case kExternalUnsignedIntArray: {
@@ -885,21 +1019,20 @@
         // This test would apparently detect both NaN and Infinity,
         // but we've already checked for NaN using the FPU hardware
         // above.
-        __ mov_w(edi, FieldOperand(edx, HeapNumber::kValueOffset + 6));
-        __ and_(edi, 0x7FF0);
-        __ cmp(edi, 0x7FF0);
+        __ mov_w(edx, FieldOperand(eax, HeapNumber::kValueOffset + 6));
+        __ and_(edx, 0x7FF0);
+        __ cmp(edx, 0x7FF0);
         __ j(not_equal, &not_infinity);
-        __ mov(eax, 0);
+        __ mov(ecx, 0);
         __ bind(&not_infinity);
-        __ mov(Operand(ecx, ebx, times_4, 0), eax);
+        __ mov(Operand(edi, ebx, times_4, 0), ecx);
         break;
       }
       default:
         UNREACHABLE();
         break;
     }
-    __ mov(eax, edx);  // Return the original value.
-    __ ret(0);
+    __ ret(0);  // Return original value.
 
     __ bind(&is_nan);
     __ ffree();
@@ -907,23 +1040,22 @@
     switch (array_type) {
       case kExternalByteArray:
       case kExternalUnsignedByteArray:
-        __ mov_b(Operand(ecx, ebx, times_1, 0), 0);
+        __ mov_b(Operand(edi, ebx, times_1, 0), 0);
         break;
       case kExternalShortArray:
       case kExternalUnsignedShortArray:
-        __ mov(eax, 0);
-        __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+        __ xor_(ecx, Operand(ecx));
+        __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
         break;
       case kExternalIntArray:
       case kExternalUnsignedIntArray:
-        __ mov(Operand(ecx, ebx, times_4, 0), Immediate(0));
+        __ mov(Operand(edi, ebx, times_4, 0), Immediate(0));
         break;
       default:
         UNREACHABLE();
         break;
     }
-    __ mov(eax, edx);  // Return the original value.
-    __ ret(0);
+    __ ret(0);  // Return the original value.
   }
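
For reference, a minimal C++ model of the unsigned-int store path above (a
sketch of the arithmetic for in-range values, not V8 source): fistp stores
signed integers, so the stub converts through a 64-bit store (fistp_d) and
keeps only the low 32 bits, preserving values in (INT32_MAX, UINT32_MAX] that
a 32-bit fistp_s cannot represent.

    #include <cstdint>
    #include <cstdio>

    uint32_t StoreAsUint32(double value) {
      int64_t wide = static_cast<int64_t>(value);  // fistp_d: 64-bit store
      return static_cast<uint32_t>(wide);          // keep low word, drop high
    }

    int main() {
      printf("%u\n", StoreAsUint32(3000000000.0));  // > INT32_MAX, fits uint32
      return 0;
    }
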
 
   // Slow case: call runtime.
@@ -1238,7 +1370,8 @@
   __ push(ebx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
@@ -1353,7 +1486,8 @@
   __ push(ebx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
@@ -1370,7 +1504,7 @@
   __ push(ebx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(Runtime::kKeyedGetProperty), 2, 1);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 }
 
 
@@ -1407,7 +1541,60 @@
   __ push(ebx);
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except external and pixel arrays, which means
+  // anything with elements of FixedArray type), but for now it is restricted
+  // to JSArray.
+  // The value must be a number, but only smis are accepted, as that is the
+  // most common case.
+
+  Label miss;
+
+  Register receiver = edx;
+  Register value = eax;
+  Register scratch = ebx;
+
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the object is a JS array.
+  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that elements are FixedArray.
+  __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+  __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that value is a smi.
+  __ test(value, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  // Prepare tail call to StoreIC_ArrayLength.
+  __ pop(scratch);
+  __ push(receiver);
+  __ push(value);
+  __ push(scratch);  // return address
+
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
+  __ TailCallExternalReference(ref, 2, 1);
+
+  __ bind(&miss);
+
+  GenerateMiss(masm);
 }
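
A sketch of the guard conditions this stub establishes before tail-calling
kStoreIC_ArrayLength (illustrative C++, not V8 source; each false answer
corresponds to one of the jumps to &miss above):

    bool ArrayLengthStubApplies(bool receiver_is_smi,
                                bool receiver_is_js_array,
                                bool elements_are_fixed_array,
                                bool value_is_smi) {
      return !receiver_is_smi && receiver_is_js_array &&
             elements_are_fixed_array && value_is_smi;
    }
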
 
 
@@ -1417,38 +1604,39 @@
 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : key
-  //  -- esp[8] : receiver
   // -----------------------------------
 
-  __ pop(ecx);
-  __ push(Operand(esp, 1 * kPointerSize));
-  __ push(Operand(esp, 1 * kPointerSize));
-  __ push(eax);
+  __ pop(ebx);
+  __ push(edx);
   __ push(ecx);
+  __ push(eax);
+  __ push(ebx);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
 }
 
 
 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : key
-  //  -- esp[8] : receiver
   // -----------------------------------
 
-  __ pop(ecx);
-  __ push(Operand(esp, 1 * kPointerSize));
-  __ push(Operand(esp, 1 * kPointerSize));
-  __ push(eax);
+  __ pop(ebx);
+  __ push(edx);
   __ push(ecx);
+  __ push(eax);
+  __ push(ebx);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+  __ TailCallExternalReference(ref, 3, 1);
 }
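
A model of the miss sequences above, as plain C++ (a sketch, not V8 source).
With the new register-based convention (value: eax, key: ecx, receiver: edx),
a miss pops the return address, pushes the operands as runtime arguments, and
pushes the return address back before the tail call:

    #include <cstdint>
    #include <vector>

    void PushMissArguments(std::vector<uint32_t>* stack, uint32_t receiver,
                           uint32_t key, uint32_t value) {
      uint32_t return_address = stack->back();  // pop ebx
      stack->pop_back();
      stack->push_back(receiver);               // push edx
      stack->push_back(key);                    // push ecx
      stack->push_back(value);                  // push eax
      stack->push_back(return_address);         // push ebx
    }
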
 
 #undef __
diff --git a/src/ia32/jump-target-ia32.cc b/src/ia32/jump-target-ia32.cc
index c3f2bc1..cba6508 100644
--- a/src/ia32/jump-target-ia32.cc
+++ b/src/ia32/jump-target-ia32.cc
@@ -30,6 +30,7 @@
 #include "codegen-inl.h"
 #include "jump-target-inl.h"
 #include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 1f08e87..a7d2834 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -47,33 +47,40 @@
 }
 
 
-static void RecordWriteHelper(MacroAssembler* masm,
-                              Register object,
-                              Register addr,
-                              Register scratch) {
+void MacroAssembler::RecordWriteHelper(Register object,
+                                       Register addr,
+                                       Register scratch) {
+  if (FLAG_debug_code) {
+    // Check that the object is not in new space.
+    Label not_in_new_space;
+    InNewSpace(object, scratch, not_equal, &not_in_new_space);
+    Abort("new-space object passed to RecordWriteHelper");
+    bind(&not_in_new_space);
+  }
+
   Label fast;
 
   // Compute the page start address from the heap object pointer, and reuse
   // the 'object' register for it.
-  masm->and_(object, ~Page::kPageAlignmentMask);
+  and_(object, ~Page::kPageAlignmentMask);
   Register page_start = object;
 
   // Compute the bit addr in the remembered set/index of the pointer in the
   // page. Reuse 'addr' as pointer_offset.
-  masm->sub(addr, Operand(page_start));
-  masm->shr(addr, kObjectAlignmentBits);
+  sub(addr, Operand(page_start));
+  shr(addr, kObjectAlignmentBits);
   Register pointer_offset = addr;
 
   // If the bit offset lies beyond the normal remembered set range, it is in
   // the extra remembered set area of a large object.
-  masm->cmp(pointer_offset, Page::kPageSize / kPointerSize);
-  masm->j(less, &fast);
+  cmp(pointer_offset, Page::kPageSize / kPointerSize);
+  j(less, &fast);
 
   // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
   // extra remembered set after the large object.
 
   // Find the length of the large object (FixedArray).
-  masm->mov(scratch, Operand(page_start, Page::kObjectStartOffset
+  mov(scratch, Operand(page_start, Page::kObjectStartOffset
                                          + FixedArray::kLengthOffset));
   Register array_length = scratch;
 
@@ -83,59 +90,41 @@
   // Add the delta between the end of the normal RSet and the start of the
   // extra RSet to 'page_start', so that addressing the bit using
   // 'pointer_offset' hits the extra RSet words.
-  masm->lea(page_start,
-            Operand(page_start, array_length, times_pointer_size,
-                    Page::kObjectStartOffset + FixedArray::kHeaderSize
-                        - Page::kRSetEndOffset));
+  lea(page_start,
+      Operand(page_start, array_length, times_pointer_size,
+              Page::kObjectStartOffset + FixedArray::kHeaderSize
+                  - Page::kRSetEndOffset));
 
   // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
   // to limit code size. We should probably evaluate this decision by
   // measuring the performance of an equivalent implementation using
   // "simpler" instructions
-  masm->bind(&fast);
-  masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+  bind(&fast);
+  bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
 }
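
A model of the fast path above (a sketch; the page size and alignment
constants are assumptions for illustration, not authoritative V8 values):
each page keeps a remembered-set bitmap with one bit per word, and the bit
index for a slot is its word offset from the page start.

    #include <cstdint>

    const uintptr_t kPageAlignmentMask = (1u << 13) - 1;  // assumed 8KB pages
    const int kObjectAlignmentBits = 2;                   // 4-byte words, ia32

    void SetRememberedSetBit(uint32_t* rset, uintptr_t slot_address) {
      uintptr_t page_start = slot_address & ~kPageAlignmentMask;
      uintptr_t bit = (slot_address - page_start) >> kObjectAlignmentBits;
      rset[bit / 32] |= 1u << (bit % 32);  // what the bts instruction does
    }
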
 
 
-class RecordWriteStub : public CodeStub {
- public:
-  RecordWriteStub(Register object, Register addr, Register scratch)
-      : object_(object), addr_(addr), scratch_(scratch) { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Register object_;
-  Register addr_;
-  Register scratch_;
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
-           object_.code(), addr_.code(), scratch_.code());
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch) {
+  ASSERT(cc == equal || cc == not_equal);
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    mov(scratch, Operand(object));
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
+    cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
+    j(cc, branch);
+  } else {
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start().address());
+    lea(scratch, Operand(object, -new_space_start));
+    and_(scratch, Heap::NewSpaceMask());
+    j(cc, branch);
   }
-#endif
-
-  // Minor key encoding in 12 bits of three registers (object, address and
-  // scratch) OOOOAAAASSSS.
-  class ScratchBits: public BitField<uint32_t, 0, 4> {};
-  class AddressBits: public BitField<uint32_t, 4, 4> {};
-  class ObjectBits: public BitField<uint32_t, 8, 4> {};
-
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    // Encode the registers.
-    return ObjectBits::encode(object_.code()) |
-           AddressBits::encode(addr_.code()) |
-           ScratchBits::encode(scratch_.code());
-  }
-};
-
-
-void RecordWriteStub::Generate(MacroAssembler* masm) {
-  RecordWriteHelper(masm, object_, addr_, scratch_);
-  masm->ret(0);
 }
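
A model of the non-serializing branch of InNewSpace (a sketch, not V8 source):
new space is a single contiguous, suitably aligned region, so membership
reduces to a subtract and a mask, assuming the mask clears exactly the
offset-within-new-space bits.

    #include <cstdint>

    bool InNewSpaceModel(uintptr_t object, uintptr_t new_space_start,
                         uintptr_t new_space_mask) {
      // lea scratch, [object - start]; and scratch, mask; zero => inside.
      return ((object - new_space_start) & new_space_mask) == 0;
    }
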
 
 
@@ -153,7 +142,7 @@
 
   // First, check if a remembered set write is even needed. The tests below
   // catch stores of Smis and stores into young gen (which does not have space
-  // for the remembered set bits.
+  // for the remembered set bits).
   Label done;
 
   // Skip barrier if writing a smi.
@@ -161,24 +150,19 @@
   test(value, Immediate(kSmiTagMask));
   j(zero, &done);
 
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    mov(value, Operand(object));
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    and_(Operand(value), Immediate(ExternalReference::new_space_mask()));
-    cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
-    j(equal, &done);
-  } else {
-    int32_t new_space_start = reinterpret_cast<int32_t>(
-        ExternalReference::new_space_start().address());
-    lea(value, Operand(object, -new_space_start));
-    and_(value, Heap::NewSpaceMask());
-    j(equal, &done);
-  }
+  InNewSpace(object, value, equal, &done);
 
-  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
+  // The offset is relative to a tagged or untagged HeapObject pointer,
+  // so either offset or offset + kHeapObjectTag must be a
+  // multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize) ||
+         IsAligned(offset + kHeapObjectTag, kPointerSize));
+
+  // We use optimized write barrier code if the word being written to is not in
+  // a large object chunk or is in the first page of a large object chunk.
+  // We make sure that an offset is inside the right limits whether it is
+  // tagged or untagged.
+  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
     // Compute the bit offset in the remembered set, leave it in 'value'.
     lea(value, Operand(object, offset));
     and_(value, Page::kPageAlignmentMask);
@@ -209,7 +193,7 @@
     // If we are already generating a shared stub, not inlining the
     // record write code isn't going to save us any memory.
     if (generating_stub()) {
-      RecordWriteHelper(this, object, dst, value);
+      RecordWriteHelper(object, dst, value);
     } else {
       RecordWriteStub stub(object, dst, value);
       CallStub(&stub);
@@ -221,9 +205,9 @@
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (FLAG_debug_code) {
-    mov(object, Immediate(bit_cast<int32_t>(kZapValue)));
-    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
-    mov(scratch, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
   }
 }
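
A model of the new alignment assert in RecordWrite (a sketch): the offset may
be measured from a tagged or an untagged HeapObject pointer, and with
kHeapObjectTag == 1 exactly one of the two forms can be pointer-aligned.

    bool RecordWriteOffsetOk(int offset) {
      const int kPointerSize = 4;   // ia32
      const int kHeapObjectTag = 1;
      return offset % kPointerSize == 0 ||
             (offset + kHeapObjectTag) % kPointerSize == 0;
    }
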
 
@@ -386,17 +370,23 @@
 }
 
 
-void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
+void MacroAssembler::AbortIfNotNumber(Register object) {
   Label ok;
   test(object, Immediate(kSmiTagMask));
   j(zero, &ok);
   cmp(FieldOperand(object, HeapObject::kMapOffset),
       Factory::heap_number_map());
-  Assert(equal, msg);
+  Assert(equal, "Operand not a number");
   bind(&ok);
 }
 
 
+void MacroAssembler::AbortIfNotSmi(Register object) {
+  test(object, Immediate(kSmiTagMask));
+  Assert(equal, "Operand not a smi");
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
   mov(ebp, Operand(esp));
@@ -920,7 +910,9 @@
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
       Immediate(Factory::string_map()));
-  mov(FieldOperand(result, String::kLengthOffset), length);
+  mov(scratch1, length);
+  SmiTag(scratch1);
+  mov(FieldOperand(result, String::kLengthOffset), scratch1);
   mov(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
 }
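
The smi convention behind these length changes, as plain C++ (a sketch; ia32
has kSmiTag == 0 and kSmiTagSize == 1): a smi is the integer shifted left one
bit, so string lengths are now stored tagged. SmiTag itself (see the header
change below) becomes `add reg, reg`, which computes the same value as
`shl reg, 1`.

    #include <cstdint>

    int32_t SmiTagModel(int32_t value) { return value + value; }  // value << 1
    int32_t SmiUntagModel(int32_t smi) { return smi >> 1; }       // sar by 1
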
@@ -953,7 +945,9 @@
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
       Immediate(Factory::ascii_string_map()));
-  mov(FieldOperand(result, String::kLengthOffset), length);
+  mov(scratch1, length);
+  SmiTag(scratch1);
+  mov(FieldOperand(result, String::kLengthOffset), scratch1);
   mov(FieldOperand(result, String::kHashFieldOffset),
       Immediate(String::kEmptyHashField));
 }
@@ -1189,15 +1183,22 @@
 }
 
 
-void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
-                                     int num_arguments,
-                                     int result_size) {
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+                                               int num_arguments,
+                                               int result_size) {
   // TODO(1236192): Most runtime routines don't need the number of
   // arguments passed in because it is constant. At some point we
   // should remove this need and make the runtime routine entry code
   // smarter.
   Set(eax, Immediate(num_arguments));
-  JumpToRuntime(ext);
+  JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+                                     int num_arguments,
+                                     int result_size) {
+  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
 }
 
 
@@ -1267,7 +1268,7 @@
 }
 
 
-void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
   // Set the entry point and jump to the C entry runtime stub.
   mov(ebx, Immediate(ext));
   CEntryStub ces(1);
@@ -1418,16 +1419,28 @@
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  ASSERT(!target.is(edi));
+
+  // Load the builtins object into target register.
+  mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+
   // Load the JavaScript builtin function from the builtins object.
-  mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  mov(edi, FieldOperand(edi, GlobalObject::kBuiltinsOffset));
-  int builtins_offset =
-      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
-  mov(edi, FieldOperand(edi, builtins_offset));
-  // Load the code entry point from the function into the target register.
-  mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
-  add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
+  mov(edi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+
+  // Load the code entry point from the builtins object.
+  mov(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id)));
+  if (FLAG_debug_code) {
+    // Make sure the code objects in the builtins object and in the
+    // builtin function are the same.
+    push(target);
+    mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+    cmp(target, Operand(esp, 0));
+    Assert(equal, "Builtin code object changed");
+    pop(target);
+  }
+  lea(target, FieldOperand(target, Code::kHeaderSize));
 }
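
A model of the reworked GetBuiltinEntry (a sketch; the slot struct below is
an illustration of OffsetOfFunctionWithId/OffsetOfCodeWithId, not the real
object layout): the builtins object now caches each builtin's code object
next to its function, so the entry point takes one load instead of chasing
SharedFunctionInfo, and debug code merely cross-checks the two.

    #include <cstdint>

    struct BuiltinSlot { void* function; uint8_t* code_object; };

    uint8_t* GetBuiltinEntryModel(const BuiltinSlot* builtins, int id,
                                  int code_header_size) {
      return builtins[id].code_object + code_header_size;  // lea target, ...
    }
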
 
 
@@ -1545,6 +1558,21 @@
 }
 
 
+void MacroAssembler::CheckStackAlignment() {
+  int frame_alignment = OS::ActivationFrameAlignment();
+  int frame_alignment_mask = frame_alignment - 1;
+  if (frame_alignment > kPointerSize) {
+    ASSERT(IsPowerOf2(frame_alignment));
+    Label alignment_as_expected;
+    test(esp, Immediate(frame_alignment_mask));
+    j(zero, &alignment_as_expected);
+    // Abort if stack is not aligned.
+    int3();
+    bind(&alignment_as_expected);
+  }
+}
+
+
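
A model of CheckStackAlignment (a sketch): with a power-of-two activation
frame alignment, misalignment shows up in the low bits of esp, and the stub
executes int3 to trap when any of them is set.

    #include <cstdint>

    bool StackAligned(uintptr_t esp, int frame_alignment) {
      return (esp & static_cast<uintptr_t>(frame_alignment - 1)) == 0;
    }
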
 void MacroAssembler::Abort(const char* msg) {
   // We want to pass the msg string like a smi to avoid GC
   // problems, however msg is not guaranteed to be aligned
@@ -1575,7 +1603,7 @@
 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
     Register instance_type,
     Register scratch,
-    Label *failure) {
+    Label* failure) {
   if (!scratch.is(instance_type)) {
     mov(scratch, instance_type);
   }
@@ -1618,6 +1646,46 @@
 }
 
 
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+  int frameAlignment = OS::ActivationFrameAlignment();
+  if (frameAlignment != 0) {
+    // Make the stack end at the alignment boundary and make room for
+    // num_arguments words plus a slot for the original value of esp.
+    mov(scratch, esp);
+    sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
+    ASSERT(IsPowerOf2(frameAlignment));
+    and_(esp, -frameAlignment);
+    mov(Operand(esp, num_arguments * kPointerSize), scratch);
+  } else {
+    sub(Operand(esp), Immediate(num_arguments * kPointerSize));
+  }
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_arguments) {
+  // Trashing eax is ok as it will be the return value.
+  mov(Operand(eax), Immediate(function));
+  CallCFunction(eax, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   int num_arguments) {
+  // Check stack alignment.
+  if (FLAG_debug_code) {
+    CheckStackAlignment();
+  }
+
+  call(Operand(function));
+  if (OS::ActivationFrameAlignment() != 0) {
+    mov(esp, Operand(esp, num_arguments * kPointerSize));
+  } else {
+    add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
+  }
+}
+
+
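
A model of PrepareCallCFunction's arithmetic (a sketch, not V8 source):
reserve room for the arguments plus one slot for the original esp, then round
down to the required alignment; CallCFunction later restores esp from that
saved slot.

    #include <cstdint>

    uintptr_t AlignedEsp(uintptr_t esp, int num_arguments,
                         int frame_alignment /* power of two, e.g. 16 */) {
      const int kPointerSize = 4;  // ia32
      uintptr_t reserved = esp - (num_arguments + 1) * kPointerSize;
      return reserved & ~static_cast<uintptr_t>(frame_alignment - 1);
    }
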
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 69dc54c..c3a019b 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -48,6 +48,20 @@
   // ---------------------------------------------------------------------------
   // GC Support
 
+  // Set the remembered set bit for an address that points into an
+  // object. RecordWriteHelper only works if the object is not in new
+  // space.
+  void RecordWriteHelper(Register object,
+                         Register addr,
+                         Register scratch);
+
+  // Check if object is in new space.
+  // scratch can be object itself, but it will be clobbered.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,  // equal for new space, not_equal otherwise.
+                  Label* branch);
+
   // Set the remembered set bit for [object+offset].
   // object is the object being stored into, value is the object being stored.
   // If offset is zero, then the scratch register contains the array index into
@@ -170,14 +184,18 @@
   // Smi tagging support.
   void SmiTag(Register reg) {
     ASSERT(kSmiTag == 0);
-    shl(reg, kSmiTagSize);
+    ASSERT(kSmiTagSize == 1);
+    add(reg, Operand(reg));
   }
   void SmiUntag(Register reg) {
     sar(reg, kSmiTagSize);
   }
 
   // Abort execution if argument is not a number. Used in debug code.
-  void AbortIfNotNumber(Register object, const char* msg);
+  void AbortIfNotNumber(Register object);
+
+  // Abort execution if argument is not a smi. Used in debug code.
+  void AbortIfNotSmi(Register object);
 
   // ---------------------------------------------------------------------------
   // Exception handling
@@ -349,7 +367,6 @@
   void StubReturn(int argc);
 
   // Call a runtime routine.
-  // Eventually this should be used for all C calls.
   void CallRuntime(Runtime::Function* f, int num_arguments);
 
   // Call a runtime function, returning the CodeStub object called.
@@ -367,12 +384,34 @@
   Object* TryCallRuntime(Runtime::FunctionId id, int num_arguments);
 
   // Tail call of a runtime routine (jump).
-  // Like JumpToRuntime, but also takes care of passing the number
-  // of arguments.
-  void TailCallRuntime(const ExternalReference& ext,
+  // Like JumpToExternalReference, but also takes care of passing the number
+  // of parameters.
+  void TailCallExternalReference(const ExternalReference& ext,
+                                 int num_arguments,
+                                 int result_size);
+
+  // Convenience function: tail call a runtime routine (jump).
+  void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
+  // Before calling a C-function from generated code, align arguments on stack.
+  // After aligning the frame, arguments must be stored in esp[0], esp[4],
+  // etc., not pushed. The argument count assumes all arguments are word sized.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  void PrepareCallCFunction(int num_arguments, Register scratch);
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by PrepareCallCFunction. The called function is not allowed to trigger a
+  // garbage collection, since that might move the code and invalidate the
+  // return address (unless this is somehow accounted for by the called
+  // function).
+  void CallCFunction(ExternalReference function, int num_arguments);
+  void CallCFunction(Register function, int num_arguments);
+
   void PushHandleScope(Register scratch);
 
   // Pops a handle scope using the specified scratch register and
@@ -384,7 +423,7 @@
   Object* TryPopHandleScope(Register saved, Register scratch);
 
   // Jump to a runtime routine.
-  void JumpToRuntime(const ExternalReference& ext);
+  void JumpToExternalReference(const ExternalReference& ext);
 
 
   // ---------------------------------------------------------------------------
@@ -426,6 +465,9 @@
   // Print a message to stdout and abort execution.
   void Abort(const char* msg);
 
+  // Check that the stack is aligned.
+  void CheckStackAlignment();
+
   // Verify restrictions about code generated in stubs.
   void set_generating_stub(bool value) { generating_stub_ = value; }
   bool generating_stub() { return generating_stub_; }
@@ -440,7 +482,7 @@
   // for both instance type and scratch.
   void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
                                               Register scratch,
-                                              Label *on_not_flat_ascii_string);
+                                              Label* on_not_flat_ascii_string);
 
   // Checks if both objects are sequential ASCII strings, and jumps to label
   // if either is not.
@@ -448,7 +490,7 @@
                                            Register object2,
                                            Register scratch1,
                                            Register scratch2,
-                                           Label *on_not_flat_ascii_strings);
+                                           Label* on_not_flat_ascii_strings);
 
  private:
   bool generating_stub_;
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index f6da693..fdf3b9f 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -38,7 +38,7 @@
 namespace v8 {
 namespace internal {
 
-#ifdef V8_NATIVE_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
 /*
  * This assembler uses the following register assignment convention
  * - edx : current character. Must be loaded using LoadCurrentCharacter
@@ -324,8 +324,8 @@
     __ push(backtrack_stackpointer());
     __ push(ebx);
 
-    const int argument_count = 3;
-    FrameAlign(argument_count, ecx);
+    static const int argument_count = 3;
+    __ PrepareCallCFunction(argument_count, ecx);
     // Put arguments into allocated stack area, last argument highest on stack.
     // Parameters are
     //   Address byte_offset1 - Address captured substring's start.
@@ -346,7 +346,7 @@
 
     ExternalReference compare =
         ExternalReference::re_case_insensitive_compare_uc16();
-    CallCFunction(compare, argument_count);
+    __ CallCFunction(compare, argument_count);
     // Pop original values before reacting on result value.
     __ pop(ebx);
     __ pop(backtrack_stackpointer());
@@ -653,6 +653,8 @@
   __ j(not_zero, &exit_label_);
 
   __ bind(&stack_ok);
+  // Load start index for later use.
+  __ mov(ebx, Operand(ebp, kStartIndex));
 
   // Allocate space on stack for registers.
   __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
@@ -662,17 +664,23 @@
   __ mov(edi, Operand(ebp, kInputStart));
   // Set up edi to be negative offset from string end.
   __ sub(edi, Operand(esi));
-  // Set eax to address of char before start of input
+
+  // Set eax to the address of the char before the start of the string.
   // (effectively string position -1).
-  __ lea(eax, Operand(edi, -char_size()));
+  __ neg(ebx);
+  if (mode_ == UC16) {
+    __ lea(eax, Operand(edi, ebx, times_2, -char_size()));
+  } else {
+    __ lea(eax, Operand(edi, ebx, times_1, -char_size()));
+  }
   // Store this value in a local variable, for use when clearing
   // position registers.
   __ mov(Operand(ebp, kInputStartMinusOne), eax);
 
   // Determine whether the start index is zero, that is at the start of the
   // string, and store that value in a local variable.
-  __ mov(ebx, Operand(ebp, kStartIndex));
   __ xor_(Operand(ecx), ecx);  // setcc only operates on cl (lower byte of ecx).
+  // Register ebx still holds the negated start index.
   __ test(ebx, Operand(ebx));
   __ setcc(zero, ecx);  // 1 if 0 (start of string), 0 if positive.
   __ mov(Operand(ebp, kAtStart), ecx);
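
A model of the start-index fix (a sketch of the arithmetic): the "position
-1" base stored in kInputStartMinusOne must be relative to the start of the
string rather than the start of the scanned input, so the start index, scaled
by the character size, is subtracted as well.

    #include <cstdint>

    intptr_t InputStartMinusOne(intptr_t input_start_offset, int start_index,
                                int char_size /* 1 for ASCII, 2 for UC16 */) {
      return input_start_offset - start_index * char_size - char_size;
    }
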
@@ -721,10 +729,17 @@
       // copy captures to output
       __ mov(ebx, Operand(ebp, kRegisterOutput));
       __ mov(ecx, Operand(ebp, kInputEnd));
+      __ mov(edx, Operand(ebp, kStartIndex));
       __ sub(ecx, Operand(ebp, kInputStart));
+      if (mode_ == UC16) {
+        __ lea(ecx, Operand(ecx, edx, times_2, 0));
+      } else {
+        __ add(ecx, Operand(edx));
+      }
       for (int i = 0; i < num_saved_registers_; i++) {
         __ mov(eax, register_location(i));
-        __ add(eax, Operand(ecx));  // Convert to index from start, not end.
+        // Convert to index from start of string, not end.
+        __ add(eax, Operand(ecx));
         if (mode_ == UC16) {
           __ sar(eax, 1);  // Convert byte index to character index.
         }
@@ -784,13 +799,13 @@
     __ push(edi);
 
     // Call GrowStack(backtrack_stackpointer())
-    int num_arguments = 2;
-    FrameAlign(num_arguments, ebx);
+    static const int num_arguments = 2;
+    __ PrepareCallCFunction(num_arguments, ebx);
     __ lea(eax, Operand(ebp, kStackHighEnd));
     __ mov(Operand(esp, 1 * kPointerSize), eax);
     __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
     ExternalReference grow_stack = ExternalReference::re_grow_stack();
-    CallCFunction(grow_stack, num_arguments);
+    __ CallCFunction(grow_stack, num_arguments);
     // If return NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.
     __ or_(eax, Operand(eax));
@@ -817,7 +832,7 @@
                                        NULL,
                                        Code::ComputeFlags(Code::REGEXP),
                                        masm_->CodeObject());
-  LOG(RegExpCodeCreateEvent(*code, *source));
+  PROFILE(RegExpCodeCreateEvent(*code, *source));
   return Handle<Object>::cast(code);
 }
 
@@ -951,8 +966,8 @@
 // Private methods:
 
 void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
-  int num_arguments = 3;
-  FrameAlign(num_arguments, scratch);
+  static const int num_arguments = 3;
+  __ PrepareCallCFunction(num_arguments, scratch);
   // RegExp code frame pointer.
   __ mov(Operand(esp, 2 * kPointerSize), ebp);
   // Code* of self.
@@ -962,7 +977,7 @@
   __ mov(Operand(esp, 0 * kPointerSize), eax);
   ExternalReference check_stack_guard =
       ExternalReference::re_check_stack_guard_state();
-  CallCFunction(check_stack_guard, num_arguments);
+  __ CallCFunction(check_stack_guard, num_arguments);
 }
 
 
@@ -1087,19 +1102,22 @@
 
 
 void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
-  __ call(to);
+  Label return_to;
+  __ push(Immediate::CodeRelativeOffset(&return_to));
+  __ jmp(to);
+  __ bind(&return_to);
 }
 
 
 void RegExpMacroAssemblerIA32::SafeReturn() {
-  __ add(Operand(esp, 0), Immediate(masm_->CodeObject()));
-  __ ret(0);
+  __ pop(ebx);
+  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
+  __ jmp(Operand(ebx));
 }
 
 
 void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
   __ bind(name);
-  __ sub(Operand(esp, 0), Immediate(masm_->CodeObject()));
 }
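
A model of the SafeCall/SafeReturn rework (a sketch): return targets pushed
on the backtrack path are now code-relative offsets instead of absolute
addresses, so a GC that moves the code object cannot leave a stale return
address behind; SafeReturn rebases the offset against the current code object
before jumping.

    #include <cstdint>

    uintptr_t SafeReturnTarget(uintptr_t pushed_offset,
                               uintptr_t current_code_object) {
      return current_code_object + pushed_offset;  // add ebx, CodeObject; jmp
    }
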
 
 
@@ -1153,37 +1171,6 @@
 }
 
 
-void RegExpMacroAssemblerIA32::FrameAlign(int num_arguments, Register scratch) {
-  // TODO(lrn): Since we no longer use the system stack arbitrarily (but we do
-  // use it, e.g., for SafeCall), we know the number of elements on the stack
-  // since the last frame alignment. We might be able to do this simpler then.
-  int frameAlignment = OS::ActivationFrameAlignment();
-  if (frameAlignment != 0) {
-    // Make stack end at alignment and make room for num_arguments words
-    // and the original value of esp.
-    __ mov(scratch, esp);
-    __ sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
-    ASSERT(IsPowerOf2(frameAlignment));
-    __ and_(esp, -frameAlignment);
-    __ mov(Operand(esp, num_arguments * kPointerSize), scratch);
-  } else {
-    __ sub(Operand(esp), Immediate(num_arguments * kPointerSize));
-  }
-}
-
-
-void RegExpMacroAssemblerIA32::CallCFunction(ExternalReference function,
-                                             int num_arguments) {
-  __ mov(Operand(eax), Immediate(function));
-  __ call(Operand(eax));
-  if (OS::ActivationFrameAlignment() != 0) {
-    __ mov(esp, Operand(esp, num_arguments * kPointerSize));
-  } else {
-    __ add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
-  }
-}
-
-
 void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
                                                              int characters) {
   if (mode_ == ASCII) {
@@ -1211,6 +1198,6 @@
 
 #undef __
 
-#endif  // V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 }}  // namespace v8::internal
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index d9866b7..823bc03 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -31,14 +31,14 @@
 namespace v8 {
 namespace internal {
 
-#ifndef V8_NATIVE_REGEXP
+#ifdef V8_INTERPRETED_REGEXP
 class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
  public:
   RegExpMacroAssemblerIA32() { }
   virtual ~RegExpMacroAssemblerIA32() { }
 };
 
-#else
+#else  // V8_INTERPRETED_REGEXP
 class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
  public:
   RegExpMacroAssemblerIA32(Mode mode, int registers_to_save);
@@ -187,21 +187,6 @@
   // (ecx) and increments it by a word size.
   inline void Pop(Register target);
 
-  // Before calling a C-function from generated code, align arguments on stack.
-  // After aligning the frame, arguments must be stored in esp[0], esp[4],
-  // etc., not pushed. The argument count assumes all arguments are word sized.
-  // Some compilers/platforms require the stack to be aligned when calling
-  // C++ code.
-  // Needs a scratch register to do some arithmetic. This register will be
-  // trashed.
-  inline void FrameAlign(int num_arguments, Register scratch);
-
-  // Calls a C function and cleans up the space for arguments allocated
-  // by FrameAlign. The called function is not allowed to trigger a garbage
-  // collection, since that might move the code and invalidate the return
-  // address (unless this is somehow accounted for).
-  inline void CallCFunction(ExternalReference function, int num_arguments);
-
   MacroAssembler* masm_;
 
   // Which mode to generate code for (ASCII or UC16).
@@ -223,7 +208,7 @@
   Label check_preempt_label_;
   Label stack_overflow_label_;
 };
-#endif  // V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 }}  // namespace v8::internal
 
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
index 0bad87d..73fefb3 100644
--- a/src/ia32/register-allocator-ia32.cc
+++ b/src/ia32/register-allocator-ia32.cc
@@ -29,6 +29,7 @@
 
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -41,13 +42,40 @@
   if (is_constant()) {
     Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
     ASSERT(fresh.is_valid());
-    if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
+    if (is_untagged_int32()) {
+      fresh.set_untagged_int32(true);
+      if (handle()->IsSmi()) {
+        CodeGeneratorScope::Current()->masm()->Set(
+            fresh.reg(),
+            Immediate(Smi::cast(*handle())->value()));
+      } else if (handle()->IsHeapNumber()) {
+        double double_value = HeapNumber::cast(*handle())->value();
+        int32_t value = DoubleToInt32(double_value);
+        if (double_value == 0 && signbit(double_value)) {
+          // Negative zero must not be converted to an int32 unless
+          // the context allows it.
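+          // Branching on both conditions reaches the bailout on every path.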
+          CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
+          CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
+        } else if (double_value == value) {
+          CodeGeneratorScope::Current()->masm()->Set(
+              fresh.reg(), Immediate(value));
+        } else {
+          CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
+          CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
+        }
+      } else {
+        // Constant is not a number.  This was not predicted by AST analysis.
+        CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
+        CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
+      }
+    } else if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
       CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
     } else {
       CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
                                                  Immediate(handle()));
     }
     // This result becomes a copy of the fresh one.
+    fresh.set_type_info(type_info());
     *this = fresh;
   }
   ASSERT(is_register());
@@ -63,13 +91,39 @@
       CodeGeneratorScope::Current()->masm()->mov(fresh.reg(), reg());
     } else {
       ASSERT(is_constant());
-      if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
-        CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
+      if (is_untagged_int32()) {
+        if (handle()->IsSmi()) {
+          CodeGeneratorScope::Current()->masm()->Set(
+              fresh.reg(),
+              Immediate(Smi::cast(*handle())->value()));
+        } else {
+          ASSERT(handle()->IsHeapNumber());
+          double double_value = HeapNumber::cast(*handle())->value();
+          int32_t value = DoubleToInt32(double_value);
+          if (double_value == 0 && signbit(double_value)) {
+            // Negative zero must not be converted to an int32 unless
+            // the context allows it.
+            CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
+            CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
+          } else if (double_value == value) {
+            CodeGeneratorScope::Current()->masm()->Set(
+                fresh.reg(), Immediate(value));
+          } else {
+            CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
+            CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
+          }
+        }
       } else {
-        CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
-                                                   Immediate(handle()));
+        if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
+          CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
+        } else {
+          CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
+                                                     Immediate(handle()));
+        }
       }
     }
+    fresh.set_type_info(type_info());
+    fresh.set_untagged_int32(is_untagged_int32());
     *this = fresh;
   } else if (is_register() && reg().is(target)) {
     ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 5729d9d..809228c 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -221,7 +221,6 @@
 
   // Load length from the string and convert to a smi.
   __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
-  __ SmiTag(eax);
   __ ret(0);
 
   // Check if the object is a JSValue wrapper.
@@ -234,7 +233,6 @@
   __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
   GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
   __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
-  __ SmiTag(eax);
   __ ret(0);
 }
 
@@ -276,14 +274,15 @@
                                      Register holder,
                                      Register name,
                                      JSObject* holder_obj) {
-  __ push(receiver);
-  __ push(holder);
   __ push(name);
   InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
   ASSERT(!Heap::InNewSpace(interceptor));
-  __ mov(receiver, Immediate(Handle<Object>(interceptor)));
+  Register scratch = name;
+  __ mov(scratch, Immediate(Handle<Object>(interceptor)));
+  __ push(scratch);
   __ push(receiver);
-  __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
+  __ push(holder);
+  __ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
 }
 
 
@@ -446,7 +445,7 @@
 
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-      __ TailCallRuntime(ref, 5, 1);
+      __ TailCallExternalReference(ref, 5, 1);
 
       __ bind(&cleanup);
       __ pop(scratch1);
@@ -468,7 +467,7 @@
 
     ExternalReference ref = ExternalReference(
         IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
-    __ TailCallRuntime(ref, 5, 1);
+    __ TailCallExternalReference(ref, 5, 1);
   }
 
  private:
@@ -476,108 +475,6 @@
 };
 
 
-// Holds information about possible function call optimizations.
-class CallOptimization BASE_EMBEDDED {
- public:
-  explicit CallOptimization(LookupResult* lookup)
-    : constant_function_(NULL),
-      is_simple_api_call_(false),
-      expected_receiver_type_(NULL),
-      api_call_info_(NULL) {
-    if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
-
-    // We only optimize constant function calls.
-    if (lookup->type() != CONSTANT_FUNCTION) return;
-
-    Initialize(lookup->GetConstantFunction());
-  }
-
-  explicit CallOptimization(JSFunction* function) {
-    Initialize(function);
-  }
-
-  bool is_constant_call() const {
-    return constant_function_ != NULL;
-  }
-
-  JSFunction* constant_function() const {
-    ASSERT(constant_function_ != NULL);
-    return constant_function_;
-  }
-
-  bool is_simple_api_call() const {
-    return is_simple_api_call_;
-  }
-
-  FunctionTemplateInfo* expected_receiver_type() const {
-    ASSERT(is_simple_api_call_);
-    return expected_receiver_type_;
-  }
-
-  CallHandlerInfo* api_call_info() const {
-    ASSERT(is_simple_api_call_);
-    return api_call_info_;
-  }
-
-  // Returns the depth of the object having the expected type in the
-  // prototype chain between the two arguments.
-  int GetPrototypeDepthOfExpectedType(JSObject* object,
-                                      JSObject* holder) const {
-    ASSERT(is_simple_api_call_);
-    if (expected_receiver_type_ == NULL) return 0;
-    int depth = 0;
-    while (object != holder) {
-      if (object->IsInstanceOf(expected_receiver_type_)) return depth;
-      object = JSObject::cast(object->GetPrototype());
-      ++depth;
-    }
-    if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
-    return kInvalidProtoDepth;
-  }
-
- private:
-  void Initialize(JSFunction* function) {
-    if (!function->is_compiled()) return;
-
-    constant_function_ = function;
-    is_simple_api_call_ = false;
-
-    AnalyzePossibleApiFunction(function);
-  }
-
-  // Determines whether the given function can be called using the
-  // fast api call builtin.
-  void AnalyzePossibleApiFunction(JSFunction* function) {
-    SharedFunctionInfo* sfi = function->shared();
-    if (sfi->function_data()->IsUndefined()) return;
-    FunctionTemplateInfo* info =
-        FunctionTemplateInfo::cast(sfi->function_data());
-
-    // Require a C++ callback.
-    if (info->call_code()->IsUndefined()) return;
-    api_call_info_ = CallHandlerInfo::cast(info->call_code());
-
-    // Accept signatures that either have no restrictions at all or
-    // only have restrictions on the receiver.
-    if (!info->signature()->IsUndefined()) {
-      SignatureInfo* signature = SignatureInfo::cast(info->signature());
-      if (!signature->args()->IsUndefined()) return;
-      if (!signature->receiver()->IsUndefined()) {
-        expected_receiver_type_ =
-            FunctionTemplateInfo::cast(signature->receiver());
-      }
-    }
-
-    is_simple_api_call_ = true;
-  }
-
-  JSFunction* constant_function_;
-  bool is_simple_api_call_;
-  FunctionTemplateInfo* expected_receiver_type_;
-  CallHandlerInfo* api_call_info_;
-};
-
-
 // Reserves space for the extra arguments to FastHandleApiCall in the
 // caller's frame.
 //
@@ -698,8 +595,7 @@
 
     CallOptimization optimization(lookup);
 
-    if (optimization.is_constant_call() &&
-        !Top::CanHaveSpecialFunctions(holder)) {
+    if (optimization.is_constant_call()) {
       CompileCacheable(masm,
                        object,
                        receiver,
@@ -907,7 +803,7 @@
     __ push(Immediate(Handle<Map>(transition)));
     __ push(eax);
     __ push(scratch);
-    __ TailCallRuntime(
+    __ TailCallExternalReference(
         ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
     return;
   }
@@ -951,6 +847,26 @@
 }
 
 
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
+                                         GlobalObject* global,
+                                         String* name,
+                                         Register scratch,
+                                         Label* miss) {
+  Object* probe = global->EnsurePropertyCell(name);
+  if (probe->IsFailure()) return probe;
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+  ASSERT(cell->value()->IsTheHole());
+  __ mov(scratch, Immediate(Handle<Object>(cell)));
+  __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+         Immediate(Factory::the_hole_value()));
+  __ j(not_equal, miss, not_taken);
+  return cell;
+}
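
A model of GenerateCheckPropertyCell (a sketch, not V8 source): map checks
alone cannot guard a skipped global object, because global objects hold their
properties in dictionary mode and can gain a property without a map change.
The stub therefore embeds the (possibly freshly created) property cell and
re-checks that it still holds the hole:

    bool GlobalPropertyStillAbsent(const void* cell_value,
                                   const void* the_hole) {
      return cell_value == the_hole;  // cmp [cell + kValueOffset], the_hole
    }
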
+
+
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -969,21 +885,19 @@
                         push_at_depth, miss);
 
   // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty.
   while (object != holder) {
     if (object->IsGlobalObject()) {
-      GlobalObject* global = GlobalObject::cast(object);
-      Object* probe = global->EnsurePropertyCell(name);
-      if (probe->IsFailure()) {
-        set_failure(Failure::cast(probe));
+      Object* cell = GenerateCheckPropertyCell(masm(),
+                                               GlobalObject::cast(object),
+                                               name,
+                                               scratch,
+                                               miss);
+      if (cell->IsFailure()) {
+        set_failure(Failure::cast(cell));
         return result;
       }
-      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
-      ASSERT(cell->value()->IsTheHole());
-      __ mov(scratch, Immediate(Handle<Object>(cell)));
-      __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
-             Immediate(Factory::the_hole_value()));
-      __ j(not_equal, miss, not_taken);
     }
     object = JSObject::cast(object->GetPrototype());
   }
@@ -1047,17 +961,16 @@
   __ push(receiver);  // receiver
   __ push(reg);  // holder
   __ mov(other, Immediate(callback_handle));
-  __ push(other);
   __ push(FieldOperand(other, AccessorInfo::kDataOffset));  // data
   __ push(name_reg);  // name
   // Save a pointer to where we pushed the arguments pointer.
-  // This will be passed as the const Arguments& to the C++ callback.
+  // This will be passed as the const AccessorInfo& to the C++ callback.
   __ mov(eax, esp);
-  __ add(Operand(eax), Immediate(5 * kPointerSize));
+  __ add(Operand(eax), Immediate(4 * kPointerSize));
   __ mov(ebx, esp);
 
   // Do call through the api.
-  ASSERT_EQ(6, ApiGetterEntryStub::kStackSpace);
+  ASSERT_EQ(5, ApiGetterEntryStub::kStackSpace);
   Address getter_address = v8::ToCData<Address>(callback->getter());
   ApiFunction fun(getter_address);
   ApiGetterEntryStub stub(callback_handle, &fun);
@@ -1211,6 +1124,246 @@
 }
 
 
+Object* CallStubCompiler::CompileArrayPushCall(Object* object,
+                                               JSObject* holder,
+                                               JSFunction* function,
+                                               String* name,
+                                               CheckType check) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+  ASSERT(check == RECEIVER_MAP_CHECK);
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray()) {
+    return Heap::undefined_value();
+  }
+
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+
+  CheckPrototypes(JSObject::cast(object), edx,
+                  holder, ebx,
+                  eax, name, &miss);
+
+  if (argc == 0) {
+    // Noop, return the length.
+    __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
+    __ ret((argc + 1) * kPointerSize);
+  } else {
+    // Get the elements array of the object.
+    __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
+
+    // Check that the elements are in fast mode (not dictionary).
+    __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+           Immediate(Factory::fixed_array_map()));
+    __ j(not_equal, &miss);
+
+    if (argc == 1) {  // Otherwise fall through to call builtin.
+      Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
+
+      // Get the array's length into eax and calculate new length.
+      __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
+      STATIC_ASSERT(kSmiTagSize == 1);
+      STATIC_ASSERT(kSmiTag == 0);
+      __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
+
+      // Get the elements array's length into ecx.
+      __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+      __ SmiTag(ecx);
+
+      // Check if we could survive without allocation.
+      __ cmp(eax, Operand(ecx));
+      __ j(greater, &attempt_to_grow_elements);
+
+      // Save new length.
+      __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+
+      // Push the element.
+      __ lea(edx, FieldOperand(ebx,
+                               eax, times_half_pointer_size,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
+      __ mov(ecx, Operand(esp, argc * kPointerSize));
+      __ mov(Operand(edx, 0), ecx);
+
+      // Check if value is a smi.
+      __ test(ecx, Immediate(kSmiTagMask));
+      __ j(not_zero, &with_rset_update);
+
+      __ bind(&exit);
+      __ ret((argc + 1) * kPointerSize);
+
+      __ bind(&with_rset_update);
+
+      __ InNewSpace(ebx, ecx, equal, &exit);
+
+      RecordWriteStub stub(ebx, edx, ecx);
+      __ CallStub(&stub);
+      __ ret((argc + 1) * kPointerSize);
+
+      __ bind(&attempt_to_grow_elements);
+      ExternalReference new_space_allocation_top =
+          ExternalReference::new_space_allocation_top_address();
+      ExternalReference new_space_allocation_limit =
+          ExternalReference::new_space_allocation_limit_address();
+
+      const int kAllocationDelta = 4;
+      // Load top.
+      __ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
+
+      // Check if it's the end of elements.
+      __ lea(edx, FieldOperand(ebx,
+                               eax, times_half_pointer_size,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
+      __ cmp(edx, Operand(ecx));
+      __ j(not_equal, &call_builtin);
+      __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
+      __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
+      __ j(above, &call_builtin);
+
+      // We fit and could grow elements.
+      __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
+      __ mov(ecx, Operand(esp, argc * kPointerSize));
+
+      // Push the argument...
+      __ mov(Operand(edx, 0), ecx);
+      // ... and fill the rest with holes.
+      for (int i = 1; i < kAllocationDelta; i++) {
+        __ mov(Operand(edx, i * kPointerSize),
+               Immediate(Factory::the_hole_value()));
+      }
+
+      // Restore the receiver to edx; the finish sequence expects it there.
+      __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+      // Increment the lengths of the elements array and the array itself.
+      __ add(FieldOperand(ebx, FixedArray::kLengthOffset),
+             Immediate(kAllocationDelta));
+      __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+
+      // Elements are in new space, so no remembered set updates are necessary.
+      __ ret((argc + 1) * kPointerSize);
+
+      __ bind(&call_builtin);
+    }
+
+    __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
+                                 argc + 1,
+                                 1);
+  }
+
+  __ bind(&miss);
+
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
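
A model of the fast-path decision in CompileArrayPushCall (a sketch; lengths
here are element counts, whereas the generated code compares smi-tagged
values): push in place while capacity allows; otherwise grow by
kAllocationDelta slots only when the elements array ends exactly at the
new-space allocation top and the bump stays under the limit.

    #include <cstdint>

    enum PushPath { kInPlace, kGrowInNewSpace, kCallBuiltin };

    PushPath ClassifyPush(int new_length, int capacity,
                          uintptr_t elements_end, uintptr_t alloc_top,
                          uintptr_t alloc_limit) {
      const int kAllocationDelta = 4;
      const int kPointerSize = 4;  // ia32
      if (new_length <= capacity) return kInPlace;
      if (elements_end == alloc_top &&
          alloc_top + kAllocationDelta * kPointerSize <= alloc_limit) {
        return kGrowInNewSpace;
      }
      return kCallBuiltin;
    }
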
+Object* CallStubCompiler::CompileArrayPopCall(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              CheckType check) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+  ASSERT(check == RECEIVER_MAP_CHECK);
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray()) {
+    return Heap::undefined_value();
+  }
+
+  Label miss, return_undefined, call_builtin;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+  CheckPrototypes(JSObject::cast(object), edx,
+                  holder, ebx,
+                  eax, name, &miss);
+
+  // Get the elements array of the object.
+  __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
+
+  // Check that the elements are in fast mode (not dictionary).
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &miss);
+
+  // Get the array's length into ecx and calculate new length.
+  __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
+  __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
+  __ j(negative, &return_undefined);
+
+  // Get the last element.
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(eax, FieldOperand(ebx,
+                           ecx, times_half_pointer_size,
+                           FixedArray::kHeaderSize));
+  __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+  __ j(equal, &call_builtin);
+
+  // Set the array's length.
+  __ mov(FieldOperand(edx, JSArray::kLengthOffset), ecx);
+
+  // Fill with the hole.
+  __ mov(FieldOperand(ebx,
+                      ecx, times_half_pointer_size,
+                      FixedArray::kHeaderSize),
+         Immediate(Factory::the_hole_value()));
+  __ ret((argc + 1) * kPointerSize);
+
+  __ bind(&return_undefined);
+  __ mov(eax, Immediate(Factory::undefined_value()));
+  __ ret((argc + 1) * kPointerSize);
+
+  __ bind(&call_builtin);
+  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
+                               argc + 1,
+                               1);
+
+  __ bind(&miss);
+
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
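
A model of the fast path in CompileArrayPopCall (a sketch, not V8 source):
shrink the length, return the last element, and fall back to the builtin when
that element is the hole, since popping may then involve the prototype chain.

    #include <cstdint>

    enum PopResult { kPopFast, kPopUndefined, kPopBuiltin };

    PopResult ClassifyPop(int32_t* slots, int* length, int32_t the_hole,
                          int32_t* out) {
      int new_length = *length - 1;
      if (new_length < 0) return kPopUndefined;      // j(negative, ...)
      if (slots[new_length] == the_hole) return kPopBuiltin;
      *out = slots[new_length];      // value returned in eax
      *length = new_length;          // store the shrunken length
      slots[new_length] = the_hole;  // fill the vacated slot with the hole
      return kPopFast;
    }
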
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
@@ -1223,7 +1376,19 @@
   //  -- ...
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
-  Label miss;
+
+  SharedFunctionInfo* function_info = function->shared();
+  if (function_info->HasCustomCallGenerator()) {
+    CustomCallGenerator generator =
+        ToCData<CustomCallGenerator>(function_info->function_data());
+    Object* result = generator(this, object, holder, function, name, check);
+    // undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) {
+      return result;
+    }
+  }
+
+  Label miss_in_smi_check;
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -1232,7 +1397,7 @@
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
     __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, &miss, not_taken);
+    __ j(zero, &miss_in_smi_check, not_taken);
   }
 
   // Make sure that it's okay not to patch the on stack receiver
@@ -1241,6 +1406,7 @@
 
   CallOptimization optimization(function);
   int depth = kInvalidProtoDepth;
+  Label miss;
 
   switch (check) {
     case RECEIVER_MAP_CHECK:
@@ -1332,18 +1498,6 @@
       break;
     }
 
-    case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
-      CheckPrototypes(JSObject::cast(object), edx, holder,
-                      ebx, eax, name, &miss);
-      // Make sure object->HasFastElements().
-      // Get the elements array of the object.
-      __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-      // Check that the object is in fast mode (not dictionary).
-      __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
-             Immediate(Factory::fixed_array_map()));
-      __ j(not_equal, &miss, not_taken);
-      break;
-
     default:
       UNREACHABLE();
   }
@@ -1359,6 +1513,7 @@
   if (depth != kInvalidProtoDepth) {
     FreeSpaceForFastApiCall(masm(), eax);
   }
+  __ bind(&miss_in_smi_check);
   Handle<Code> ic = ComputeCallMiss(arguments().immediate());
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
@@ -1587,7 +1742,7 @@
   // Do tail-call to the runtime system.
   ExternalReference store_callback_property =
       ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
-  __ TailCallRuntime(store_callback_property, 4, 1);
+  __ TailCallExternalReference(store_callback_property, 4, 1);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -1636,7 +1791,7 @@
   // Do tail-call to the runtime system.
   ExternalReference store_ic_property =
       ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
-  __ TailCallRuntime(store_ic_property, 3, 1);
+  __ TailCallExternalReference(store_ic_property, 3, 1);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -1689,23 +1844,18 @@
                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : key
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label miss;
 
   __ IncrementCounter(&Counters::keyed_store_field, 1);
 
-  // Get the name from the stack.
-  __ mov(ecx, Operand(esp, 1 * kPointerSize));
   // Check that the name has not changed.
   __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  // Get the object from the stack.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
-
   // Generate store field code.  Trashes the name register.
   GenerateStoreField(masm(),
                      object,
@@ -1725,6 +1875,48 @@
 }
 
 
+Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
+                                                 JSObject* object,
+                                                 JSObject* last) {
+  // ----------- S t a t e -------------
+  //  -- eax    : receiver
+  //  -- ecx    : name
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check the maps of the full prototype chain. Also check that
+  // global property cells up to (but not including) the last object
+  // in the prototype chain are empty.
+  CheckPrototypes(object, eax, last, ebx, edx, name, &miss);
+
+  // If the last object in the prototype chain is a global object,
+  // check that the global property cell is empty.
+  if (last->IsGlobalObject()) {
+    Object* cell = GenerateCheckPropertyCell(masm(),
+                                             GlobalObject::cast(last),
+                                             name,
+                                             edx,
+                                             &miss);
+    if (cell->IsFailure()) return cell;
+  }
+
+  // Return undefined if the maps of the full prototype chain are still the
+  // same and no global property with this name contains a value.
+  __ mov(eax, Factory::undefined_value());
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NONEXISTENT, Heap::empty_string());
+}
+
 
 Object* LoadStubCompiler::CompileLoadField(JSObject* object,
                                            JSObject* holder,
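
The array-pop stub above leans on ia32 smi tagging. Given kSmiTag == 0 and
kSmiTagSize == 1 (the STATIC_ASSERTs in CompileArrayPopCall), a smi is the
integer shifted left by one bit, so "test reg, kSmiTagMask" distinguishes smis
from heap pointers, and a smi length can index a FixedArray directly with
scale times_half_pointer_size. A minimal standalone sketch of that arithmetic
(the constants are restated here for illustration, not included from the v8
headers):

    #include <cassert>
    #include <cstdint>

    // Restated ia32 smi constants, mirroring the STATIC_ASSERTs above.
    const intptr_t kSmiTag = 0;
    const int kSmiTagSize = 1;
    const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
    const int kPointerSize = 4;  // ia32

    intptr_t SmiTag(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;
    }
    int32_t SmiUntag(intptr_t smi) {
      return static_cast<int32_t>(smi >> kSmiTagSize);
    }
    bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == kSmiTag; }

    int main() {
      intptr_t length = SmiTag(3);
      assert(IsSmi(length));  // test(reg, kSmiTagMask); j(zero, ...).
      // FieldOperand(ebx, ecx, times_half_pointer_size, ...): scaling the
      // smi by 2 is the same as scaling the untagged value by kPointerSize.
      assert(length * (kPointerSize / 2) == SmiUntag(length) * kPointerSize);
      return 0;
    }
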
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 7df028e..10aaa52 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -30,29 +30,13 @@
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "scopes.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
 
 #define __ ACCESS_MASM(masm())
 
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address.  All frame elements are in memory.
-VirtualFrame::VirtualFrame()
-    : elements_(parameter_count() + local_count() + kPreallocatedElements),
-      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
-  for (int i = 0; i <= stack_pointer_; i++) {
-    elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
-  }
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    register_locations_[i] = kIllegalIndex;
-  }
-}
-
-
 void VirtualFrame::SyncElementBelowStackPointer(int index) {
   // Emit code to write elements below the stack pointer to their
   // (already allocated) stack address.
@@ -178,7 +162,7 @@
     if (element.is_constant() || element.is_copy()) {
       if (element.is_synced()) {
         // Just spill.
-        elements_[i] = FrameElement::MemoryElement(NumberInfo::kUnknown);
+        elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
       } else {
         // Allocate to a register.
         FrameElement backing_element;  // Invalid if not a copy.
@@ -190,7 +174,7 @@
         elements_[i] =
             FrameElement::RegisterElement(fresh.reg(),
                                           FrameElement::NOT_SYNCED,
-                                          NumberInfo::kUnknown);
+                                          TypeInfo::Unknown());
         Use(fresh.reg(), i);
 
         // Emit a move.
@@ -223,7 +207,7 @@
       // The copy flag is not relied on before the end of this loop,
       // including when registers are spilled.
       elements_[i].clear_copied();
-      elements_[i].set_number_info(NumberInfo::kUnknown);
+      elements_[i].set_type_info(TypeInfo::Unknown());
     }
   }
 }
@@ -613,12 +597,12 @@
     elements_[new_backing_index] =
         FrameElement::RegisterElement(backing_reg,
                                       FrameElement::SYNCED,
-                                      original.number_info());
+                                      original.type_info());
   } else {
     elements_[new_backing_index] =
         FrameElement::RegisterElement(backing_reg,
                                       FrameElement::NOT_SYNCED,
-                                      original.number_info());
+                                      original.type_info());
   }
   // Update the other copies.
   for (int i = new_backing_index + 1; i < element_count(); i++) {
@@ -650,7 +634,7 @@
       FrameElement new_element =
           FrameElement::RegisterElement(fresh.reg(),
                                         FrameElement::NOT_SYNCED,
-                                        original.number_info());
+                                        original.type_info());
       Use(fresh.reg(), element_count());
       elements_.Add(new_element);
       __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
@@ -791,6 +775,89 @@
 }
 
 
+void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
+  ASSERT(index >= 0);
+  ASSERT(index <= element_count());
+  FrameElement original = elements_[index];
+  if (original.is_copy()) {
+    original = elements_[original.index()];
+    index = original.index();
+  }
+
+  switch (original.type()) {
+    case FrameElement::MEMORY:
+    case FrameElement::REGISTER:  {
+      Label done;
+      // Emit code to load the original element's data into a register.
+      // Push that register as a FrameElement on top of the frame.
+      Result fresh = cgen()->allocator()->Allocate();
+      ASSERT(fresh.is_valid());
+      Register fresh_reg = fresh.reg();
+      FrameElement new_element =
+          FrameElement::RegisterElement(fresh_reg,
+                                        FrameElement::NOT_SYNCED,
+                                        original.type_info());
+      new_element.set_untagged_int32(true);
+      Use(fresh_reg, element_count());
+      fresh.Unuse();  // BreakTarget does not handle a live Result well.
+      elements_.Add(new_element);
+      if (original.is_register()) {
+        __ mov(fresh_reg, original.reg());
+      } else {
+        ASSERT(original.is_memory());
+        __ mov(fresh_reg, Operand(ebp, fp_relative(index)));
+      }
+      // Now convert the value to int32, or bail out.
+      if (original.type_info().IsSmi()) {
+        __ SmiUntag(fresh_reg);
+        // Pushing the element is completely done.
+      } else {
+        __ test(fresh_reg, Immediate(kSmiTagMask));
+        Label not_smi;
+        __ j(not_zero, &not_smi);
+        __ SmiUntag(fresh_reg);
+        __ jmp(&done);
+
+        __ bind(&not_smi);
+        if (!original.type_info().IsNumber()) {
+          __ cmp(FieldOperand(fresh_reg, HeapObject::kMapOffset),
+                 Factory::heap_number_map());
+          cgen()->unsafe_bailout_->Branch(not_equal);
+        }
+
+        if (!CpuFeatures::IsSupported(SSE2)) {
+          UNREACHABLE();
+        } else {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ movdbl(xmm0, FieldOperand(fresh_reg, HeapNumber::kValueOffset));
+          __ cvttsd2si(fresh_reg, Operand(xmm0));
+          __ cvtsi2sd(xmm1, Operand(fresh_reg));
+          __ ucomisd(xmm0, xmm1);
+          cgen()->unsafe_bailout_->Branch(not_equal);
+          cgen()->unsafe_bailout_->Branch(parity_even);  // NaN.
+          // Test for negative zero.
+          __ test(fresh_reg, Operand(fresh_reg));
+          __ j(not_zero, &done);
+          __ movmskpd(fresh_reg, xmm0);
+          __ and_(fresh_reg, 0x1);
+          cgen()->unsafe_bailout_->Branch(not_equal);
+        }
+        __ bind(&done);
+      }
+      break;
+    }
+    case FrameElement::CONSTANT:
+      elements_.Add(CopyElementAt(index));
+      elements_[element_count() - 1].set_untagged_int32(true);
+      break;
+    case FrameElement::COPY:
+    case FrameElement::INVALID:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
 void VirtualFrame::PushTryHandler(HandlerType type) {
   ASSERT(cgen()->HasValidEntryRegisters());
   // Grow the expression stack by handler size less one (the return
@@ -842,6 +909,25 @@
 }
 
 
+Result VirtualFrame::CallJSFunction(int arg_count) {
+  Result function = Pop();
+
+  // InvokeFunction requires function in edi.  Move it in there.
+  function.ToRegister(edi);
+  function.Unuse();
+
+  // +1 for receiver.
+  PrepareForCall(arg_count + 1, arg_count + 1);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(edi, count, CALL_FUNCTION);
+  RestoreContextRegister();
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
 Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
   PrepareForCall(arg_count, arg_count);
   ASSERT(cgen()->HasValidEntryRegisters());
@@ -895,30 +981,39 @@
 }
 
 
+// This function assumes that the only results that could be in a_reg or b_reg
+// are a and b.  Other results can be live, but must not be in a_reg or b_reg.
+void VirtualFrame::MoveResultsToRegisters(Result* a,
+                                          Result* b,
+                                          Register a_reg,
+                                          Register b_reg) {
+  if (a->is_register() && a->reg().is(a_reg)) {
+    b->ToRegister(b_reg);
+  } else if (!cgen()->allocator()->is_used(a_reg)) {
+    a->ToRegister(a_reg);
+    b->ToRegister(b_reg);
+  } else if (cgen()->allocator()->is_used(b_reg)) {
+    // a must be in b_reg, b in a_reg.
+    __ xchg(a_reg, b_reg);
+    // Results a and b will be invalidated, so it is ok if they are switched.
+  } else {
+    b->ToRegister(b_reg);
+    a->ToRegister(a_reg);
+  }
+  a->Unuse();
+  b->Unuse();
+}
+
+
 Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
   // Name and receiver are on the top of the frame.  The IC expects
   // name in ecx and receiver in eax.
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   Result name = Pop();
   Result receiver = Pop();
   PrepareForCall(0, 0);  // No stack arguments.
-  // Move results to the right registers:
-  if (name.is_register() && name.reg().is(eax)) {
-    if (receiver.is_register() && receiver.reg().is(ecx)) {
-      // Wrong registers.
-      __ xchg(eax, ecx);
-    } else {
-      // Register ecx is free for name, which frees eax for receiver.
-      name.ToRegister(ecx);
-      receiver.ToRegister(eax);
-    }
-  } else {
-    // Register eax is free for receiver, which frees ecx for name.
-    receiver.ToRegister(eax);
-    name.ToRegister(ecx);
-  }
-  name.Unuse();
-  receiver.Unuse();
+  MoveResultsToRegisters(&name, &receiver, ecx, eax);
+
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   return RawCallCodeObject(ic, mode);
 }
 
@@ -928,20 +1023,7 @@
   Result key = Pop();
   Result receiver = Pop();
   PrepareForCall(0, 0);
-
-  if (!key.is_register() || !key.reg().is(edx)) {
-    // Register edx is available for receiver.
-    receiver.ToRegister(edx);
-    key.ToRegister(eax);
-  } else if (!receiver.is_register() || !receiver.reg().is(eax)) {
-    // Register eax is available for key.
-    key.ToRegister(eax);
-    receiver.ToRegister(edx);
-  } else {
-    __ xchg(edx, eax);
-  }
-  key.Unuse();
-  receiver.Unuse();
+  MoveResultsToRegisters(&key, &receiver, eax, edx);
 
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
   return RawCallCodeObject(ic, mode);
@@ -957,42 +1039,62 @@
     PrepareForCall(0, 0);
     value.ToRegister(eax);
     __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ mov(ecx, name);
+    value.Unuse();
   } else {
     Result receiver = Pop();
     PrepareForCall(0, 0);
-
-    if (value.is_register() && value.reg().is(edx)) {
-      if (receiver.is_register() && receiver.reg().is(eax)) {
-        // Wrong registers.
-        __ xchg(eax, edx);
-      } else {
-        // Register eax is free for value, which frees edx for receiver.
-        value.ToRegister(eax);
-        receiver.ToRegister(edx);
-      }
-    } else {
-      // Register edx is free for receiver, which guarantees eax is free for
-      // value.
-      receiver.ToRegister(edx);
-      value.ToRegister(eax);
-    }
+    MoveResultsToRegisters(&value, &receiver, eax, edx);
   }
   __ mov(ecx, name);
-  value.Unuse();
   return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
 }
 
 
 Result VirtualFrame::CallKeyedStoreIC() {
   // Value, key, and receiver are on the top of the frame.  The IC
-  // expects value in eax and key and receiver on the stack.  It does
-  // not drop the key and receiver.
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  // expects value in eax, key in ecx, and receiver in edx.
   Result value = Pop();
-  PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
-  value.ToRegister(eax);
-  value.Unuse();
+  Result key = Pop();
+  Result receiver = Pop();
+  PrepareForCall(0, 0);
+  if (!cgen()->allocator()->is_used(eax) ||
+      (value.is_register() && value.reg().is(eax))) {
+    if (!cgen()->allocator()->is_used(eax)) {
+      value.ToRegister(eax);
+    }
+    MoveResultsToRegisters(&key, &receiver, ecx, edx);
+    value.Unuse();
+  } else if (!cgen()->allocator()->is_used(ecx) ||
+             (key.is_register() && key.reg().is(ecx))) {
+    if (!cgen()->allocator()->is_used(ecx)) {
+      key.ToRegister(ecx);
+    }
+    MoveResultsToRegisters(&value, &receiver, eax, edx);
+    key.Unuse();
+  } else if (!cgen()->allocator()->is_used(edx) ||
+             (receiver.is_register() && receiver.reg().is(edx))) {
+    if (!cgen()->allocator()->is_used(edx)) {
+      receiver.ToRegister(edx);
+    }
+    MoveResultsToRegisters(&key, &value, ecx, eax);
+    receiver.Unuse();
+  } else {
+    // All three registers are used, and no value is in the correct place.
+    // We have one of the two circular permutations of eax, ecx, edx.
+    ASSERT(value.is_register());
+    if (value.reg().is(ecx)) {
+      __ xchg(eax, edx);
+      __ xchg(eax, ecx);
+    } else {
+      __ xchg(eax, ecx);
+      __ xchg(eax, edx);
+    }
+    value.Unuse();
+    key.Unuse();
+    receiver.Unuse();
+  }
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
   return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
 }
 
@@ -1065,13 +1167,14 @@
   FrameElement element = elements_.RemoveLast();
   int index = element_count();
   ASSERT(element.is_valid());
+  ASSERT(element.is_untagged_int32() == cgen()->in_safe_int32_mode());
 
   // Get number type information of the result.
-  NumberInfo::Type info;
+  TypeInfo info;
   if (!element.is_copy()) {
-    info = element.number_info();
+    info = element.type_info();
   } else {
-    info = elements_[element.index()].number_info();
+    info = elements_[element.index()].type_info();
   }
 
   bool pop_needed = (stack_pointer_ == index);
@@ -1081,7 +1184,8 @@
       Result temp = cgen()->allocator()->Allocate();
       ASSERT(temp.is_valid());
       __ pop(temp.reg());
-      temp.set_number_info(info);
+      temp.set_type_info(info);
+      temp.set_untagged_int32(element.is_untagged_int32());
       return temp;
     }
 
@@ -1094,6 +1198,7 @@
   if (element.is_register()) {
     Unuse(element.reg());
   } else if (element.is_copy()) {
+    ASSERT(!element.is_untagged_int32());
     ASSERT(element.index() < index);
     index = element.index();
     element = elements_[index];
@@ -1105,23 +1210,28 @@
     // Memory elements could only be the backing store of a copy.
     // Allocate the original to a register.
     ASSERT(index <= stack_pointer_);
+    ASSERT(!element.is_untagged_int32());
     Result temp = cgen()->allocator()->Allocate();
     ASSERT(temp.is_valid());
     Use(temp.reg(), index);
     FrameElement new_element =
         FrameElement::RegisterElement(temp.reg(),
                                       FrameElement::SYNCED,
-                                      element.number_info());
+                                      element.type_info());
     // Preserve the copy flag on the element.
     if (element.is_copied()) new_element.set_copied();
     elements_[index] = new_element;
     __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
     return Result(temp.reg(), info);
   } else if (element.is_register()) {
-    return Result(element.reg(), info);
+    Result return_value(element.reg(), info);
+    return_value.set_untagged_int32(element.is_untagged_int32());
+    return return_value;
   } else {
     ASSERT(element.is_constant());
-    return Result(element.handle());
+    Result return_value(element.handle());
+    return_value.set_untagged_int32(element.is_untagged_int32());
+    return return_value;
   }
 }
 
@@ -1142,7 +1252,7 @@
 }
 
 
-void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
@@ -1150,7 +1260,7 @@
 }
 
 
-void VirtualFrame::EmitPush(Operand operand, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
@@ -1158,7 +1268,7 @@
 }
 
 
-void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
@@ -1166,6 +1276,12 @@
 }
 
 
+void VirtualFrame::PushUntaggedElement(Handle<Object> value) {
+  elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED));
+  elements_[element_count() - 1].set_untagged_int32(true);
+}
+
+
 void VirtualFrame::Push(Expression* expr) {
   ASSERT(expr->IsTrivial());
 
@@ -1176,11 +1292,17 @@
   }
 
   VariableProxy* proxy = expr->AsVariableProxy();
-  if (proxy != NULL && proxy->is_this()) {
-    PushParameterAt(-1);
-    return;
+  if (proxy != NULL) {
+    Slot* slot = proxy->var()->slot();
+    if (slot->type() == Slot::LOCAL) {
+      PushLocalAt(slot->index());
+      return;
+    }
+    if (slot->type() == Slot::PARAMETER) {
+      PushParameterAt(slot->index());
+      return;
+    }
   }
-
   UNREACHABLE();
 }
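
The CallKeyedStoreIC fallback above resolves the two possible circular
permutations of eax/ecx/edx with exactly two xchg instructions. A standalone
check of that identity, with plain ints standing in for registers (the
kValue/kKey/kReceiver encoding exists only in this sketch):

    #include <cassert>
    #include <utility>

    int main() {
      enum { kValue = 0, kKey = 1, kReceiver = 2 };
      // Desired placement: value in eax, key in ecx, receiver in edx.

      // Cycle 1: value sits in ecx, so key is in edx and receiver in eax.
      int eax = kReceiver, ecx = kValue, edx = kKey;
      std::swap(eax, edx);  // __ xchg(eax, edx)
      std::swap(eax, ecx);  // __ xchg(eax, ecx)
      assert(eax == kValue && ecx == kKey && edx == kReceiver);

      // Cycle 2: value sits in edx, so key is in eax and receiver in ecx.
      eax = kKey; ecx = kReceiver; edx = kValue;
      std::swap(eax, ecx);  // __ xchg(eax, ecx)
      std::swap(eax, edx);  // __ xchg(eax, edx)
      assert(eax == kValue && ecx == kKey && edx == kReceiver);
      return 0;
    }
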
 
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index 7be593c..14fe466 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -28,7 +28,7 @@
 #ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
 #define V8_IA32_VIRTUAL_FRAME_IA32_H_
 
-#include "number-info.h"
+#include "type-info.h"
 #include "register-allocator.h"
 #include "scopes.h"
 
@@ -73,10 +73,10 @@
   static const int kIllegalIndex = -1;
 
   // Construct an initial virtual frame on entry to a JS function.
-  VirtualFrame();
+  inline VirtualFrame();
 
   // Construct a virtual frame as a clone of an existing one.
-  explicit VirtualFrame(VirtualFrame* original);
+  explicit inline VirtualFrame(VirtualFrame* original);
 
   CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
 
@@ -84,7 +84,7 @@
 
   // Create a duplicate of an existing valid frame element.
   FrameElement CopyElementAt(int index,
-    NumberInfo::Type info = NumberInfo::kUninitialized);
+    TypeInfo info = TypeInfo::Uninitialized());
 
   // The number of elements on the virtual frame.
   int element_count() { return elements_.length(); }
@@ -138,7 +138,7 @@
   void ForgetElements(int count);
 
   // Spill all values from the frame to memory.
-  void SpillAll();
+  inline void SpillAll();
 
   // Spill all occurrences of a specific register from the frame.
   void Spill(Register reg) {
@@ -199,7 +199,7 @@
   // Prepare for returning from the frame by spilling locals.  This
   // avoids generating unnecessary merge code when jumping to the
   // shared return site.  Emits code for spills.
-  void PrepareForReturn();
+  inline void PrepareForReturn();
 
   // Number of local variables after which we use a loop for allocating.
   static const int kLocalVarBound = 10;
@@ -242,6 +242,11 @@
     PushFrameSlotAt(local0_index() + index);
   }
 
+  // Push a copy of the value of a local frame slot on top of the frame.
+  void UntaggedPushLocalAt(int index) {
+    UntaggedPushFrameSlotAt(local0_index() + index);
+  }
+
   // Push the value of a local frame slot on top of the frame and invalidate
   // the local slot.  The slot should be written to before trying to read
   // from it again.
@@ -282,6 +287,11 @@
     PushFrameSlotAt(param0_index() + index);
   }
 
+  // Push a copy of the value of a parameter frame slot on top of the frame.
+  void UntaggedPushParameterAt(int index) {
+    UntaggedPushFrameSlotAt(param0_index() + index);
+  }
+
   // Push the value of a parameter frame slot on top of the frame and
   // invalidate the parameter slot.  The slot should be written to before
   // trying to read from it again.
@@ -321,6 +331,10 @@
   // arguments are consumed by the call.
   Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
 
+  // Call the JS function on top of the stack, with arguments
+  // taken from the stack.
+  Result CallJSFunction(int arg_count);
+
   // Call runtime given the number of arguments expected on (and
   // removed from) the stack.
   Result CallRuntime(Runtime::Function* f, int arg_count);
@@ -388,18 +402,18 @@
   // Push an element on top of the expression stack and emit a
   // corresponding push instruction.
   void EmitPush(Register reg,
-                NumberInfo::Type info = NumberInfo::kUnknown);
+                TypeInfo info = TypeInfo::Unknown());
   void EmitPush(Operand operand,
-                NumberInfo::Type info = NumberInfo::kUnknown);
+                TypeInfo info = TypeInfo::Unknown());
   void EmitPush(Immediate immediate,
-                NumberInfo::Type info = NumberInfo::kUnknown);
+                TypeInfo info = TypeInfo::Unknown());
 
   // Push an element on the virtual frame.
-  void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
-  void Push(Handle<Object> value);
-  void Push(Smi* value) {
-    Push(Handle<Object> (value));
-  }
+  inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
+  inline void Push(Handle<Object> value);
+  inline void Push(Smi* value);
+
+  void PushUntaggedElement(Handle<Object> value);
 
   // Pushing a result invalidates it (its contents become owned by the
   // frame).
@@ -407,11 +421,15 @@
     // This assert will trigger if you try to push the same value twice.
     ASSERT(result->is_valid());
     if (result->is_register()) {
-      Push(result->reg(), result->number_info());
+      Push(result->reg(), result->type_info());
     } else {
       ASSERT(result->is_constant());
       Push(result->handle());
     }
+    if (cgen()->in_safe_int32_mode()) {
+      ASSERT(result->is_untagged_int32());
+      elements_[element_count() - 1].set_untagged_int32(true);
+    }
     result->Unuse();
   }
 
@@ -422,7 +440,19 @@
   // Nip removes zero or more elements from immediately below the top
   // of the frame, leaving the previous top-of-frame value on top of
   // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
-  void Nip(int num_dropped);
+  inline void Nip(int num_dropped);
+
+  // Check that the frame contains no untagged int32 elements.
+  bool HasNoUntaggedInt32Elements() {
+    for (int i = 0; i < element_count(); ++i) {
+      if (elements_[i].is_untagged_int32()) return false;
+    }
+    return true;
+  }
+
+  // Update the type information of a variable frame element directly.
+  inline void SetTypeForLocalAt(int index, TypeInfo info);
+  inline void SetTypeForParamAt(int index, TypeInfo info);
 
  private:
   static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
@@ -530,7 +560,12 @@
 
   // Push a copy of a frame slot (typically a local or parameter) on top of
   // the frame.
-  void PushFrameSlotAt(int index);
+  inline void PushFrameSlotAt(int index);
+
+  // Push a copy of a frame slot (typically a local or parameter) on top of
+  // the frame, as an untagged int32 value.  Bails out if the value is not
+  // an int32.
+  void UntaggedPushFrameSlotAt(int index);
 
   // Push the value of a frame slot (typically a local or parameter) on
   // top of the frame and invalidate the slot.
@@ -573,6 +608,14 @@
   // Register counts are correctly updated.
   int InvalidateFrameSlotAt(int index);
 
+  // This function assumes that a and b are the only results that could be in
+  // the registers a_reg or b_reg.  Other results can be live, but must not
+  // be in the registers a_reg or b_reg.  The results a and b are invalidated.
+  void MoveResultsToRegisters(Result* a,
+                              Result* b,
+                              Register a_reg,
+                              Register b_reg);
+
   // Call a code stub that has already been prepared for calling (via
   // PrepareForCall).
   Result RawCallStub(CodeStub* stub);
@@ -581,7 +624,7 @@
   // (via PrepareForCall).
   Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
 
-  bool Equals(VirtualFrame* other);
+  inline bool Equals(VirtualFrame* other);
 
   // Classes that need raw access to the elements_ array.
   friend class DeferredCode;
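
UntaggedPushFrameSlotAt's SSE2 sequence (cvttsd2si, cvtsi2sd, ucomisd,
movmskpd) bails out unless a heap number's value is exactly representable as
an int32 and is not negative zero. A portable C++ model of that predicate (a
sketch, not V8 code; the explicit range guard stands in for cvttsd2si's
out-of-range behavior, which would also fail the round-trip compare):

    #include <cmath>
    #include <cstdint>

    bool IsExactInt32(double d) {
      if (std::isnan(d)) return false;                 // ucomisd: parity_even.
      if (d < INT32_MIN || d > INT32_MAX) return false;  // Cannot round-trip.
      int32_t i = static_cast<int32_t>(d);             // cvttsd2si: truncate.
      if (static_cast<double>(i) != d) return false;   // cvtsi2sd + ucomisd:
                                                       // rejects non-integers.
      if (i == 0 && std::signbit(d)) return false;     // movmskpd: rejects -0.0.
      return true;
    }
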
diff --git a/src/ic.cc b/src/ic.cc
index b6b57dc..64c3ec1 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -63,7 +63,9 @@
                  Code* new_target,
                  const char* extra_info) {
   if (FLAG_trace_ic) {
-    State new_state = StateFrom(new_target, Heap::undefined_value());
+    State new_state = StateFrom(new_target,
+                                Heap::undefined_value(),
+                                Heap::undefined_value());
     PrintF("[%s (%c->%c)%s", type,
            TransitionMarkFromState(old_state),
            TransitionMarkFromState(new_state),
@@ -132,7 +134,7 @@
 }
 #endif
 
-IC::State IC::StateFrom(Code* target, Object* receiver) {
+IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
   IC::State state = target->ic_state();
 
   if (state != MONOMORPHIC) return state;
@@ -148,7 +150,7 @@
   // the receiver map's code cache.  Therefore, if the current target
   // is in the receiver map's code cache, the inline cache failed due
   // to prototype check failure.
-  int index = map->IndexInCodeCache(target);
+  int index = map->IndexInCodeCache(name, target);
   if (index >= 0) {
     // For keyed load/store, the most likely cause of cache failure is
     // that the key has changed.  We do not distinguish between
@@ -160,7 +162,7 @@
 
     // Remove the target from the code cache to avoid hitting the same
     // invalid stub again.
-    map->RemoveFromCodeCache(index);
+    map->RemoveFromCodeCache(String::cast(name), target, index);
 
     return MONOMORPHIC_PROTOTYPE_FAILURE;
   }
@@ -222,6 +224,8 @@
     case Code::STORE_IC: return StoreIC::Clear(address, target);
     case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
+    case Code::BINARY_OP_IC: return;  // Clearing these is tricky and does not
+                                      // make any performance difference.
     default: UNREACHABLE();
   }
 }
@@ -455,17 +459,6 @@
   ASSERT(result != Heap::the_hole_value());
 
   if (result->IsJSFunction()) {
-    // Check if there is an optimized (builtin) version of the function.
-    // Ignored this will degrade performance for some Array functions.
-    // Please note we only return the optimized function iff
-    // the JSObject has FastElements.
-    if (object->IsJSObject() && JSObject::cast(*object)->HasFastElements()) {
-      Object* opt = Top::LookupSpecialFunction(JSObject::cast(*object),
-                                               lookup.holder(),
-                                               JSFunction::cast(result));
-      if (opt->IsJSFunction()) return opt;
-    }
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Handle stepping into a function if step into is active.
     if (Debug::StepInActive()) {
@@ -622,7 +615,8 @@
     }
 
     // Use specialized code for getting prototype of functions.
-    if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol())) {
+    if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol()) &&
+        JSFunction::cast(*object)->should_have_prototype()) {
 #ifdef DEBUG
       if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
 #endif
@@ -701,8 +695,8 @@
                           State state,
                           Handle<Object> object,
                           Handle<String> name) {
-  // Bail out if we didn't find a result.
-  if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
+  // Bail out if the result is not cacheable.
+  if (!lookup->IsCacheable()) return;
 
   // Loading properties from values is not common, so don't try to
   // deal with non-JS objects here.
@@ -716,6 +710,9 @@
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
     code = pre_monomorphic_stub();
+  } else if (!lookup->IsProperty()) {
+    // Nonexistent property. The result is undefined.
+    code = StubCache::ComputeLoadNonexistent(*name, *receiver);
   } else {
     // Compute monomorphic stub.
     switch (lookup->type()) {
@@ -828,7 +825,8 @@
       }
 
       // Use specialized code for getting prototype of functions.
-      if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol())) {
+      if (object->IsJSFunction() && name->Equals(Heap::prototype_symbol()) &&
+        JSFunction::cast(*object)->should_have_prototype()) {
         Handle<JSFunction> function = Handle<JSFunction>::cast(object);
         Object* code =
             StubCache::ComputeKeyedLoadFunctionPrototype(*name, *function);
@@ -1043,6 +1041,20 @@
     return *value;
   }
 
+
+  // Use specialized code for setting the length of arrays.
+  if (receiver->IsJSArray()
+      && name->Equals(Heap::length_symbol())
+      && receiver->AllowsSetElementsLength()) {
+#ifdef DEBUG
+    if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
+#endif
+    Code* target = Builtins::builtin(Builtins::StoreIC_ArrayLength);
+    set_target(target);
+    StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
+    return receiver->SetProperty(*name, *value, NONE);
+  }
+
   // Lookup the property locally in the receiver.
   if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
     LookupResult lookup;
@@ -1276,7 +1288,7 @@
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   CallIC ic;
-  IC::State state = IC::StateFrom(ic.target(), args[0]);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   Object* result =
       ic.LoadFunction(state, args.at<Object>(0), args.at<String>(1));
 
@@ -1309,7 +1321,7 @@
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   LoadIC ic;
-  IC::State state = IC::StateFrom(ic.target(), args[0]);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   return ic.Load(state, args.at<Object>(0), args.at<String>(1));
 }
 
@@ -1319,7 +1331,7 @@
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   KeyedLoadIC ic;
-  IC::State state = IC::StateFrom(ic.target(), args[0]);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   return ic.Load(state, args.at<Object>(0), args.at<Object>(1));
 }
 
@@ -1329,12 +1341,25 @@
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
   StoreIC ic;
-  IC::State state = IC::StateFrom(ic.target(), args[0]);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   return ic.Store(state, args.at<Object>(0), args.at<String>(1),
                   args.at<Object>(2));
 }
 
 
+Object* StoreIC_ArrayLength(Arguments args) {
+  NoHandleAllocation nha;
+
+  ASSERT(args.length() == 2);
+  JSObject* receiver = JSObject::cast(args[0]);
+  Object* len = args[1];
+
+  Object* result = receiver->SetElementsLength(len);
+  if (result->IsFailure()) return result;
+  return len;
+}
+
+
 // Extend storage is called in a store inline cache when
 // it is necessary to extend the properties array of a
 // JSObject.
@@ -1374,12 +1399,100 @@
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
   KeyedStoreIC ic;
-  IC::State state = IC::StateFrom(ic.target(), args[0]);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   return ic.Store(state, args.at<Object>(0), args.at<Object>(1),
                   args.at<Object>(2));
 }
 
 
+void BinaryOpIC::patch(Code* code) {
+  set_target(code);
+}
+
+
+const char* BinaryOpIC::GetName(TypeInfo type_info) {
+  switch (type_info) {
+    case DEFAULT: return "Default";
+    case GENERIC: return "Generic";
+    case HEAP_NUMBERS: return "HeapNumbers";
+    case STRINGS: return "Strings";
+    default: return "Invalid";
+  }
+}
+
+
+BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
+  switch (type_info) {
+    // DEFAULT is mapped to UNINITIALIZED so that calls to DEFAULT stubs
+    // are not cleared at GC.
+    case DEFAULT: return UNINITIALIZED;
+
+    // Could have mapped GENERIC to MONOMORPHIC just as well but MEGAMORPHIC is
+    // conceptually closer.
+    case GENERIC: return MEGAMORPHIC;
+
+    default: return MONOMORPHIC;
+  }
+}
+
+
+BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
+                                             Object* right) {
+  if (left->IsSmi() && right->IsSmi()) {
+    return GENERIC;
+  }
+
+  if (left->IsNumber() && right->IsNumber()) {
+    return HEAP_NUMBERS;
+  }
+
+  if (left->IsString() || right->IsString()) {
+    // Patching for fast string ADD makes sense even if only one of the
+    // arguments is a string.
+    return STRINGS;
+  }
+
+  return GENERIC;
+}
+
+
+// defined in codegen-<arch>.cc
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
+
+
+Object* BinaryOp_Patch(Arguments args) {
+  ASSERT(args.length() == 6);
+
+  Handle<Object> left = args.at<Object>(0);
+  Handle<Object> right = args.at<Object>(1);
+  Handle<Object> result = args.at<Object>(2);
+  int key = Smi::cast(args[3])->value();
+#ifdef DEBUG
+  Token::Value op = static_cast<Token::Value>(Smi::cast(args[4])->value());
+  BinaryOpIC::TypeInfo prev_type_info =
+      static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[5])->value());
+#endif  // DEBUG
+  { HandleScope scope;
+    BinaryOpIC::TypeInfo type_info = BinaryOpIC::GetTypeInfo(*left, *right);
+    Handle<Code> code = GetBinaryOpStub(key, type_info);
+    if (!code.is_null()) {
+      BinaryOpIC ic;
+      ic.patch(*code);
+#ifdef DEBUG
+      if (FLAG_trace_ic) {
+        PrintF("[BinaryOpIC (%s->%s)#%s]\n",
+            BinaryOpIC::GetName(prev_type_info),
+            BinaryOpIC::GetName(type_info),
+            Token::Name(op));
+      }
+#endif  // DEBUG
+    }
+  }
+
+  return *result;
+}
+
+
 static Address IC_utilities[] = {
 #define ADDR(name) FUNCTION_ADDR(name),
     IC_UTIL_LIST(ADDR)
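
BinaryOp_Patch above re-specializes the stub from the operand types it
actually observed at run time. The selection rule is small enough to model on
its own; in this behavioral sketch the Operand predicates are hypothetical
stand-ins for Object::IsSmi/IsNumber/IsString, and the smi/smi case maps to
GENERIC on one plausible reading: two smis reaching the patch routine mean
the inline smi fast path has already failed (for instance on overflow), so a
smi-specialized stub would not help.

    // Sketch only; not the V8 classes.
    enum StubKind { DEFAULT, HEAP_NUMBERS, STRINGS, GENERIC };

    struct Operand { bool is_smi, is_number, is_string; };

    StubKind SelectStub(const Operand& left, const Operand& right) {
      if (left.is_smi && right.is_smi) return GENERIC;
      if (left.is_number && right.is_number) return HEAP_NUMBERS;
      // One string operand suffices: fast string ADD still applies.
      if (left.is_string || right.is_string) return STRINGS;
      return GENERIC;
    }
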
diff --git a/src/ic.h b/src/ic.h
index feff8c5..6aae096 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -45,6 +45,7 @@
   ICU(KeyedLoadIC_Miss)                               \
   ICU(CallIC_Miss)                                    \
   ICU(StoreIC_Miss)                                   \
+  ICU(StoreIC_ArrayLength)                            \
   ICU(SharedStoreIC_ExtendStorage)                    \
   ICU(KeyedStoreIC_Miss)                              \
   /* Utilities for IC stubs. */                       \
@@ -54,7 +55,8 @@
   ICU(LoadPropertyWithInterceptorForLoad)             \
   ICU(LoadPropertyWithInterceptorForCall)             \
   ICU(KeyedLoadPropertyWithInterceptor)               \
-  ICU(StoreInterceptorProperty)
+  ICU(StoreInterceptorProperty)                       \
+  ICU(BinaryOp_Patch)
 
 //
 // IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
@@ -92,8 +94,8 @@
   Code* target() { return GetTargetAtAddress(address()); }
   inline Address address();
 
-  // Compute the current IC state based on the target stub and the receiver.
-  static State StateFrom(Code* target, Object* receiver);
+  // Compute the current IC state based on the target stub, receiver and name.
+  static State StateFrom(Code* target, Object* receiver, Object* name);
 
   // Clear the inline cache to initial state.
   static void Clear(Address address);
@@ -299,7 +301,6 @@
   // Clear the use of the inlined version.
   static void ClearInlinedVersion(Address address);
 
- private:
   // Bit mask to be tested against bit field for the cases when
   // generic stub should go into slow case.
   // Access check is necessary explicitly since generic stub does not perform
@@ -307,6 +308,7 @@
   static const int kSlowCaseBitFieldMask =
       (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
 
+ private:
   // Update the inline cache.
   void UpdateCaches(LookupResult* lookup,
                     State state,
@@ -358,6 +360,7 @@
   static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateMegamorphic(MacroAssembler* masm);
+  static void GenerateArrayLength(MacroAssembler* masm);
 
  private:
   // Update the inline cache and the global stub cache based on the
@@ -442,6 +445,30 @@
 };
 
 
+class BinaryOpIC: public IC {
+ public:
+
+  enum TypeInfo {
+    DEFAULT,  // Initial state. When first executed, patches to one
+              // of the following states depending on the operand types.
+    HEAP_NUMBERS,  // Both arguments are HeapNumbers.
+    STRINGS,  // At least one of the arguments is a String.
+    GENERIC   // Non-specialized case (processes any type combination).
+  };
+
+  BinaryOpIC() : IC(NO_EXTRA_FRAME) { }
+
+  void patch(Code* code);
+
+  static void Clear(Address address, Code* target);
+
+  static const char* GetName(TypeInfo type_info);
+
+  static State ToState(TypeInfo type_info);
+
+  static TypeInfo GetTypeInfo(Object* left, Object* right);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_IC_H_
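
The ICU(...) entries added here feed the usual X-macro expansion: at the end
of the ic.cc diff above, IC_UTIL_LIST(ADDR) stamps out FUNCTION_ADDR(name)
once per utility to build the IC_utilities address table. A self-contained
illustration of the pattern with toy names (Alpha/Beta/Gamma are not the V8
list):

    #include <cstdio>

    // A list macro in the style of IC_UTIL_LIST: V is applied per entry.
    #define UTIL_LIST(V) V(Alpha) V(Beta) V(Gamma)

    static void Alpha() {}
    static void Beta() {}
    static void Gamma() {}

    typedef void (*UtilityFn)();

    #define ADDR(name) name,
    static UtilityFn utilities[] = { UTIL_LIST(ADDR) };
    #undef ADDR

    int main() {
      std::printf("%d utilities\n",
                  static_cast<int>(sizeof(utilities) / sizeof(utilities[0])));
      return 0;
    }
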
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 019de68..9a1f1f1 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -43,7 +43,7 @@
 #include "regexp-macro-assembler-irregexp.h"
 #include "regexp-stack.h"
 
-#ifdef V8_NATIVE_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
 #include "ia32/regexp-macro-assembler-ia32.h"
 #elif V8_TARGET_ARCH_X64
@@ -122,6 +122,7 @@
   }
   FlattenString(pattern);
   CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  PostponeInterruptsScope postpone;
   RegExpCompileData parse_result;
   FlatStringReader reader(pattern);
   if (!ParseRegExp(&reader, flags.is_multiline(), &parse_result)) {
@@ -144,7 +145,7 @@
     Handle<String> atom_string = Factory::NewStringFromTwoByte(atom_pattern);
     AtomCompile(re, pattern, flags, atom_string);
   } else {
-    IrregexpPrepare(re, pattern, flags, parse_result.capture_count);
+    IrregexpInitialize(re, pattern, flags, parse_result.capture_count);
   }
   ASSERT(re->data()->IsFixedArray());
   // Compilation succeeded so the data is set on the regexp
@@ -235,10 +236,10 @@
 // returns false.
 bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
   Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
-#ifdef V8_NATIVE_REGEXP
-  if (compiled_code->IsCode()) return true;
-#else  // ! V8_NATIVE_REGEXP (RegExp interpreter code)
+#ifdef V8_INTERPRETED_REGEXP
   if (compiled_code->IsByteArray()) return true;
+#else  // V8_INTERPRETED_REGEXP (RegExp native code)
+  if (compiled_code->IsCode()) return true;
 #endif
   return CompileIrregexp(re, is_ascii);
 }
@@ -247,6 +248,7 @@
 bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
   // Compile the RegExp.
   CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+  PostponeInterruptsScope postpone;
   Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
   if (entry->IsJSObject()) {
     // If it's a JSObject, a previous compilation failed and threw this object.
@@ -336,10 +338,10 @@
 }
 
 
-void RegExpImpl::IrregexpPrepare(Handle<JSRegExp> re,
-                                 Handle<String> pattern,
-                                 JSRegExp::Flags flags,
-                                 int capture_count) {
+void RegExpImpl::IrregexpInitialize(Handle<JSRegExp> re,
+                                    Handle<String> pattern,
+                                    JSRegExp::Flags flags,
+                                    int capture_count) {
   // Initialize compiled code entries to null.
   Factory::SetRegExpIrregexpData(re,
                                  JSRegExp::IRREGEXP,
@@ -349,6 +351,94 @@
 }
 
 
+int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
+                                Handle<String> subject) {
+  if (!subject->IsFlat()) {
+    FlattenString(subject);
+  }
+  bool is_ascii = subject->IsAsciiRepresentation();
+  if (!EnsureCompiledIrregexp(regexp, is_ascii)) {
+    return -1;
+  }
+#ifdef V8_INTERPRETED_REGEXP
+  // Byte-code regexp needs space allocated for all its registers.
+  return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data()));
+#else  // V8_INTERPRETED_REGEXP
+  // Native regexp only needs room to output captures. Registers are handled
+  // internally.
+  return (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
+#endif  // V8_INTERPRETED_REGEXP
+}
+
+
+RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(Handle<JSRegExp> regexp,
+                                                        Handle<String> subject,
+                                                        int index,
+                                                        Vector<int> output) {
+  Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
+
+  ASSERT(index >= 0);
+  ASSERT(index <= subject->length());
+  ASSERT(subject->IsFlat());
+
+#ifndef V8_INTERPRETED_REGEXP
+  ASSERT(output.length() >=
+      (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
+  do {
+    bool is_ascii = subject->IsAsciiRepresentation();
+    Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii));
+    NativeRegExpMacroAssembler::Result res =
+        NativeRegExpMacroAssembler::Match(code,
+                                          subject,
+                                          output.start(),
+                                          output.length(),
+                                          index);
+    if (res != NativeRegExpMacroAssembler::RETRY) {
+      ASSERT(res != NativeRegExpMacroAssembler::EXCEPTION ||
+             Top::has_pending_exception());
+      STATIC_ASSERT(
+          static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) == RE_SUCCESS);
+      STATIC_ASSERT(
+          static_cast<int>(NativeRegExpMacroAssembler::FAILURE) == RE_FAILURE);
+      STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION)
+                    == RE_EXCEPTION);
+      return static_cast<IrregexpResult>(res);
+    }
+    // If result is RETRY, the string has changed representation, and we
+    // must restart from scratch.
+    // In that case we must make sure we are prepared to handle the
+    // potentially different subject (the string can switch between
+    // being internal and external, and even between being ASCII and UC16,
+    // but the characters are always the same).
+    IrregexpPrepare(regexp, subject);
+  } while (true);
+  UNREACHABLE();
+  return RE_EXCEPTION;
+#else  // V8_INTERPRETED_REGEXP
+
+  ASSERT(output.length() >= IrregexpNumberOfRegisters(*irregexp));
+  bool is_ascii = subject->IsAsciiRepresentation();
+  // We must have done EnsureCompiledIrregexp, so we can get the number of
+  // registers.
+  int* register_vector = output.start();
+  int number_of_capture_registers =
+      (IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
+  for (int i = number_of_capture_registers - 1; i >= 0; i--) {
+    register_vector[i] = -1;
+  }
+  Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii));
+
+  if (IrregexpInterpreter::Match(byte_codes,
+                                 subject,
+                                 register_vector,
+                                 index)) {
+    return RE_SUCCESS;
+  }
+  return RE_FAILURE;
+#endif  // V8_INTERPRETED_REGEXP
+}
+
+
 Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
                                         Handle<String> subject,
                                         int previous_index,
@@ -356,10 +446,7 @@
   ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
 
   // Prepare space for the return values.
-  int number_of_capture_registers =
-      (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
-
-#ifndef V8_NATIVE_REGEXP
+#ifdef V8_INTERPRETED_REGEXP
 #ifdef DEBUG
   if (FLAG_trace_regexp_bytecodes) {
     String* pattern = jsregexp->Pattern();
@@ -368,101 +455,42 @@
   }
 #endif
 #endif
-
-  if (!subject->IsFlat()) {
-    FlattenString(subject);
-  }
-
-  last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead);
-
-  Handle<FixedArray> array;
-
-  // Dispatch to the correct RegExp implementation.
-  Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
-
-#ifdef V8_NATIVE_REGEXP
-
-  OffsetsVector captures(number_of_capture_registers);
-  int* captures_vector = captures.vector();
-  NativeRegExpMacroAssembler::Result res;
-  do {
-    bool is_ascii = subject->IsAsciiRepresentation();
-    if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
-      return Handle<Object>::null();
-    }
-    Handle<Code> code(RegExpImpl::IrregexpNativeCode(*regexp, is_ascii));
-    res = NativeRegExpMacroAssembler::Match(code,
-                                            subject,
-                                            captures_vector,
-                                            captures.length(),
-                                            previous_index);
-    // If result is RETRY, the string have changed representation, and we
-    // must restart from scratch.
-  } while (res == NativeRegExpMacroAssembler::RETRY);
-  if (res == NativeRegExpMacroAssembler::EXCEPTION) {
+  int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject);
+  if (required_registers < 0) {
+    // Compiling failed with an exception.
     ASSERT(Top::has_pending_exception());
     return Handle<Object>::null();
   }
-  ASSERT(res == NativeRegExpMacroAssembler::SUCCESS
-      || res == NativeRegExpMacroAssembler::FAILURE);
 
-  if (res != NativeRegExpMacroAssembler::SUCCESS) return Factory::null_value();
+  OffsetsVector registers(required_registers);
 
-  array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
-  ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
-  // The captures come in (start, end+1) pairs.
-  for (int i = 0; i < number_of_capture_registers; i += 2) {
-    // Capture values are relative to start_offset only.
-    // Convert them to be relative to start of string.
-    if (captures_vector[i] >= 0) {
-      captures_vector[i] += previous_index;
+  IrregexpResult res = IrregexpExecOnce(jsregexp,
+                                        subject,
+                                        previous_index,
+                                        Vector<int>(registers.vector(),
+                                                    registers.length()));
+  if (res == RE_SUCCESS) {
+    int capture_register_count =
+        (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
+    last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
+    AssertNoAllocation no_gc;
+    int* register_vector = registers.vector();
+    FixedArray* array = FixedArray::cast(last_match_info->elements());
+    for (int i = 0; i < capture_register_count; i += 2) {
+      SetCapture(array, i, register_vector[i]);
+      SetCapture(array, i + 1, register_vector[i + 1]);
     }
-    if (captures_vector[i + 1] >= 0) {
-      captures_vector[i + 1] += previous_index;
-    }
-    SetCapture(*array, i, captures_vector[i]);
-    SetCapture(*array, i + 1, captures_vector[i + 1]);
+    SetLastCaptureCount(array, capture_register_count);
+    SetLastSubject(array, *subject);
+    SetLastInput(array, *subject);
+    return last_match_info;
   }
-
-#else  // ! V8_NATIVE_REGEXP
-
-  bool is_ascii = subject->IsAsciiRepresentation();
-  if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
+  if (res == RE_EXCEPTION) {
+    ASSERT(Top::has_pending_exception());
     return Handle<Object>::null();
   }
-  // Now that we have done EnsureCompiledIrregexp we can get the number of
-  // registers.
-  int number_of_registers =
-      IrregexpNumberOfRegisters(FixedArray::cast(jsregexp->data()));
-  OffsetsVector registers(number_of_registers);
-  int* register_vector = registers.vector();
-  for (int i = number_of_capture_registers - 1; i >= 0; i--) {
-    register_vector[i] = -1;
-  }
-  Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii));
-
-  if (!IrregexpInterpreter::Match(byte_codes,
-                                  subject,
-                                  register_vector,
-                                  previous_index)) {
-    return Factory::null_value();
-  }
-
-  array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
-  ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
-  // The captures come in (start, end+1) pairs.
-  for (int i = 0; i < number_of_capture_registers; i += 2) {
-    SetCapture(*array, i, register_vector[i]);
-    SetCapture(*array, i + 1, register_vector[i + 1]);
-  }
-
-#endif  // V8_NATIVE_REGEXP
-
-  SetLastCaptureCount(*array, number_of_capture_registers);
-  SetLastSubject(*array, *subject);
-  SetLastInput(*array, *subject);
-
-  return last_match_info;
+  ASSERT(res == RE_FAILURE);
+  return Factory::null_value();
 }
 
 
@@ -4962,7 +4990,9 @@
       case AFTER_WORD_CHARACTER: {
         ASSERT_NOT_NULL(on_success());
         budget = on_success()->ComputeFirstCharacterSet(budget);
-        set_first_character_set(on_success()->first_character_set());
+        if (budget >= 0) {
+          set_first_character_set(on_success()->first_character_set());
+        }
         break;
       }
     }
@@ -4988,6 +5018,10 @@
 int BackReferenceNode::ComputeFirstCharacterSet(int budget) {
   // We don't know anything about the first character of a backreference
   // at this point.
+  // The potential first characters are the first characters of the capture,
+  // and the first characters of the on_success node, depending on whether the
+  // capture can be empty and whether it is known to be participating or known
+  // not to be.
   return kComputeFirstCharacterSetFail;
 }
 
@@ -5007,8 +5041,11 @@
     } else {
       ASSERT(text.type == TextElement::CHAR_CLASS);
       RegExpCharacterClass* char_class = text.data.u_char_class;
+      ZoneList<CharacterRange>* ranges = char_class->ranges();
+      // TODO(lrn): Canonicalize ranges when they are created
+      // instead of waiting until now.
+      CharacterRange::Canonicalize(ranges);
       if (char_class->is_negated()) {
-        ZoneList<CharacterRange>* ranges = char_class->ranges();
         int length = ranges->length();
         int new_length = length + 1;
         if (length > 0) {
@@ -5022,7 +5059,7 @@
         CharacterRange::Negate(ranges, negated_ranges);
         set_first_character_set(negated_ranges);
       } else {
-        set_first_character_set(char_class->ranges());
+        set_first_character_set(ranges);
       }
     }
   }
@@ -5196,7 +5233,7 @@
   NodeInfo info = *node->info();
 
   // Create the correct assembler for the architecture.
-#ifdef V8_NATIVE_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
   // Native regexp implementation.
 
   NativeRegExpMacroAssembler::Mode mode =
@@ -5211,11 +5248,11 @@
   RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
 #endif
 
-#else  // ! V8_NATIVE_REGEXP
+#else  // V8_INTERPRETED_REGEXP
   // Interpreted regexp implementation.
   EmbeddedVector<byte, 1024> codes;
   RegExpMacroAssemblerIrregexp macro_assembler(codes);
-#endif
+#endif  // V8_INTERPRETED_REGEXP
 
   return compiler.Assemble(&macro_assembler,
                            node,
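
The IrregexpExec rewrite above splits matching into IrregexpPrepare (compile
for this subject, flatten it, report how many registers are needed) and
IrregexpExecOnce, so a caller that needs repeated matches can prepare once
and reuse a single register vector. A hedged sketch of that calling pattern,
using only names introduced in this patch; the MatchAll wrapper and the
empty-match index bump are this sketch's own details, not code from the
patch:

    static void MatchAll(Handle<JSRegExp> regexp, Handle<String> subject) {
      int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
      if (required_registers < 0) return;  // Compilation failed; a pending
                                           // exception has been set.
      OffsetsVector registers(required_registers);
      Vector<int> output(registers.vector(), registers.length());
      int index = 0;
      while (index <= subject->length()) {
        RegExpImpl::IrregexpResult res =
            RegExpImpl::IrregexpExecOnce(regexp, subject, index, output);
        if (res != RegExpImpl::RE_SUCCESS) break;  // RE_FAILURE/RE_EXCEPTION.
        int match_start = output[0];  // Captures come in (start, end + 1)
        int match_end = output[1];    // pairs.
        // ... consume the capture pairs ...
        // Step past empty matches so the loop always advances.
        index = (match_end == match_start) ? match_end + 1 : match_end;
      }
    }
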
diff --git a/src/jsregexp.h b/src/jsregexp.h
index b99a89e..f6d511f 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -29,6 +29,7 @@
 #define V8_JSREGEXP_H_
 
 #include "macro-assembler.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -41,10 +42,10 @@
  public:
   // Whether V8 is compiled with native regexp support or not.
   static bool UsesNativeRegExp() {
-#ifdef V8_NATIVE_REGEXP
-    return true;
-#else
+#ifdef V8_INTERPRETED_REGEXP
     return false;
+#else
+    return true;
 #endif
   }
 
@@ -76,10 +77,10 @@
                              Handle<JSArray> lastMatchInfo);
 
   // Prepares a JSRegExp object with Irregexp-specific data.
-  static void IrregexpPrepare(Handle<JSRegExp> re,
-                              Handle<String> pattern,
-                              JSRegExp::Flags flags,
-                              int capture_register_count);
+  static void IrregexpInitialize(Handle<JSRegExp> re,
+                                 Handle<String> pattern,
+                                 JSRegExp::Flags flags,
+                                 int capture_register_count);
 
 
   static void AtomCompile(Handle<JSRegExp> re,
@@ -92,6 +93,29 @@
                                  int index,
                                  Handle<JSArray> lastMatchInfo);
 
+  enum IrregexpResult { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };
+
+  // Prepare a RegExp for being executed one or more times (using
+  // IrregexpExecOnce) on the subject.
+  // This ensures that the regexp is compiled for the subject, and that
+  // the subject is flat.
+  // Returns the number of integer spaces required by IrregexpExecOnce
+  // as its "registers" argument. If the regexp cannot be compiled,
+  // an exception is set as pending, and this function returns a negative value.
+  static int IrregexpPrepare(Handle<JSRegExp> regexp,
+                             Handle<String> subject);
+
+  // Execute a regular expression once on the subject, starting from
+  // character "index".
+  // If successful, returns RE_SUCCESS and sets the capture positions
+  // in the first registers.
+  // If matching fails, returns RE_FAILURE.
+  // If execution fails, sets a pending exception and returns RE_EXCEPTION.
+  static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
+                                         Handle<String> subject,
+                                         int index,
+                                         Vector<int32_t> registers);
+
   // Execute an Irregexp bytecode pattern.
   // On a successful match, the result is a JSArray containing
   // captured positions. On a failure, the result is the null value.
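The IrregexpPrepare/IrregexpExecOnce split lets a caller run a global regexp
repeatedly over one flat subject without redoing per-call setup. A rough
JavaScript analogue of the loop this enables, using only standard RegExp
semantics (the internal API itself is C++ and not shown here):

    function execAll(re, subject) {
      // "Prepare" once: one compiled regexp object is reused for every call,
      // the way IrregexpPrepare compiles once and flattens the subject.
      var compiled = new RegExp(re.source, "g");
      var results = [];
      var index = 0;
      while (index <= subject.length) {
        compiled.lastIndex = index;         // "exec once" starting at index
        var match = compiled.exec(subject);
        if (match === null) break;          // analogue of RE_FAILURE
        results.push(match);
        // Advance past the match; step one character after an empty match.
        index = match.index + Math.max(match[0].length, 1);
      }
      return results;
    }

    // execAll(/a*/, "baa") finds "", "aa", "" -- same as "baa".match(/a*/g).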
diff --git a/src/usage-analyzer.h b/src/jump-target-heavy-inl.h
similarity index 70%
copy from src/usage-analyzer.h
copy to src/jump-target-heavy-inl.h
index 1b0ea4a..0a2a569 100644
--- a/src/usage-analyzer.h
+++ b/src/jump-target-heavy-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,16 +25,27 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_USAGE_ANALYZER_H_
-#define V8_USAGE_ANALYZER_H_
+#ifndef V8_JUMP_TARGET_HEAVY_INL_H_
+#define V8_JUMP_TARGET_HEAVY_INL_H_
+
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
 
-// Compute usage counts for all variables.
-// Used for variable allocation.
-bool AnalyzeVariableUsage(FunctionLiteral* lit);
+void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
+  FrameElement* element = &entry_frame_->elements_[index];
+  element->clear_copied();
+  if (target->is_register()) {
+    entry_frame_->set_register_location(target->reg(), index);
+  } else if (target->is_copy()) {
+    entry_frame_->elements_[target->index()].set_copied();
+  }
+  if (direction_ == BIDIRECTIONAL && !target->is_copy()) {
+    element->set_type_info(TypeInfo::Unknown());
+  }
+}
 
 } }  // namespace v8::internal
 
-#endif  // V8_USAGE_ANALYZER_H_
+#endif  // V8_JUMP_TARGET_HEAVY_INL_H_
diff --git a/src/jump-target-heavy.cc b/src/jump-target-heavy.cc
new file mode 100644
index 0000000..85620a2
--- /dev/null
+++ b/src/jump-target-heavy.cc
@@ -0,0 +1,363 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+void JumpTarget::Jump(Result* arg) {
+  ASSERT(cgen()->has_valid_frame());
+
+  cgen()->frame()->Push(arg);
+  DoJump();
+}
+
+
+void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
+  ASSERT(cgen()->has_valid_frame());
+
+  // We want to check that non-frame registers at the call site stay in
+  // the same registers on the fall-through branch.
+#ifdef DEBUG
+  Result::Type arg_type = arg->type();
+  Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
+#endif
+
+  cgen()->frame()->Push(arg);
+  DoBranch(cc, hint);
+  *arg = cgen()->frame()->Pop();
+
+  ASSERT(arg->type() == arg_type);
+  ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
+}
+
+
+void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
+  ASSERT(cgen()->has_valid_frame());
+
+  // We want to check that non-frame registers at the call site stay in
+  // the same registers on the fall-through branch.
+#ifdef DEBUG
+  Result::Type arg0_type = arg0->type();
+  Register arg0_reg = arg0->is_register() ? arg0->reg() : no_reg;
+  Result::Type arg1_type = arg1->type();
+  Register arg1_reg = arg1->is_register() ? arg1->reg() : no_reg;
+#endif
+
+  cgen()->frame()->Push(arg0);
+  cgen()->frame()->Push(arg1);
+  DoBranch(cc, hint);
+  *arg1 = cgen()->frame()->Pop();
+  *arg0 = cgen()->frame()->Pop();
+
+  ASSERT(arg0->type() == arg0_type);
+  ASSERT(!arg0->is_register() || arg0->reg().is(arg0_reg));
+  ASSERT(arg1->type() == arg1_type);
+  ASSERT(!arg1->is_register() || arg1->reg().is(arg1_reg));
+}
+
+
+void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
+  ASSERT(cgen()->has_valid_frame());
+
+  int count = cgen()->frame()->height() - expected_height_;
+  if (count > 0) {
+    // We negate and branch here rather than using DoBranch's negate
+    // and branch.  This gives us a hook to remove statement state
+    // from the frame.
+    JumpTarget fall_through;
+    // Branch to fall through will not negate, because it is a
+    // forward-only target.
+    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
+    Jump(arg);  // May emit merge code here.
+    fall_through.Bind();
+  } else {
+#ifdef DEBUG
+    Result::Type arg_type = arg->type();
+    Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
+#endif
+    cgen()->frame()->Push(arg);
+    DoBranch(cc, hint);
+    *arg = cgen()->frame()->Pop();
+    ASSERT(arg->type() == arg_type);
+    ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
+  }
+}
+
+
+void JumpTarget::Bind(Result* arg) {
+  if (cgen()->has_valid_frame()) {
+    cgen()->frame()->Push(arg);
+  }
+  DoBind();
+  *arg = cgen()->frame()->Pop();
+}
+
+
+void JumpTarget::Bind(Result* arg0, Result* arg1) {
+  if (cgen()->has_valid_frame()) {
+    cgen()->frame()->Push(arg0);
+    cgen()->frame()->Push(arg1);
+  }
+  DoBind();
+  *arg1 = cgen()->frame()->Pop();
+  *arg0 = cgen()->frame()->Pop();
+}
+
+
+void JumpTarget::ComputeEntryFrame() {
+  // Given: a collection of frames reaching by forward CFG edges and
+  // the directionality of the block.  Compute: an entry frame for the
+  // block.
+
+  Counters::compute_entry_frame.Increment();
+#ifdef DEBUG
+  if (compiling_deferred_code_) {
+    ASSERT(reaching_frames_.length() > 1);
+    VirtualFrame* frame = reaching_frames_[0];
+    bool all_identical = true;
+    for (int i = 1; i < reaching_frames_.length(); i++) {
+      if (!frame->Equals(reaching_frames_[i])) {
+        all_identical = false;
+        break;
+      }
+    }
+    ASSERT(all_identical);
+  }
+#endif
+
+  // Choose an initial frame.
+  VirtualFrame* initial_frame = reaching_frames_[0];
+
+  // A list of pointers to frame elements in the entry frame.  NULL
+  // indicates that the element has not yet been determined.
+  int length = initial_frame->element_count();
+  ZoneList<FrameElement*> elements(length);
+
+  // Initially populate the list of elements based on the initial
+  // frame.
+  for (int i = 0; i < length; i++) {
+    FrameElement element = initial_frame->elements_[i];
+    // We do not allow copies or constants in bidirectional frames.
+    if (direction_ == BIDIRECTIONAL) {
+      if (element.is_constant() || element.is_copy()) {
+        elements.Add(NULL);
+        continue;
+      }
+    }
+    elements.Add(&initial_frame->elements_[i]);
+  }
+
+  // Compute elements based on the other reaching frames.
+  if (reaching_frames_.length() > 1) {
+    for (int i = 0; i < length; i++) {
+      FrameElement* element = elements[i];
+      for (int j = 1; j < reaching_frames_.length(); j++) {
+        // Element computation is monotonic: new information will not
+        // change our decision about undetermined or invalid elements.
+        if (element == NULL || !element->is_valid()) break;
+
+        FrameElement* other = &reaching_frames_[j]->elements_[i];
+        element = element->Combine(other);
+        if (element != NULL && !element->is_copy()) {
+          ASSERT(other != NULL);
+          // We overwrite the number information of one of the incoming frames.
+          // This is safe because we only use the frame for emitting merge code.
+          // The number information of incoming frames is not used anymore.
+          element->set_type_info(TypeInfo::Combine(element->type_info(),
+                                                   other->type_info()));
+        }
+      }
+      elements[i] = element;
+    }
+  }
+
+  // Build the new frame.  A freshly allocated frame has memory elements
+  // for the parameters and some platform-dependent elements (e.g.,
+  // return address).  Replace those first.
+  entry_frame_ = new VirtualFrame();
+  int index = 0;
+  for (; index < entry_frame_->element_count(); index++) {
+    FrameElement* target = elements[index];
+    // If the element is determined, set it now.  Count registers.  Mark
+    // elements as copied exactly when they have a copy.  Undetermined
+    // elements are initially recorded as if in memory.
+    if (target != NULL) {
+      entry_frame_->elements_[index] = *target;
+      InitializeEntryElement(index, target);
+    }
+  }
+  // Then fill in the rest of the frame with new elements.
+  for (; index < length; index++) {
+    FrameElement* target = elements[index];
+    if (target == NULL) {
+      entry_frame_->elements_.Add(
+          FrameElement::MemoryElement(TypeInfo::Uninitialized()));
+    } else {
+      entry_frame_->elements_.Add(*target);
+      InitializeEntryElement(index, target);
+    }
+  }
+
+  // Allocate any still-undetermined frame elements to registers or
+  // memory, from the top down.
+  for (int i = length - 1; i >= 0; i--) {
+    if (elements[i] == NULL) {
+      // Loop over all the reaching frames to check whether the element
+      // is synced on all frames and to count the registers it occupies.
+      bool is_synced = true;
+      RegisterFile candidate_registers;
+      int best_count = kMinInt;
+      int best_reg_num = RegisterAllocator::kInvalidRegister;
+      TypeInfo info = TypeInfo::Uninitialized();
+
+      for (int j = 0; j < reaching_frames_.length(); j++) {
+        FrameElement element = reaching_frames_[j]->elements_[i];
+        if (direction_ == BIDIRECTIONAL) {
+          info = TypeInfo::Unknown();
+        } else if (!element.is_copy()) {
+          info = TypeInfo::Combine(info, element.type_info());
+        } else {
+          // New elements will not be copies, so get number information from
+          // backing element in the reaching frame.
+          info = TypeInfo::Combine(info,
+            reaching_frames_[j]->elements_[element.index()].type_info());
+        }
+        is_synced = is_synced && element.is_synced();
+        if (element.is_register() && !entry_frame_->is_used(element.reg())) {
+          // Count the register occurrence and remember it if better
+          // than the previous best.
+          int num = RegisterAllocator::ToNumber(element.reg());
+          candidate_registers.Use(num);
+          if (candidate_registers.count(num) > best_count) {
+            best_count = candidate_registers.count(num);
+            best_reg_num = num;
+          }
+        }
+      }
+
+      // We must have number type information now (except for copied elements).
+      ASSERT(entry_frame_->elements_[i].is_copy()
+             || !info.IsUninitialized());
+
+      // If the value is synced on all frames, put it in memory.  This
+      // costs nothing at the merge code but will incur a
+      // memory-to-register move when the value is needed later.
+      if (is_synced) {
+        // Already recorded as a memory element.
+        // Set combined number info.
+        entry_frame_->elements_[i].set_type_info(info);
+        continue;
+      }
+
+      // Try to put it in a register.  If there was no best choice
+      // consider any free register.
+      if (best_reg_num == RegisterAllocator::kInvalidRegister) {
+        for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
+          if (!entry_frame_->is_used(j)) {
+            best_reg_num = j;
+            break;
+          }
+        }
+      }
+
+      if (best_reg_num != RegisterAllocator::kInvalidRegister) {
+        // If there was a register choice, use it.  Preserve the copied
+        // flag on the element.
+        bool is_copied = entry_frame_->elements_[i].is_copied();
+        Register reg = RegisterAllocator::ToRegister(best_reg_num);
+        entry_frame_->elements_[i] =
+            FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
+                                          TypeInfo::Uninitialized());
+        if (is_copied) entry_frame_->elements_[i].set_copied();
+        entry_frame_->set_register_location(reg, i);
+      }
+      // Set combined number info.
+      entry_frame_->elements_[i].set_type_info(info);
+    }
+  }
+
+  // If we have incoming backward edges, assert that we forget all number information.
+#ifdef DEBUG
+  if (direction_ == BIDIRECTIONAL) {
+    for (int i = 0; i < length; ++i) {
+      if (!entry_frame_->elements_[i].is_copy()) {
+        ASSERT(entry_frame_->elements_[i].type_info().IsUnknown());
+      }
+    }
+  }
+#endif
+
+  // The stack pointer is at the highest synced element or the base of
+  // the expression stack.
+  int stack_pointer = length - 1;
+  while (stack_pointer >= entry_frame_->expression_base_index() &&
+         !entry_frame_->elements_[stack_pointer].is_synced()) {
+    stack_pointer--;
+  }
+  entry_frame_->stack_pointer_ = stack_pointer;
+}
+
+
+DeferredCode::DeferredCode()
+    : masm_(CodeGeneratorScope::Current()->masm()),
+      statement_position_(masm_->current_statement_position()),
+      position_(masm_->current_position()) {
+  ASSERT(statement_position_ != RelocInfo::kNoPosition);
+  ASSERT(position_ != RelocInfo::kNoPosition);
+
+  CodeGeneratorScope::Current()->AddDeferred(this);
+#ifdef DEBUG
+  comment_ = "";
+#endif
+
+  // Copy the register locations from the code generator's frame.
+  // These are the registers that will be spilled on entry to the
+  // deferred code and restored on exit.
+  VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
+  int sp_offset = frame->fp_relative(frame->stack_pointer_);
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    int loc = frame->register_location(i);
+    if (loc == VirtualFrame::kIllegalIndex) {
+      registers_[i] = kIgnore;
+    } else if (frame->elements_[loc].is_synced()) {
+      // Needs to be restored on exit but not saved on entry.
+      registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
+    } else {
+      int offset = frame->fp_relative(loc);
+      registers_[i] = (offset < sp_offset) ? kPush : offset;
+    }
+  }
+}
+
+} }  // namespace v8::internal
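The monotonicity noted in ComputeEntryFrame's inner loop (an element that
becomes undetermined stays undetermined) is what allows the early break. A
standalone JavaScript sketch of that fold, with a toy combine function rather
than V8's FrameElement::Combine:

    // Toy lattice: equal values agree; differing values become null
    // (undetermined), and null absorbs everything after it.
    function combine(a, b) { return a === b ? a : null; }

    function computeEntrySlots(reachingFrames) {
      var slots = reachingFrames[0].slice();
      for (var i = 0; i < slots.length; i++) {
        for (var j = 1; j < reachingFrames.length; j++) {
          if (slots[i] === null) break;  // monotonic: no later frame can help
          slots[i] = combine(slots[i], reachingFrames[j][i]);
        }
      }
      return slots;
    }

    // computeEntrySlots([["r0", "mem"], ["r0", "r1"]]) -> ["r0", null]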
diff --git a/src/jump-target-inl.h b/src/jump-target-inl.h
index 3cd9a8b..4c9ee5b 100644
--- a/src/jump-target-inl.h
+++ b/src/jump-target-inl.h
@@ -28,6 +28,14 @@
 #ifndef V8_JUMP_TARGET_INL_H_
 #define V8_JUMP_TARGET_INL_H_
 
+#include "virtual-frame-inl.h"
+
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+#include "jump-target-heavy-inl.h"
+#else
+#include "jump-target-light-inl.h"
+#endif
+
 namespace v8 {
 namespace internal {
 
@@ -35,18 +43,6 @@
   return CodeGeneratorScope::Current();
 }
 
-void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
-  entry_frame_->elements_[index].clear_copied();
-  if (target->is_register()) {
-    entry_frame_->set_register_location(target->reg(), index);
-  } else if (target->is_copy()) {
-    entry_frame_->elements_[target->index()].set_copied();
-  }
-  if (direction_ == BIDIRECTIONAL && !target->is_copy()) {
-    entry_frame_->elements_[index].set_number_info(NumberInfo::kUnknown);
-  }
-}
-
 } }  // namespace v8::internal
 
 #endif  // V8_JUMP_TARGET_INL_H_
diff --git a/src/usage-analyzer.h b/src/jump-target-light-inl.h
similarity index 83%
copy from src/usage-analyzer.h
copy to src/jump-target-light-inl.h
index 1b0ea4a..8d6c3ac 100644
--- a/src/usage-analyzer.h
+++ b/src/jump-target-light-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,16 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_USAGE_ANALYZER_H_
-#define V8_USAGE_ANALYZER_H_
+#ifndef V8_JUMP_TARGET_LIGHT_INL_H_
+#define V8_JUMP_TARGET_LIGHT_INL_H_
+
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
 
-// Compute usage counts for all variables.
-// Used for variable allocation.
-bool AnalyzeVariableUsage(FunctionLiteral* lit);
+void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
+  UNIMPLEMENTED();
+}
 
 } }  // namespace v8::internal
 
-#endif  // V8_USAGE_ANALYZER_H_
+#endif  // V8_JUMP_TARGET_LIGHT_INL_H_
diff --git a/src/number-info.h b/src/jump-target-light.cc
similarity index 61%
copy from src/number-info.h
copy to src/jump-target-light.cc
index c6f32e4..befb430 100644
--- a/src/number-info.h
+++ b/src/jump-target-light.cc
@@ -25,48 +25,62 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_NUMBER_INFO_H_
-#define V8_NUMBER_INFO_H_
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
 
 namespace v8 {
 namespace internal {
 
-class NumberInfo : public AllStatic {
- public:
-  enum Type {
-    kUnknown = 0,
-    kNumber = 1,
-    kSmi = 3,
-    kHeapNumber = 5,
-    kUninitialized = 7
-  };
 
-  // Return the weakest (least precise) common type.
-  static Type Combine(Type a, Type b) {
-    // Make use of the order of enum values.
-    return static_cast<Type>(a & b);
-  }
+void JumpTarget::Jump(Result* arg) {
+  UNIMPLEMENTED();
+}
 
-  static bool IsNumber(Type a) {
-    ASSERT(a != kUninitialized);
-    return ((a & kNumber) != 0);
-  }
 
-  static const char* ToString(Type a) {
-    switch (a) {
-      case kUnknown: return "UnknownType";
-      case kNumber: return "NumberType";
-      case kSmi: return "SmiType";
-      case kHeapNumber: return "HeapNumberType";
-      case kUninitialized:
-        UNREACHABLE();
-        return "UninitializedType";
-    }
-    UNREACHABLE();
-    return "Unreachable code";
-  }
-};
+void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
+  UNIMPLEMENTED();
+}
+
+
+void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
+  UNIMPLEMENTED();
+}
+
+
+void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
+  UNIMPLEMENTED();
+}
+
+
+void JumpTarget::Bind(Result* arg) {
+  UNIMPLEMENTED();
+}
+
+
+void JumpTarget::Bind(Result* arg0, Result* arg1) {
+  UNIMPLEMENTED();
+}
+
+
+void JumpTarget::ComputeEntryFrame() {
+  UNIMPLEMENTED();
+}
+
+
+DeferredCode::DeferredCode()
+    : masm_(CodeGeneratorScope::Current()->masm()),
+      statement_position_(masm_->current_statement_position()),
+      position_(masm_->current_position()) {
+  ASSERT(statement_position_ != RelocInfo::kNoPosition);
+  ASSERT(position_ != RelocInfo::kNoPosition);
+
+  CodeGeneratorScope::Current()->AddDeferred(this);
+
+#ifdef DEBUG
+  CodeGeneratorScope::Current()->frame()->AssertIsSpilled();
+#endif
+}
 
 } }  // namespace v8::internal
-
-#endif  // V8_NUMBER_INFO_H_
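The NumberInfo encoding this file is giving up (its replacement lives in
type-info) works because of the chosen bit patterns: kSmi (3) and kHeapNumber
(5) both contain the kNumber bit (1), kUninitialized (7) contains every bit,
and kUnknown (0) contains none, so bitwise AND is exactly "weakest common
type". A JavaScript check of the property:

    var kUnknown = 0, kNumber = 1, kSmi = 3, kHeapNumber = 5, kUninitialized = 7;
    function combine(a, b) { return a & b; }          // weakest common type
    combine(kSmi, kHeapNumber) === kNumber;   // true: both are numbers
    combine(kSmi, kUninitialized) === kSmi;   // true: 7 is the identity
    combine(kSmi, kUnknown) === kUnknown;     // true: 0 absorbs everything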
diff --git a/src/jump-target.cc b/src/jump-target.cc
index bce379a..8b29995 100644
--- a/src/jump-target.cc
+++ b/src/jump-target.cc
@@ -48,291 +48,21 @@
 }
 
 
-void JumpTarget::ComputeEntryFrame() {
-  // Given: a collection of frames reaching by forward CFG edges and
-  // the directionality of the block.  Compute: an entry frame for the
-  // block.
-
-  Counters::compute_entry_frame.Increment();
-#ifdef DEBUG
-  if (compiling_deferred_code_) {
-    ASSERT(reaching_frames_.length() > 1);
-    VirtualFrame* frame = reaching_frames_[0];
-    bool all_identical = true;
-    for (int i = 1; i < reaching_frames_.length(); i++) {
-      if (!frame->Equals(reaching_frames_[i])) {
-        all_identical = false;
-        break;
-      }
-    }
-    ASSERT(!all_identical || all_identical);
-  }
-#endif
-
-  // Choose an initial frame.
-  VirtualFrame* initial_frame = reaching_frames_[0];
-
-  // A list of pointers to frame elements in the entry frame.  NULL
-  // indicates that the element has not yet been determined.
-  int length = initial_frame->element_count();
-  ZoneList<FrameElement*> elements(length);
-
-  // Initially populate the list of elements based on the initial
-  // frame.
-  for (int i = 0; i < length; i++) {
-    FrameElement element = initial_frame->elements_[i];
-    // We do not allow copies or constants in bidirectional frames.
-    if (direction_ == BIDIRECTIONAL) {
-      if (element.is_constant() || element.is_copy()) {
-        elements.Add(NULL);
-        continue;
-      }
-    }
-    elements.Add(&initial_frame->elements_[i]);
-  }
-
-  // Compute elements based on the other reaching frames.
-  if (reaching_frames_.length() > 1) {
-    for (int i = 0; i < length; i++) {
-      FrameElement* element = elements[i];
-      for (int j = 1; j < reaching_frames_.length(); j++) {
-        // Element computation is monotonic: new information will not
-        // change our decision about undetermined or invalid elements.
-        if (element == NULL || !element->is_valid()) break;
-
-        element = element->Combine(&reaching_frames_[j]->elements_[i]);
-
-        FrameElement* other = &reaching_frames_[j]->elements_[i];
-        if (element != NULL && !element->is_copy()) {
-          ASSERT(other != NULL);
-          // We overwrite the number information of one of the incoming frames.
-          // This is safe because we only use the frame for emitting merge code.
-          // The number information of incoming frames is not used anymore.
-          element->set_number_info(NumberInfo::Combine(element->number_info(),
-                                                       other->number_info()));
-        }
-      }
-      elements[i] = element;
-    }
-  }
-
-  // Build the new frame.  A freshly allocated frame has memory elements
-  // for the parameters and some platform-dependent elements (e.g.,
-  // return address).  Replace those first.
-  entry_frame_ = new VirtualFrame();
-  int index = 0;
-  for (; index < entry_frame_->element_count(); index++) {
-    FrameElement* target = elements[index];
-    // If the element is determined, set it now.  Count registers.  Mark
-    // elements as copied exactly when they have a copy.  Undetermined
-    // elements are initially recorded as if in memory.
-    if (target != NULL) {
-      entry_frame_->elements_[index] = *target;
-      InitializeEntryElement(index, target);
-    }
-  }
-  // Then fill in the rest of the frame with new elements.
-  for (; index < length; index++) {
-    FrameElement* target = elements[index];
-    if (target == NULL) {
-      entry_frame_->elements_.Add(
-          FrameElement::MemoryElement(NumberInfo::kUninitialized));
-    } else {
-      entry_frame_->elements_.Add(*target);
-      InitializeEntryElement(index, target);
-    }
-  }
-
-  // Allocate any still-undetermined frame elements to registers or
-  // memory, from the top down.
-  for (int i = length - 1; i >= 0; i--) {
-    if (elements[i] == NULL) {
-      // Loop over all the reaching frames to check whether the element
-      // is synced on all frames and to count the registers it occupies.
-      bool is_synced = true;
-      RegisterFile candidate_registers;
-      int best_count = kMinInt;
-      int best_reg_num = RegisterAllocator::kInvalidRegister;
-      NumberInfo::Type info = NumberInfo::kUninitialized;
-
-      for (int j = 0; j < reaching_frames_.length(); j++) {
-        FrameElement element = reaching_frames_[j]->elements_[i];
-        if (direction_ == BIDIRECTIONAL) {
-            info = NumberInfo::kUnknown;
-        } else if (!element.is_copy()) {
-          info = NumberInfo::Combine(info, element.number_info());
-        } else {
-          // New elements will not be copies, so get number information from
-          // backing element in the reaching frame.
-          info = NumberInfo::Combine(info,
-            reaching_frames_[j]->elements_[element.index()].number_info());
-        }
-        is_synced = is_synced && element.is_synced();
-        if (element.is_register() && !entry_frame_->is_used(element.reg())) {
-          // Count the register occurrence and remember it if better
-          // than the previous best.
-          int num = RegisterAllocator::ToNumber(element.reg());
-          candidate_registers.Use(num);
-          if (candidate_registers.count(num) > best_count) {
-            best_count = candidate_registers.count(num);
-            best_reg_num = num;
-          }
-        }
-      }
-
-      // We must have a number type information now (not for copied elements).
-      ASSERT(entry_frame_->elements_[i].is_copy()
-             || info != NumberInfo::kUninitialized);
-
-      // If the value is synced on all frames, put it in memory.  This
-      // costs nothing at the merge code but will incur a
-      // memory-to-register move when the value is needed later.
-      if (is_synced) {
-        // Already recorded as a memory element.
-        // Set combined number info.
-        entry_frame_->elements_[i].set_number_info(info);
-        continue;
-      }
-
-      // Try to put it in a register.  If there was no best choice
-      // consider any free register.
-      if (best_reg_num == RegisterAllocator::kInvalidRegister) {
-        for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
-          if (!entry_frame_->is_used(j)) {
-            best_reg_num = j;
-            break;
-          }
-        }
-      }
-
-      if (best_reg_num != RegisterAllocator::kInvalidRegister) {
-        // If there was a register choice, use it.  Preserve the copied
-        // flag on the element.
-        bool is_copied = entry_frame_->elements_[i].is_copied();
-        Register reg = RegisterAllocator::ToRegister(best_reg_num);
-        entry_frame_->elements_[i] =
-            FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
-                                          NumberInfo::kUninitialized);
-        if (is_copied) entry_frame_->elements_[i].set_copied();
-        entry_frame_->set_register_location(reg, i);
-      }
-      // Set combined number info.
-      entry_frame_->elements_[i].set_number_info(info);
-    }
-  }
-
-  // If we have incoming backward edges assert we forget all number information.
-#ifdef DEBUG
-  if (direction_ == BIDIRECTIONAL) {
-    for (int i = 0; i < length; ++i) {
-      if (!entry_frame_->elements_[i].is_copy()) {
-        ASSERT(entry_frame_->elements_[i].number_info() ==
-               NumberInfo::kUnknown);
-      }
-    }
-  }
-#endif
-
-  // The stack pointer is at the highest synced element or the base of
-  // the expression stack.
-  int stack_pointer = length - 1;
-  while (stack_pointer >= entry_frame_->expression_base_index() &&
-         !entry_frame_->elements_[stack_pointer].is_synced()) {
-    stack_pointer--;
-  }
-  entry_frame_->stack_pointer_ = stack_pointer;
-}
-
-
 void JumpTarget::Jump() {
   DoJump();
 }
 
 
-void JumpTarget::Jump(Result* arg) {
-  ASSERT(cgen()->has_valid_frame());
-
-  cgen()->frame()->Push(arg);
-  DoJump();
-}
-
-
 void JumpTarget::Branch(Condition cc, Hint hint) {
   DoBranch(cc, hint);
 }
 
 
-#ifdef DEBUG
-#define DECLARE_ARGCHECK_VARS(name)                                \
-  Result::Type name##_type = name->type();                         \
-  Register name##_reg = name->is_register() ? name->reg() : no_reg
-
-#define ASSERT_ARGCHECK(name)                                \
-  ASSERT(name->type() == name##_type);                       \
-  ASSERT(!name->is_register() || name->reg().is(name##_reg))
-
-#else
-#define DECLARE_ARGCHECK_VARS(name) do {} while (false)
-
-#define ASSERT_ARGCHECK(name) do {} while (false)
-#endif
-
-void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  // We want to check that non-frame registers at the call site stay in
-  // the same registers on the fall-through branch.
-  DECLARE_ARGCHECK_VARS(arg);
-
-  cgen()->frame()->Push(arg);
-  DoBranch(cc, hint);
-  *arg = cgen()->frame()->Pop();
-
-  ASSERT_ARGCHECK(arg);
-}
-
-
-void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  int count = cgen()->frame()->height() - expected_height_;
-  if (count > 0) {
-    // We negate and branch here rather than using DoBranch's negate
-    // and branch.  This gives us a hook to remove statement state
-    // from the frame.
-    JumpTarget fall_through;
-    // Branch to fall through will not negate, because it is a
-    // forward-only target.
-    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
-    Jump(arg);  // May emit merge code here.
-    fall_through.Bind();
-  } else {
-    DECLARE_ARGCHECK_VARS(arg);
-    cgen()->frame()->Push(arg);
-    DoBranch(cc, hint);
-    *arg = cgen()->frame()->Pop();
-    ASSERT_ARGCHECK(arg);
-  }
-}
-
-#undef DECLARE_ARGCHECK_VARS
-#undef ASSERT_ARGCHECK
-
-
 void JumpTarget::Bind() {
   DoBind();
 }
 
 
-void JumpTarget::Bind(Result* arg) {
-  if (cgen()->has_valid_frame()) {
-    cgen()->frame()->Push(arg);
-  }
-  DoBind();
-  *arg = cgen()->frame()->Pop();
-}
-
-
 void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
   ASSERT(reaching_frames_.length() == merge_labels_.length());
   ASSERT(entry_frame_ == NULL);
diff --git a/src/jump-target.h b/src/jump-target.h
index dd291c6..db523b5 100644
--- a/src/jump-target.h
+++ b/src/jump-target.h
@@ -29,6 +29,7 @@
 #define V8_JUMP_TARGET_H_
 
 #include "macro-assembler.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -116,12 +117,17 @@
   // the target and the fall-through.
   virtual void Branch(Condition cc, Hint hint = no_hint);
   virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
+  virtual void Branch(Condition cc,
+                      Result* arg0,
+                      Result* arg1,
+                      Hint hint = no_hint);
 
   // Bind a jump target.  If there is no current frame at the binding
   // site, there must be at least one frame reaching via a forward
   // jump.
   virtual void Bind();
   virtual void Bind(Result* arg);
+  virtual void Bind(Result* arg0, Result* arg1);
 
   // Emit a call to a jump target.  There must be a current frame at
   // the call.  The frame at the target is the same as the current
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
new file mode 100644
index 0000000..34d5c0d
--- /dev/null
+++ b/src/liveedit-debugger.js
@@ -0,0 +1,944 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// LiveEdit feature implementation. The script should be executed after
+// debug-debugger.js.
+
+// A LiveEdit namespace. It contains functions that modify JavaScript code
+// according to changes in the script source (if possible).
+//
+// When new script source is put in, the difference is calculated textually,
+// in the form of a list of delete/add/change chunks. The functions that
+// include change chunk(s) get recompiled, or their enclosing functions are
+// recompiled instead.
+// If a function cannot be recompiled (e.g. it was completely erased in the
+// new version of the script), it remains unchanged, but the code that could
+// create a new instance of this function goes away. An old version of the
+// script is created to back up such obsolete functions.
+// All unchanged functions have their positions updated accordingly.
+//
+// LiveEdit namespace is declared inside a single function constructor.
+Debug.LiveEdit = new function() {
+
+  // Forward declaration for minifier.
+  var FunctionStatus;
+  
+  
+  // Applies the change to the script.
+  // The change is in the form of a list of chunks encoded in a single array
+  // as a series of triplets (pos1_start, pos1_end, pos2_end).
+  function ApplyPatchMultiChunk(script, diff_array, new_source, change_log) {
+
+    var old_source = script.source;
+
+    // Gather compile information about old version of script.
+    var old_compile_info = GatherCompileInfo(old_source, script);
+  
+    // Build tree structures for old and new versions of the script.
+    var root_old_node = BuildCodeInfoTree(old_compile_info);
+
+    var pos_translator = new PosTranslator(diff_array);
+
+    // Analyze changes.
+    MarkChangedFunctions(root_old_node, pos_translator.GetChunks());
+
+    // Find all SharedFunctionInfo's that were compiled from this script.
+    FindLiveSharedInfos(root_old_node, script);
+    
+    // Gather compile information about new version of script.
+    var new_compile_info;
+    try {
+      new_compile_info = GatherCompileInfo(new_source, script);
+    } catch (e) {
+      throw new Failure("Failed to compile new version of script: " + e);
+    }
+    var root_new_node = BuildCodeInfoTree(new_compile_info);
+
+    // Link recompiled script data with other data.
+    FindCorrespondingFunctions(root_old_node, root_new_node);
+    
+    // Prepare to-do lists.
+    var replace_code_list = new Array();
+    var link_to_old_script_list = new Array();
+    var link_to_original_script_list = new Array();
+    var update_positions_list = new Array();
+
+    function HarvestTodo(old_node) {
+      function CollectDamaged(node) {
+        link_to_old_script_list.push(node);
+        for (var i = 0; i < node.children.length; i++) {
+          CollectDamaged(node.children[i]);
+        }
+      }
+
+      // Recursively collects all newly compiled functions that are going
+      // into use and should have their link to the actual script updated.
+      function CollectNew(node_list) {
+        for (var i = 0; i < node_list.length; i++) {
+          link_to_original_script_list.push(node_list[i]);
+          CollectNew(node_list[i].children);
+        }
+      }
+      
+      if (old_node.status == FunctionStatus.DAMAGED) {
+        CollectDamaged(old_node);
+        return;
+      }
+      if (old_node.status == FunctionStatus.UNCHANGED) {
+        update_positions_list.push(old_node);
+      } else if (old_node.status == FunctionStatus.SOURCE_CHANGED) {
+        update_positions_list.push(old_node);
+      } else if (old_node.status == FunctionStatus.CHANGED) {
+        replace_code_list.push(old_node);
+        CollectNew(old_node.unmatched_new_nodes);
+      }
+      for (var i = 0; i < old_node.children.length; i++) {
+        HarvestTodo(old_node.children[i]);
+      }
+    }
+
+    HarvestTodo(root_old_node);
+    
+    // Collect shared infos for functions whose code needs to be patched.
+    var replaced_function_infos = new Array();
+    for (var i = 0; i < replace_code_list.length; i++) {
+      var info_wrapper = replace_code_list[i].live_shared_info_wrapper; 
+      if (info_wrapper) {
+        replaced_function_infos.push(info_wrapper);
+      }
+    }
+
+    // Check that the functions being patched are not currently on the stack.
+    CheckStackActivations(replaced_function_infos, change_log);
+  
+  
+    // Nothing has been changed up to this point.
+    // Now commit all changes.
+    
+    // Start with breakpoints. Convert their line/column positions and
+    // temporarily remove them.
+    var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
+
+    var old_script;
+
+    // Create an old script only if there are functions that should be
+    // linked to the old version.
+    if (link_to_old_script_list.length == 0) {
+      %LiveEditReplaceScript(script, new_source, null);
+      old_script = void 0;
+    } else {
+      var old_script_name = CreateNameForOldScript(script);
+      
+      // Update the script text and create a new script representing an old
+      // version of the script.
+      old_script = %LiveEditReplaceScript(script, new_source,
+          old_script_name);
+      
+      var link_to_old_script_report = new Array();
+      change_log.push( { linked_to_old_script: link_to_old_script_report } );
+    
+      // We need to link all former nested functions to the old script.
+      for (var i = 0; i < link_to_old_script_list.length; i++) {
+        LinkToOldScript(link_to_old_script_list[i], old_script,
+            link_to_old_script_report);
+      }
+    }
+    
+    // Link all the functions that we are going to use to the actual script.
+    for (var i = 0; i < link_to_original_script_list.length; i++) {
+      %LiveEditFunctionSetScript(
+          link_to_original_script_list[i].info.shared_function_info, script);
+    }
+
+    for (var i = 0; i < replace_code_list.length; i++) {
+      PatchFunctionCode(replace_code_list[i], change_log);
+    }
+  
+    var position_patch_report = new Array();
+    change_log.push( {position_patched: position_patch_report} );
+    
+    for (var i = 0; i < update_positions_list.length; i++) {
+      // TODO(LiveEdit): take into account whether it's source_changed or
+      // unchanged and whether positions changed at all.
+      PatchPositions(update_positions_list[i], diff_array,
+          position_patch_report);
+    }
+    
+    break_points_restorer(pos_translator, old_script);
+  }
+  // Function is public.
+  this.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
+
+  
+  // Fully compiles source string as a script. Returns an Array of
+  // FunctionCompileInfo -- descriptions of all functions of the script.
+  // Elements of array are ordered by start positions of functions (from top
+  // to bottom) in the source. Fields outer_index and next_sibling_index help
+  // to navigate the nesting structure of functions.
+  //
+  // All functions get compiled and linked to the script provided as a parameter.
+  // TODO(LiveEdit): consider not using actual scripts as script, because
+  //     we have to manually erase all links right after compile. 
+  function GatherCompileInfo(source, script) {
+    // Get function info; elements are partially sorted (it is a tree of
+    // nested functions serialized as parent followed by serialized children).
+    var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
+
+    // Sort function infos by start position field.
+    var compile_info = new Array();
+    var old_index_map = new Array();
+    for (var i = 0; i < raw_compile_info.length; i++) {
+      var info = new FunctionCompileInfo(raw_compile_info[i]);
+      // Remove all links to the actual script. The breakpoints system and
+      // LiveEdit itself believe that any function in the heap that points
+      // to a particular script is a regular function.
+      // For some functions we will restore this link later.
+      %LiveEditFunctionSetScript(info.shared_function_info, void 0);
+      compile_info.push(info);
+      old_index_map.push(i);
+    }
+
+    for (var i = 0; i < compile_info.length; i++) {
+      var k = i;
+      for (var j = i + 1; j < compile_info.length; j++) {
+        if (compile_info[k].start_position > compile_info[j].start_position) {
+          k = j;
+        }
+      }
+      if (k != i) {
+        var temp_info = compile_info[k];
+        var temp_index = old_index_map[k];
+        compile_info[k] = compile_info[i];
+        old_index_map[k] = old_index_map[i];
+        compile_info[i] = temp_info;
+        old_index_map[i] = temp_index;
+      }
+    }
+
+    // After sorting, update the outer_index field using old_index_map. Also
+    // set the next_sibling_index field.
+    var current_index = 0;
+
+    // A recursive function that goes over all children of a particular
+    // node (i.e. a function info).
+    function ResetIndexes(new_parent_index, old_parent_index) {
+      var previous_sibling = -1;
+      while (current_index < compile_info.length &&
+          compile_info[current_index].outer_index == old_parent_index) {
+        var saved_index = current_index;
+        compile_info[saved_index].outer_index = new_parent_index;
+        if (previous_sibling != -1) {
+          compile_info[previous_sibling].next_sibling_index = saved_index;
+        }
+        previous_sibling = saved_index;
+        current_index++;
+        ResetIndexes(saved_index, old_index_map[saved_index]);
+      }
+      if (previous_sibling != -1) {
+        compile_info[previous_sibling].next_sibling_index = -1;
+      }
+    }
+
+    ResetIndexes(-1, -1);
+    Assert(current_index == compile_info.length);
+
+    return compile_info;
+  }
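The sort above is a selection sort that mirrors every swap into
old_index_map, so that after sorting, old_index_map[i] records where entry i
originally lived; ResetIndexes then uses that to remap outer_index values. A
minimal JavaScript sketch of the same bookkeeping on plain numbers:

    function sortWithIndexMap(items) {
      var oldIndex = items.map(function(unused, i) { return i; });
      for (var i = 0; i < items.length; i++) {
        var k = i;
        for (var j = i + 1; j < items.length; j++) {
          if (items[k] > items[j]) k = j;
        }
        if (k != i) {  // swap the items and their original positions together
          var t = items[k]; items[k] = items[i]; items[i] = t;
          t = oldIndex[k]; oldIndex[k] = oldIndex[i]; oldIndex[i] = t;
        }
      }
      return oldIndex;
    }

    // sortWithIndexMap([30, 10, 20]) -> [1, 2, 0]; items becomes [10, 20, 30].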
+
+  
+  // Replaces function's Code.
+  function PatchFunctionCode(old_node, change_log) {
+    var new_info = old_node.corresponding_node.info;
+    var shared_info_wrapper = old_node.live_shared_info_wrapper;
+    if (shared_info_wrapper) {
+      %LiveEditReplaceFunctionCode(new_info.raw_array,
+          shared_info_wrapper.raw_array);
+
+      // The function got new code. However, this new code brings all new
+      // instances of SharedFunctionInfo for nested functions, and we want
+      // the original instances to be used wherever possible.
+      // (This is because old instances and new instances will both be
+      // linked to a script, and the breakpoints subsystem does not really
+      // expect this; neither does the LiveEdit subsystem on the next call.)
+      for (var i = 0; i < old_node.children.length; i++) {
+        if (old_node.children[i].corresponding_node) {
+          var corresponding_child = old_node.children[i].corresponding_node;
+          var child_shared_info_wrapper =
+              old_node.children[i].live_shared_info_wrapper;
+          if (child_shared_info_wrapper) {
+            %LiveEditReplaceRefToNestedFunction(shared_info_wrapper.info,
+                corresponding_child.info.shared_function_info,
+                child_shared_info_wrapper.info);
+          }
+        }
+      }
+      
+      change_log.push( {function_patched: new_info.function_name} );
+    } else {
+      change_log.push( {function_patched: new_info.function_name,
+          function_info_not_found: true} );
+    }
+  }
+
+  
+  // Associates a function with another instance of a script (the
+  // one representing its old version). This way the function can still
+  // access its own text.
+  function LinkToOldScript(old_info_node, old_script, report_array) {
+    var shared_info = old_info_node.live_shared_info_wrapper;
+    if (shared_info) {
+      %LiveEditFunctionSetScript(shared_info.info, old_script);
+      report_array.push( { name: shared_info.function_name } );
+    } else {
+      report_array.push(
+          { name: old_info_node.info.function_name, not_found: true } );
+    }
+  }
+  
+
+  // Returns a function that restores breakpoints.
+  function TemporaryRemoveBreakPoints(original_script, change_log) {
+    var script_break_points = GetScriptBreakPoints(original_script);
+    
+    var break_points_update_report = [];
+    change_log.push( { break_points_update: break_points_update_report } );
+
+    var break_point_old_positions = [];
+    for (var i = 0; i < script_break_points.length; i++) {
+      var break_point = script_break_points[i];
+
+      break_point.clear();
+      
+      // TODO(LiveEdit): be careful with resource offset here. 
+      var break_point_position = Debug.findScriptSourcePosition(original_script,
+          break_point.line(), break_point.column());
+      
+      var old_position_description = {
+          position: break_point_position,
+          line: break_point.line(),
+          column: break_point.column()
+      }
+      break_point_old_positions.push(old_position_description);
+    }
+    
+    
+    // Restores breakpoints and creates their copies in the "old" copy of
+    // the script.
+    return function (pos_translator, old_script_copy_opt) {
+      // Update breakpoints (change their positions and restore them in the
+      // old version of the script).
+      for (var i = 0; i < script_break_points.length; i++) {
+        var break_point = script_break_points[i];
+        if (old_script_copy_opt) {
+          var clone = break_point.cloneForOtherScript(old_script_copy_opt);
+          clone.set(old_script_copy_opt);
+          
+          break_points_update_report.push( {
+            type: "copied_to_old",
+            id: break_point.number(),
+            new_id: clone.number(), 
+            positions: break_point_old_positions[i]
+            } );
+        }
+        
+        var updated_position = pos_translator.Translate(
+            break_point_old_positions[i].position,
+            PosTranslator.ShiftWithTopInsideChunkHandler);
+        
+        var new_location =
+            original_script.locationFromPosition(updated_position, false);
+
+        break_point.update_positions(new_location.line, new_location.column);
+
+        var new_position_description = {
+            position: updated_position,
+            line: new_location.line,
+            column: new_location.column
+        }
+        
+        break_point.set(original_script);
+        
+        break_points_update_report.push( { type: "position_changed",
+          id: break_point.number(),
+          old_positions: break_point_old_positions[i],
+          new_positions: new_position_description
+          } );
+      }
+    }
+  }
+
+  
+  function Assert(condition, message) {
+    if (!condition) {
+      if (message) {
+        throw "Assert " + message;
+      } else {
+        throw "Assert";
+      }
+    }
+  }
+
+  function DiffChunk(pos1, pos2, len1, len2) {
+    this.pos1 = pos1;
+    this.pos2 = pos2;
+    this.len1 = len1;
+    this.len2 = len2;
+  }
+  
+  function PosTranslator(diff_array) {
+    var chunks = new Array();
+    var current_diff = 0;
+    for (var i = 0; i < diff_array.length; i += 3) {
+      var pos1_begin = diff_array[i];
+      var pos2_begin = pos1_begin + current_diff;
+      var pos1_end = diff_array[i + 1];
+      var pos2_end = diff_array[i + 2];
+      chunks.push(new DiffChunk(pos1_begin, pos2_begin, pos1_end - pos1_begin,
+          pos2_end - pos2_begin));
+      current_diff = pos2_end - pos1_end; 
+    }
+    this.chunks = chunks;
+  }
+  PosTranslator.prototype.GetChunks = function() {
+    return this.chunks;
+  }
+  
+  PosTranslator.prototype.Translate = function(pos, inside_chunk_handler) {
+    var array = this.chunks; 
+    if (array.length == 0 || pos < array[0].pos1) {
+      return pos;
+    }
+    var chunk_index1 = 0;
+    var chunk_index2 = array.length - 1;
+
+    while (chunk_index1 < chunk_index2) {
+      var middle_index = Math.floor((chunk_index1 + chunk_index2) / 2);
+      if (pos < array[middle_index + 1].pos1) {
+        chunk_index2 = middle_index;
+      } else {
+        chunk_index1 = middle_index + 1;
+      }
+    }
+    var chunk = array[chunk_index1];
+    if (pos >= chunk.pos1 + chunk.len1) {
+      return pos + chunk.pos2 + chunk.len2 - chunk.pos1 - chunk.len1; 
+    }
+    
+    if (!inside_chunk_handler) {
+      inside_chunk_handler = PosTranslator.DefaultInsideChunkHandler;
+    }
+    return inside_chunk_handler(pos, chunk);
+  }
+
+  PosTranslator.DefaultInsideChunkHandler = function(pos, diff_chunk) {
+    Assert(false, "Cannot translate position in changed area");
+  }
+  
+  PosTranslator.ShiftWithTopInsideChunkHandler =
+      function(pos, diff_chunk) {
+    // We carelessly do not check whether we stay inside the chunk after
+    // translation.
+    return pos - diff_chunk.pos1 + diff_chunk.pos2; 
+  }
+  }
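As a concrete check of the triplet encoding: suppose a single edit replaced
old positions [10, 15) with new text ending at new position 20, i.e. five old
characters became ten new ones. The diff_array is then [10, 15, 20], and
(using the classes above):

    var translator = new PosTranslator([10, 15, 20]);
    translator.Translate(5);    // 5  -- before the chunk, unchanged
    translator.Translate(17);   // 22 -- after the chunk, shifted by +5
    translator.Translate(12, PosTranslator.ShiftWithTopInsideChunkHandler);
                                // 12 -- inside the chunk, shifted with its top
    translator.Translate(12);   // throws: the default handler asserts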
+  var FunctionStatus = {
+      // No change to function or its inner functions; however its positions
+      // in script may have been shifted. 
+      UNCHANGED: "unchanged",
+      // The code of a function remains unchanged, but something happened inside
+      // some inner functions.
+      SOURCE_CHANGED: "source changed",
+      // The code of a function is changed or some nested function cannot be
+      // properly patched so this function must be recompiled.
+      CHANGED: "changed",
+      // Function is changed but cannot be patched.
+      DAMAGED: "damaged"
+  }
+  
+  function CodeInfoTreeNode(code_info, children, array_index) {
+    this.info = code_info;
+    this.children = children;
+    // an index in array of compile_info
+    this.array_index = array_index; 
+    this.parent = void 0;
+    
+    this.status = FunctionStatus.UNCHANGED;
+    // Status explanation is used for debugging purposes and will be shown
+    // in user UI if some explanations are needed.
+    this.status_explanation = void 0;
+    this.new_start_pos = void 0;
+    this.new_end_pos = void 0;
+    this.corresponding_node = void 0;
+    this.unmatched_new_nodes = void 0;
+    this.live_shared_info_wrapper = void 0;
+  }
+  
+  // From an array of function infos that is implicitly a tree, creates
+  // an actual tree of the functions in the script.
+  function BuildCodeInfoTree(code_info_array) {
+    // Throughout the whole function we iterate over the input array.
+    var index = 0;
+
+    // Recursive function that builds a branch of the tree.
+    function BuildNode() {
+      var my_index = index;
+      index++;
+      var child_array = new Array();
+      while (index < code_info_array.length &&
+          code_info_array[index].outer_index == my_index) {
+        child_array.push(BuildNode());
+      }
+      var node = new CodeInfoTreeNode(code_info_array[my_index], child_array,
+          my_index);
+      for (var i = 0; i < child_array.length; i++) {
+        child_array[i].parent = node;
+      }
+      return node;
+    }
+    
+    var root = BuildNode();
+    Assert(index == code_info_array.length);
+    return root;
+  }
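The parent-first serialization means a node's children are exactly the
entries that immediately follow it whose outer_index points back at it. A
small illustration with made-up data (only the outer_index field matters
here, and the helper is private to Debug.LiveEdit):

    var infos = [
      { outer_index: -1 },  // 0: the whole-script toplevel function
      { outer_index:  0 },  // 1: function a(), child of 0
      { outer_index:  1 },  // 2: function b(), nested inside a()
      { outer_index:  0 }   // 3: function c(), second child of 0
    ];
    var root = BuildCodeInfoTree(infos);
    // root.children is [a, c], and a's children is [b].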
+
+  // Applies a list of the textual diff chunks onto the tree of functions.
+  // Determines the status of each function (from unchanged to damaged);
+  // children of unchanged functions are ignored.
+  function MarkChangedFunctions(code_info_tree, chunks) {
+
+    // A convenient iterator over diff chunks that also translates
+    // positions from old to new in the current non-changed part of the script.
+    var chunk_it = new function() {
+      var chunk_index = 0;
+      var pos_diff = 0;
+      this.current = function() { return chunks[chunk_index]; }
+      this.next = function() {
+        var chunk = chunks[chunk_index];
+        pos_diff = chunk.pos2 + chunk.len2 - (chunk.pos1 + chunk.len1); 
+        chunk_index++;
+      }
+      this.done = function() { return chunk_index >= chunks.length; }
+      this.TranslatePos = function(pos) { return pos + pos_diff; }
+    };
+
+    // A recursive function that processes the internals of a function and
+    // all its inner functions. The iterator chunk_it initially points to a
+    // chunk that is below the function start.
+    function ProcessInternals(info_node) {
+      info_node.new_start_pos = chunk_it.TranslatePos(
+          info_node.info.start_position); 
+      var child_index = 0;
+      var code_changed = false;
+      var source_changed = false;
+      // Simultaneously iterates over child functions and over chunks.
+      while (!chunk_it.done() &&
+          chunk_it.current().pos1 < info_node.info.end_position) {
+        if (child_index < info_node.children.length) {
+          var child = info_node.children[child_index];
+          
+          if (child.info.end_position <= chunk_it.current().pos1) {
+            ProcessUnchangedChild(child);
+            child_index++;
+            continue;
+          } else if (child.info.start_position >=
+              chunk_it.current().pos1 + chunk_it.current().len1) {
+            code_changed = true;
+            chunk_it.next();
+            continue;
+          } else if (child.info.start_position <= chunk_it.current().pos1 &&
+              child.info.end_position >= chunk_it.current().pos1 +
+              chunk_it.current().len1) {
+            ProcessInternals(child);
+            source_changed = source_changed ||
+                ( child.status != FunctionStatus.UNCHANGED );
+            code_changed = code_changed ||
+                ( child.status == FunctionStatus.DAMAGED );
+            child_index++;
+            continue;
+          } else {
+            code_changed = true;
+            child.status = FunctionStatus.DAMAGED;
+            child.status_explanation =
+                "Text diff overlaps with function boundary";
+            child_index++;
+            continue;
+          }
+        } else {
+          if (chunk_it.current().pos1 + chunk_it.current().len1 <= 
+              info_node.info.end_position) {
+            info_node.status = FunctionStatus.CHANGED;
+            chunk_it.next();
+            continue;
+          } else {
+            info_node.status = FunctionStatus.DAMAGED;
+            info_node.status_explanation =
+                "Text diff overlaps with function boundary";
+            return;
+          }
+        }
+        Assert("Unreachable", false);
+      }
+      while (child_index < info_node.children.length) {
+        var child = info_node.children[child_index];
+        ProcessUnchangedChild(child);
+        child_index++;
+      }
+      if (code_changed) {
+        info_node.status = FunctionStatus.CHANGED;
+      } else if (source_changed) {
+        info_node.status = FunctionStatus.SOURCE_CHANGED;
+      }
+      info_node.new_end_pos =
+          chunk_it.TranslatePos(info_node.info.end_position); 
+    }
+    
+    function ProcessUnchangedChild(node) {
+      node.new_start_pos = chunk_it.TranslatePos(node.info.start_position);
+      node.new_end_pos = chunk_it.TranslatePos(node.info.end_position);
+    }
+    
+    ProcessInternals(code_info_tree);
+  }
+
+  // For each old function (if it is not damaged) tries to find a
+  // corresponding function in the new script. Typically it should succeed
+  // (non-damaged functions by definition may only have changes inside their
+  // bodies). However, there are reasons for a correspondence not to be found:
+  // a function with unmodified text in the new script may become enclosed in
+  // another function, or an innocent-looking change inside a function body
+  // may in fact be something like "} function B() {" that splits one
+  // function into two.
+  function FindCorrespondingFunctions(old_code_tree, new_code_tree) {
+
+    // A recursive function that tries to find a correspondence for all
+    // child functions and for their inner functions.
+    function ProcessChildren(old_node, new_node) {
+      var old_children = old_node.children;
+      var new_children = new_node.children;
+      
+      var unmatched_new_nodes_list = [];
+
+      var old_index = 0;
+      var new_index = 0;
+      while (old_index < old_children.length) {
+        if (old_children[old_index].status == FunctionStatus.DAMAGED) {
+          old_index++;
+        } else if (new_index < new_children.length) {
+          if (new_children[new_index].info.start_position <
+              old_children[old_index].new_start_pos) {
+            unmatched_new_nodes_list.push(new_children[new_index]);
+            new_index++;
+          } else if (new_children[new_index].info.start_position ==
+              old_children[old_index].new_start_pos) {
+            if (new_children[new_index].info.end_position ==
+                old_children[old_index].new_end_pos) {
+              old_children[old_index].corresponding_node =
+                  new_children[new_index];
+              if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
+                ProcessChildren(old_children[old_index],
+                    new_children[new_index]);
+                if (old_children[old_index].status == FunctionStatus.DAMAGED) {
+                  unmatched_new_nodes_list.push(
+                      old_children[old_index].corresponding_node);
+                  old_children[old_index].corresponding_node = void 0;
+                  old_node.status = FunctionStatus.CHANGED;
+                }
+              }
+            } else {
+              old_children[old_index].status = FunctionStatus.DAMAGED;
+              old_children[old_index].status_explanation =
+                  "No corresponding function in new script found";
+              old_node.status = FunctionStatus.CHANGED;
+              unmatched_new_nodes_list.push(new_children[new_index]);
+            }
+            new_index++;
+            old_index++;
+          } else {
+            old_children[old_index].status = FunctionStatus.DAMAGED;
+            old_children[old_index].status_explanation =
+                "No corresponding function in new script found";
+            old_node.status = FunctionStatus.CHANGED;
+            old_index++;
+          }
+        } else {
+          old_children[old_index].status = FunctionStatus.DAMAGED;
+          old_children[old_index].status_explanation =
+              "No corresponding function in new script found";
+          old_node.status = FunctionStatus.CHANGED;
+          old_index++;
+        }
+      }
+      
+      while (new_index < new_children.length) {
+        unmatched_new_nodes_list.push(new_children[new_index]);
+        new_index++;
+      }
+      
+      if (old_node.status == FunctionStatus.CHANGED) {
+        if (!CompareFunctionExpectations(old_node.info, new_node.info)) {
+          old_node.status = FunctionStatus.DAMAGED;
+          old_node.status_explanation = "Changed code expectations";
+        }
+      }
+      old_node.unmatched_new_nodes = unmatched_new_nodes_list;
+    }
+
+    ProcessChildren(old_code_tree, new_code_tree);
+    
+    old_code_tree.corresponding_node = new_code_tree;
+    Assert(old_code_tree.status != FunctionStatus.DAMAGED,
+        "Script became damaged");
+  }
+  
+  function FindLiveSharedInfos(old_code_tree, script) {
+    var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
+    
+    var shared_infos = new Array();
+  
+    for (var i = 0; i < shared_raw_list.length; i++) {
+      shared_infos.push(new SharedInfoWrapper(shared_raw_list[i]));
+    }
+    
+    // Finds the SharedFunctionInfo that corresponds to the given compile
+    // info from the old version of the script.
+    function FindFunctionInfo(compile_info) {
+      for (var i = 0; i < shared_infos.length; i++) {
+        var wrapper = shared_infos[i];
+        if (wrapper.start_position == compile_info.start_position &&
+            wrapper.end_position == compile_info.end_position) {
+          return wrapper;
+        }
+      }
+    }
+    
+    function TraverseTree(node) {
+      var info_wrapper = FindFunctionInfo(node.info);
+      if (info_wrapper) {
+        node.live_shared_info_wrapper = info_wrapper;
+      }
+      for (var i = 0; i < node.children.length; i++) {
+        TraverseTree(node.children[i]);
+      }
+    }
+
+    TraverseTree(old_code_tree);
+  }
+
+  
+  // An object describing function compilation details. Its index fields
+  // refer to indexes inside the array that stores these objects.
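+  // The raw_array field order matches the offsets used by the C++ class
+  // FunctionInfoWrapper in liveedit.cc.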
+  function FunctionCompileInfo(raw_array) {
+    this.function_name = raw_array[0];
+    this.start_position = raw_array[1];
+    this.end_position = raw_array[2];
+    this.param_num = raw_array[3];
+    this.code = raw_array[4];
+    this.scope_info = raw_array[5];
+    this.outer_index = raw_array[6];
+    this.shared_function_info = raw_array[7];
+    this.next_sibling_index = null;
+    this.raw_array = raw_array;
+  }
+  
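+  // A wrapper over the raw array built by the C++ class SharedInfoWrapper
+  // in liveedit.cc; the field order matches its offsets.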
+  function SharedInfoWrapper(raw_array) {
+    this.function_name = raw_array[0];
+    this.start_position = raw_array[1];
+    this.end_position = raw_array[2];
+    this.info = raw_array[3];
+    this.raw_array = raw_array;
+  }
+
+  // Changes positions (including those of all statements) in a function.
+  function PatchPositions(old_info_node, diff_array, report_array) {
+    var shared_info_wrapper = old_info_node.live_shared_info_wrapper;
+    if (!shared_info_wrapper) {
+      // TODO(LiveEdit): function is not compiled yet or is already collected.
+      report_array.push( 
+          { name: old_info_node.info.function_name, info_not_found: true } );
+      return;
+    }
+    %LiveEditPatchFunctionPositions(shared_info_wrapper.raw_array,
+        diff_array);
+    report_array.push( { name: old_info_node.info.function_name } );
+  }
+
+  // Adds a suffix to the script name to mark that it is the old version.
+  function CreateNameForOldScript(script) {
+    // TODO(635): try better than this; support several changes.
+    return script.name + " (old)";
+  }
+  
+  // Compares the old and new versions of a function's interface to
+  // determine whether it changed.
+  function CompareFunctionExpectations(function_info1, function_info2) {
+    // Check that the function has the same number of parameters (there may
+    // exist an adaptor that won't survive a change in the parameter count).
+    if (function_info1.param_num != function_info2.param_num) {
+      return false;
+    }
+    var scope_info1 = function_info1.scope_info;
+    var scope_info2 = function_info2.scope_info;
+  
+    if (!scope_info1) {
+      return !scope_info2;
+    }
+  
+    if (scope_info1.length != scope_info2.length) {
+      return false;
+    }
+  
+    // Check that outer scope structure is not changed. Otherwise the function
+    // will not properly work with existing scopes.
+    return scope_info1.toString() == scope_info2.toString();
+  }
+  
+  // Minifier forward declaration.
+  var FunctionPatchabilityStatus;
+  
+  // For an array of wrapped shared function infos, checks that none of them
+  // have activations on the stack (of any thread). Throws a Failure
+  // exception if this proves to be false.
+  function CheckStackActivations(shared_wrapper_list, change_log) {
+    var shared_list = new Array();
+    for (var i = 0; i < shared_wrapper_list.length; i++) {
+      shared_list[i] = shared_wrapper_list[i].info;
+    }
+    var result = %LiveEditCheckAndDropActivations(shared_list, true);
+    if (result[shared_list.length]) {
+      // The extra array element may contain an error message.
+      throw new Failure(result[shared_list.length]);
+    }
+  
+    var problems = new Array();
+    var dropped = new Array();
+    for (var i = 0; i < shared_list.length; i++) {
+      var shared = shared_wrapper_list[i];
+      if (result[i] == FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
+        dropped.push({ name: shared.function_name } );
+      } else if (result[i] != FunctionPatchabilityStatus.AVAILABLE_FOR_PATCH) {
+        var description = {
+            name: shared.function_name,
+            start_pos: shared.start_position, 
+            end_pos: shared.end_position,
+            replace_problem:
+                FunctionPatchabilityStatus.SymbolName(result[i])
+        };
+        problems.push(description);
+      }
+    }
+    if (dropped.length > 0) {
+      change_log.push({ dropped_from_stack: dropped });
+    }
+    if (problems.length > 0) {
+      change_log.push( { functions_on_stack: problems } );
+      throw new Failure("Blocked by functions on stack");
+    }
+  }
+  
+  // A copy of the FunctionPatchabilityStatus enum from liveedit.h
+  var FunctionPatchabilityStatus = {
+      AVAILABLE_FOR_PATCH: 1,
+      BLOCKED_ON_ACTIVE_STACK: 2,
+      BLOCKED_ON_OTHER_STACK: 3,
+      BLOCKED_UNDER_NATIVE_CODE: 4,
+      REPLACED_ON_ACTIVE_STACK: 5
+  }
+  
+  FunctionPatchabilityStatus.SymbolName = function(code) {
+    var enumeration = FunctionPatchabilityStatus;
+    for (var name in enumeration) {
+      if (enumeration[name] == code) {
+        return name;
+      }
+    }
+  }
+  
+  
+  // A logical failure in the liveedit process. This means that change_log
+  // is a valid and consistent description of what happened.
+  function Failure(message) {
+    this.message = message;
+  }
+  // Function (constructor) is public.
+  this.Failure = Failure;
+  
+  Failure.prototype.toString = function() {
+    return "LiveEdit Failure: " + this.message;
+  }
+  
+  // A testing entry.
+  function GetPcFromSourcePos(func, source_pos) {
+    return %GetFunctionCodePositionFromSource(func, source_pos);
+  }
+  // Function is public.
+  this.GetPcFromSourcePos = GetPcFromSourcePos;
+
+  // LiveEdit main entry point: changes a script text to a new string.
+  function SetScriptSource(script, new_source, change_log) {
+    var old_source = script.source;
+    var diff = CompareStringsLinewise(old_source, new_source);
+    if (diff.length == 0) {
+      change_log.push( { empty_diff: true } );
+      return;
+    }
+    ApplyPatchMultiChunk(script, diff, new_source, change_log);
+  }
+  // Function is public.
+  this.SetScriptSource = SetScriptSource;
+  
+  function CompareStringsLinewise(s1, s2) {
+    return %LiveEditCompareStringsLinewise(s1, s2);
+  }
+
+  // Applies the change to the script.
+  // The change is always a substring (change_pos, change_pos + change_len)
+  // being replaced with a completely different string new_str.
+  // This API is legacy and obsolete.
+  //
+  // @param {Script} script the script that is being changed
+  // @param {Array} change_log a list that collects an engineer-readable
+  //     description of what happened.
+  function ApplySingleChunkPatch(script, change_pos, change_len, new_str,
+      change_log) {
+    var old_source = script.source;
+  
+    // Prepare new source string.
+    var new_source = old_source.substring(0, change_pos) +
+        new_str + old_source.substring(change_pos + change_len);
+    
+    return ApplyPatchMultiChunk(script,
+        [ change_pos, change_pos + change_len, change_pos + new_str.length],
+        new_source, change_log);
+  }
+
+  
+  // Functions are public for tests.
+  this.TestApi = {
+    PosTranslator: PosTranslator,
+    CompareStringsLinewise: CompareStringsLinewise,
+    ApplySingleChunkPatch: ApplySingleChunkPatch
+  }
+}
diff --git a/src/liveedit.cc b/src/liveedit.cc
index c50e007..592ef49 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -34,54 +34,1435 @@
 #include "scopes.h"
 #include "global-handles.h"
 #include "debug.h"
+#include "memory.h"
 
 namespace v8 {
 namespace internal {
 
 
-class FunctionInfoListener {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+
+// A simple implementation of a dynamic programming algorithm. It solves the
+// problem of finding the difference between 2 arrays. It uses a table of
+// results of subproblems. Each cell contains a number together with a 2-bit
+// flag that helps build the chunk list.
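+// The recurrence computed by CompareUpToTail is the classic edit distance
+// without substitutions:
+//   dist(i, j) = dist(i + 1, j + 1)                         if equals(i, j)
+//   dist(i, j) = 1 + min(dist(i + 1, j), dist(i, j + 1))    otherwise
+// Distances are stored shifted left by kDirectionSizeBits so the chosen
+// direction fits in the low bits; the table takes len1 * len2 cells.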
+class Differencer {
  public:
-  void FunctionStarted(FunctionLiteral* fun) {
-    // Implementation follows.
+  explicit Differencer(Comparator::Input* input)
+      : input_(input), len1_(input->getLength1()), len2_(input->getLength2()) {
+    buffer_ = NewArray<int>(len1_ * len2_);
+  }
+  ~Differencer() {
+    DeleteArray(buffer_);
   }
 
-  void FunctionDone() {
-    // Implementation follows.
+  void Initialize() {
+    int array_size = len1_ * len2_;
+    for (int i = 0; i < array_size; i++) {
+      buffer_[i] = kEmptyCellValue;
+    }
   }
 
-  void FunctionScope(Scope* scope) {
-    // Implementation follows.
+  // Makes sure that result for the full problem is calculated and stored
+  // in the table together with flags showing a path through subproblems.
+  void FillTable() {
+    CompareUpToTail(0, 0);
   }
 
-  void FunctionCode(Handle<Code> function_code) {
-    // Implementation follows.
+  void SaveResult(Comparator::Output* chunk_writer) {
+    ResultWriter writer(chunk_writer);
+
+    int pos1 = 0;
+    int pos2 = 0;
+    while (true) {
+      if (pos1 < len1_) {
+        if (pos2 < len2_) {
+          Direction dir = get_direction(pos1, pos2);
+          switch (dir) {
+            case EQ:
+              writer.eq();
+              pos1++;
+              pos2++;
+              break;
+            case SKIP1:
+              writer.skip1(1);
+              pos1++;
+              break;
+            case SKIP2:
+            case SKIP_ANY:
+              writer.skip2(1);
+              pos2++;
+              break;
+            default:
+              UNREACHABLE();
+          }
+        } else {
+          writer.skip1(len1_ - pos1);
+          break;
+        }
+      } else {
+        if (len2_ != pos2) {
+          writer.skip2(len2_ - pos2);
+        }
+        break;
+      }
+    }
+    writer.close();
+  }
+
+ private:
+  Comparator::Input* input_;
+  int* buffer_;
+  int len1_;
+  int len2_;
+
+  enum Direction {
+    EQ = 0,
+    SKIP1,
+    SKIP2,
+    SKIP_ANY,
+
+    MAX_DIRECTION_FLAG_VALUE = SKIP_ANY
+  };
+
+  // Computes the result for a subtask and optionally caches it in the buffer
+  // table. All result values are shifted to make space for the flags in the
+  // lower bits.
+  int CompareUpToTail(int pos1, int pos2) {
+    if (pos1 < len1_) {
+      if (pos2 < len2_) {
+        int cached_res = get_value4(pos1, pos2);
+        if (cached_res == kEmptyCellValue) {
+          Direction dir;
+          int res;
+          if (input_->equals(pos1, pos2)) {
+            res = CompareUpToTail(pos1 + 1, pos2 + 1);
+            dir = EQ;
+          } else {
+            int res1 = CompareUpToTail(pos1 + 1, pos2) +
+                (1 << kDirectionSizeBits);
+            int res2 = CompareUpToTail(pos1, pos2 + 1) +
+                (1 << kDirectionSizeBits);
+            if (res1 == res2) {
+              res = res1;
+              dir = SKIP_ANY;
+            } else if (res1 < res2) {
+              res = res1;
+              dir = SKIP1;
+            } else {
+              res = res2;
+              dir = SKIP2;
+            }
+          }
+          set_value4_and_dir(pos1, pos2, res, dir);
+          cached_res = res;
+        }
+        return cached_res;
+      } else {
+        return (len1_ - pos1) << kDirectionSizeBits;
+      }
+    } else {
+      return (len2_ - pos2) << kDirectionSizeBits;
+    }
+  }
+
+  inline int& get_cell(int i1, int i2) {
+    return buffer_[i1 + i2 * len1_];
+  }
+
+  // Each cell keeps a value plus direction. Value is multiplied by 4.
+  void set_value4_and_dir(int i1, int i2, int value4, Direction dir) {
+    ASSERT((value4 & kDirectionMask) == 0);
+    get_cell(i1, i2) = value4 | dir;
+  }
+
+  int get_value4(int i1, int i2) {
+    return get_cell(i1, i2) & (kMaxUInt32 ^ kDirectionMask);
+  }
+  Direction get_direction(int i1, int i2) {
+    return static_cast<Direction>(get_cell(i1, i2) & kDirectionMask);
+  }
+
+  static const int kDirectionSizeBits = 2;
+  static const int kDirectionMask = (1 << kDirectionSizeBits) - 1;
+  static const int kEmptyCellValue = -1 << kDirectionSizeBits;
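+  // kEmptyCellValue is negative, while any cached (value4 | dir) entry is
+  // non-negative, so an empty cell can never be mistaken for a result.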
+
+  // This method only holds a static assert statement (unfortunately one
+  // cannot be placed at class scope).
+  void StaticAssertHolder() {
+    STATIC_ASSERT(MAX_DIRECTION_FLAG_VALUE < (1 << kDirectionSizeBits));
+  }
+
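+  // Turns the stream of eq()/skip1()/skip2() decisions into maximal chunks:
+  // consecutive skips accumulate in one open chunk, which is flushed by the
+  // next eq() or by close().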
+  class ResultWriter {
+   public:
+    explicit ResultWriter(Comparator::Output* chunk_writer)
+        : chunk_writer_(chunk_writer), pos1_(0), pos2_(0),
+          pos1_begin_(-1), pos2_begin_(-1), has_open_chunk_(false) {
+    }
+    void eq() {
+      FlushChunk();
+      pos1_++;
+      pos2_++;
+    }
+    void skip1(int len1) {
+      StartChunk();
+      pos1_ += len1;
+    }
+    void skip2(int len2) {
+      StartChunk();
+      pos2_ += len2;
+    }
+    void close() {
+      FlushChunk();
+    }
+
+   private:
+    Comparator::Output* chunk_writer_;
+    int pos1_;
+    int pos2_;
+    int pos1_begin_;
+    int pos2_begin_;
+    bool has_open_chunk_;
+
+    void StartChunk() {
+      if (!has_open_chunk_) {
+        pos1_begin_ = pos1_;
+        pos2_begin_ = pos2_;
+        has_open_chunk_ = true;
+      }
+    }
+
+    void FlushChunk() {
+      if (has_open_chunk_) {
+        chunk_writer_->AddChunk(pos1_begin_, pos2_begin_,
+                                pos1_ - pos1_begin_, pos2_ - pos2_begin_);
+        has_open_chunk_ = false;
+      }
+    }
+  };
+};
+
+
+void Comparator::CalculateDifference(Comparator::Input* input,
+                                     Comparator::Output* result_writer) {
+  Differencer differencer(input);
+  differencer.Initialize();
+  differencer.FillTable();
+  differencer.SaveResult(result_writer);
+}
+
+
+static bool CompareSubstrings(Handle<String> s1, int pos1,
+                              Handle<String> s2, int pos2, int len) {
+  static StringInputBuffer buf1;
+  static StringInputBuffer buf2;
+  buf1.Reset(*s1);
+  buf1.Seek(pos1);
+  buf2.Reset(*s2);
+  buf2.Seek(pos2);
+  for (int i = 0; i < len; i++) {
+    ASSERT(buf1.has_more() && buf2.has_more());
+    if (buf1.GetNext() != buf2.GetNext()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+// Wraps a raw n-element line_ends array as a list of n+1 lines. The last
+// line never has a terminating newline character.
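+// For example, for "a\nb" the line_ends array is [1]: length() is 2, line 0
+// spans [0, 2) (including the '\n') and line 1 spans [2, 3).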
+class LineEndsWrapper {
+ public:
+  explicit LineEndsWrapper(Handle<String> string)
+      : ends_array_(CalculateLineEnds(string, false)),
+        string_len_(string->length()) {
+  }
+  int length() {
+    return ends_array_->length() + 1;
+  }
+  // Returns the start of any line, including the start of the imaginary
+  // line after the last line.
+  int GetLineStart(int index) {
+    if (index == 0) {
+      return 0;
+    } else {
+      return GetLineEnd(index - 1);
+    }
+  }
+  int GetLineEnd(int index) {
+    if (index == ends_array_->length()) {
+      // The end of the last line is always the end of the whole string.
+      // If the string ends with a newline character, the last line is the
+      // empty string after this character.
+      return string_len_;
+    } else {
+      return GetPosAfterNewLine(index);
+    }
+  }
+
+ private:
+  Handle<FixedArray> ends_array_;
+  int string_len_;
+
+  int GetPosAfterNewLine(int index) {
+    return Smi::cast(ends_array_->get(index))->value() + 1;
   }
 };
 
+
+// Represents 2 strings as 2 arrays of lines.
+class LineArrayCompareInput : public Comparator::Input {
+ public:
+  LineArrayCompareInput(Handle<String> s1, Handle<String> s2,
+                        LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
+      : s1_(s1), s2_(s2), line_ends1_(line_ends1), line_ends2_(line_ends2) {
+  }
+  int getLength1() {
+    return line_ends1_.length();
+  }
+  int getLength2() {
+    return line_ends2_.length();
+  }
+  bool equals(int index1, int index2) {
+    int line_start1 = line_ends1_.GetLineStart(index1);
+    int line_start2 = line_ends2_.GetLineStart(index2);
+    int line_end1 = line_ends1_.GetLineEnd(index1);
+    int line_end2 = line_ends2_.GetLineEnd(index2);
+    int len1 = line_end1 - line_start1;
+    int len2 = line_end2 - line_start2;
+    if (len1 != len2) {
+      return false;
+    }
+    return CompareSubstrings(s1_, line_start1, s2_, line_start2, len1);
+  }
+
+ private:
+  Handle<String> s1_;
+  Handle<String> s2_;
+  LineEndsWrapper line_ends1_;
+  LineEndsWrapper line_ends2_;
+};
+
+
+// Stores the compare result in a JSArray. Each chunk is stored as 3 array
+// elements: (pos1_begin, pos1_end, pos2_end).
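+// This is the same triplet format that TranslatePosition below and
+// ApplySingleChunkPatch in liveedit-debugger.js operate on.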
+class LineArrayCompareOutput : public Comparator::Output {
+ public:
+  LineArrayCompareOutput(LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
+      : array_(Factory::NewJSArray(10)), current_size_(0),
+        line_ends1_(line_ends1), line_ends2_(line_ends2) {
+  }
+
+  void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
+    int char_pos1 = line_ends1_.GetLineStart(line_pos1);
+    int char_pos2 = line_ends2_.GetLineStart(line_pos2);
+    int char_len1 = line_ends1_.GetLineStart(line_pos1 + line_len1) - char_pos1;
+    int char_len2 = line_ends2_.GetLineStart(line_pos2 + line_len2) - char_pos2;
+
+    SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1)));
+    SetElement(array_, current_size_ + 1,
+               Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
+    SetElement(array_, current_size_ + 2,
+               Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
+    current_size_ += 3;
+  }
+
+  Handle<JSArray> GetResult() {
+    return array_;
+  }
+
+ private:
+  Handle<JSArray> array_;
+  int current_size_;
+  LineEndsWrapper line_ends1_;
+  LineEndsWrapper line_ends2_;
+};
+
+
+Handle<JSArray> LiveEdit::CompareStringsLinewise(Handle<String> s1,
+                                                 Handle<String> s2) {
+  LineEndsWrapper line_ends1(s1);
+  LineEndsWrapper line_ends2(s2);
+
+  LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
+  LineArrayCompareOutput output(line_ends1, line_ends2);
+
+  Comparator::CalculateDifference(&input, &output);
+
+  return output.GetResult();
+}
+
+
+static void CompileScriptForTracker(Handle<Script> script) {
+  const bool is_eval = false;
+  const bool is_global = true;
+  // TODO(635): support extensions.
+  Extension* extension = NULL;
+
+  PostponeInterruptsScope postpone;
+
+  // Only allow non-global compiles for eval.
+  ASSERT(is_eval || is_global);
+
+  // Build AST.
+  ScriptDataImpl* pre_data = NULL;
+  FunctionLiteral* lit = MakeAST(is_global, script, extension, pre_data);
+
+  // Check for parse errors.
+  if (lit == NULL) {
+    ASSERT(Top::has_pending_exception());
+    return;
+  }
+
+  // Compile the code.
+  CompilationInfo info(lit, script, is_eval);
+
+  LiveEditFunctionTracker tracker(lit);
+  Handle<Code> code = MakeCodeForLiveEdit(&info);
+
+  // Check for stack-overflow exceptions.
+  if (code.is_null()) {
+    Top::StackOverflow();
+    return;
+  }
+  tracker.RecordRootFunctionInfo(code);
+}
+
+// Unwraps a JSValue object, returning its "value" field.
+static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
+  return Handle<Object>(jsValue->value());
+}
+
+// Wraps any object into an OpaqueReference that will hide the object
+// from JavaScript.
+static Handle<JSValue> WrapInJSValue(Object* object) {
+  Handle<JSFunction> constructor = Top::opaque_reference_function();
+  Handle<JSValue> result =
+      Handle<JSValue>::cast(Factory::NewJSObject(constructor));
+  result->set_value(object);
+  return result;
+}
+
+// Simple helper class that creates more or less typed structures over a
+// JSArray object. This is an ad hoc method of passing structures from C++
+// to JavaScript.
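+// Subclasses pass themselves as S (the curiously recurring template
+// pattern), so that Create() and cast() return the concrete wrapper type.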
+template<typename S>
+class JSArrayBasedStruct {
+ public:
+  static S Create() {
+    Handle<JSArray> array = Factory::NewJSArray(S::kSize_);
+    return S(array);
+  }
+  static S cast(Object* object) {
+    JSArray* array = JSArray::cast(object);
+    Handle<JSArray> array_handle(array);
+    return S(array_handle);
+  }
+  explicit JSArrayBasedStruct(Handle<JSArray> array) : array_(array) {
+  }
+  Handle<JSArray> GetJSArray() {
+    return array_;
+  }
+ protected:
+  void SetField(int field_position, Handle<Object> value) {
+    SetElement(array_, field_position, value);
+  }
+  void SetSmiValueField(int field_position, int value) {
+    SetElement(array_, field_position, Handle<Smi>(Smi::FromInt(value)));
+  }
+  Object* GetField(int field_position) {
+    return array_->GetElement(field_position);
+  }
+  int GetSmiValueField(int field_position) {
+    Object* res = GetField(field_position);
+    return Smi::cast(res)->value();
+  }
+ private:
+  Handle<JSArray> array_;
+};
+
+
+// Represents some function compilation details. This structure will be used
+// from JavaScript. It contains a Code object, which is kept wrapped in an
+// OpaqueReference for sanitizing reasons.
+class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
+ public:
+  explicit FunctionInfoWrapper(Handle<JSArray> array)
+      : JSArrayBasedStruct<FunctionInfoWrapper>(array) {
+  }
+  void SetInitialProperties(Handle<String> name, int start_position,
+                            int end_position, int param_num, int parent_index) {
+    HandleScope scope;
+    this->SetField(kFunctionNameOffset_, name);
+    this->SetSmiValueField(kStartPositionOffset_, start_position);
+    this->SetSmiValueField(kEndPositionOffset_, end_position);
+    this->SetSmiValueField(kParamNumOffset_, param_num);
+    this->SetSmiValueField(kParentIndexOffset_, parent_index);
+  }
+  void SetFunctionCode(Handle<Code> function_code) {
+    Handle<JSValue> wrapper = WrapInJSValue(*function_code);
+    this->SetField(kCodeOffset_, wrapper);
+  }
+  void SetScopeInfo(Handle<Object> scope_info_array) {
+    this->SetField(kScopeInfoOffset_, scope_info_array);
+  }
+  void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
+    Handle<JSValue> info_holder = WrapInJSValue(*info);
+    this->SetField(kSharedFunctionInfoOffset_, info_holder);
+  }
+  int GetParentIndex() {
+    return this->GetSmiValueField(kParentIndexOffset_);
+  }
+  Handle<Code> GetFunctionCode() {
+    Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
+        JSValue::cast(this->GetField(kCodeOffset_))));
+    return Handle<Code>::cast(raw_result);
+  }
+  int GetStartPosition() {
+    return this->GetSmiValueField(kStartPositionOffset_);
+  }
+  int GetEndPosition() {
+    return this->GetSmiValueField(kEndPositionOffset_);
+  }
+
+ private:
+  static const int kFunctionNameOffset_ = 0;
+  static const int kStartPositionOffset_ = 1;
+  static const int kEndPositionOffset_ = 2;
+  static const int kParamNumOffset_ = 3;
+  static const int kCodeOffset_ = 4;
+  static const int kScopeInfoOffset_ = 5;
+  static const int kParentIndexOffset_ = 6;
+  static const int kSharedFunctionInfoOffset_ = 7;
+  static const int kSize_ = 8;
+
+  friend class JSArrayBasedStruct<FunctionInfoWrapper>;
+};
+
+// Wraps a SharedFunctionInfo along with some of its fields for passing it
+// back to JavaScript. The SharedFunctionInfo object itself is additionally
+// wrapped in an OpaqueReference for sanitizing reasons.
+class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
+ public:
+  static bool IsInstance(Handle<JSArray> array) {
+    return array->length() == Smi::FromInt(kSize_) &&
+        array->GetElement(kSharedInfoOffset_)->IsJSValue();
+  }
+
+  explicit SharedInfoWrapper(Handle<JSArray> array)
+      : JSArrayBasedStruct<SharedInfoWrapper>(array) {
+  }
+
+  void SetProperties(Handle<String> name, int start_position, int end_position,
+                     Handle<SharedFunctionInfo> info) {
+    HandleScope scope;
+    this->SetField(kFunctionNameOffset_, name);
+    Handle<JSValue> info_holder = WrapInJSValue(*info);
+    this->SetField(kSharedInfoOffset_, info_holder);
+    this->SetSmiValueField(kStartPositionOffset_, start_position);
+    this->SetSmiValueField(kEndPositionOffset_, end_position);
+  }
+  Handle<SharedFunctionInfo> GetInfo() {
+    Object* element = this->GetField(kSharedInfoOffset_);
+    Handle<JSValue> value_wrapper(JSValue::cast(element));
+    Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
+    return Handle<SharedFunctionInfo>::cast(raw_result);
+  }
+
+ private:
+  static const int kFunctionNameOffset_ = 0;
+  static const int kStartPositionOffset_ = 1;
+  static const int kEndPositionOffset_ = 2;
+  static const int kSharedInfoOffset_ = 3;
+  static const int kSize_ = 4;
+
+  friend class JSArrayBasedStruct<SharedInfoWrapper>;
+};
+
+class FunctionInfoListener {
+ public:
+  FunctionInfoListener() {
+    current_parent_index_ = -1;
+    len_ = 0;
+    result_ = Factory::NewJSArray(10);
+  }
+
+  void FunctionStarted(FunctionLiteral* fun) {
+    HandleScope scope;
+    FunctionInfoWrapper info = FunctionInfoWrapper::Create();
+    info.SetInitialProperties(fun->name(), fun->start_position(),
+                              fun->end_position(), fun->num_parameters(),
+                              current_parent_index_);
+    current_parent_index_ = len_;
+    SetElement(result_, len_, info.GetJSArray());
+    len_++;
+  }
+
+  void FunctionDone() {
+    HandleScope scope;
+    FunctionInfoWrapper info =
+        FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
+    current_parent_index_ = info.GetParentIndex();
+  }
+
+// TODO(LiveEdit): Move the private method below. This private section was
+//     created here to avoid moving the function, keeping an already complex
+//     diff simpler.
+ private:
+  Object* SerializeFunctionScope(Scope* scope) {
+    HandleScope handle_scope;
+
+    Handle<JSArray> scope_info_list = Factory::NewJSArray(10);
+    int scope_info_length = 0;
+
+    // Saves a description of the scope. It stores the names and indexes of
+    // context-allocated variables in the whole scope chain. Null elements
+    // delimit the scopes of this chain.
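+    // The serialized form is a flat list of (name, index) pairs followed by
+    // a null element per scope, starting with the innermost enclosing scope:
+    //   [name1, index1, name2, index2, null, name3, index3, null, ...]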
+    Scope* outer_scope = scope->outer_scope();
+    if (outer_scope == NULL) {
+      return Heap::undefined_value();
+    }
+    do {
+      ZoneList<Variable*> list(10);
+      outer_scope->CollectUsedVariables(&list);
+      int j = 0;
+      for (int i = 0; i < list.length(); i++) {
+        Variable* var1 = list[i];
+        Slot* slot = var1->slot();
+        if (slot != NULL && slot->type() == Slot::CONTEXT) {
+          if (j != i) {
+            list[j] = var1;
+          }
+          j++;
+        }
+      }
+
+      // Sort the context-allocated variables by slot index (selection sort).
+      for (int k = 0; k < j; k++) {
+        int l = k;
+        for (int m = k + 1; m < j; m++) {
+          if (list[l]->slot()->index() > list[m]->slot()->index()) {
+            l = m;
+          }
+        }
+        Variable* temp = list[k];
+        list[k] = list[l];
+        list[l] = temp;
+      }
+      for (int i = 0; i < j; i++) {
+        SetElement(scope_info_list, scope_info_length, list[i]->name());
+        scope_info_length++;
+        SetElement(scope_info_list, scope_info_length,
+                   Handle<Smi>(Smi::FromInt(list[i]->slot()->index())));
+        scope_info_length++;
+      }
+      SetElement(scope_info_list, scope_info_length,
+                 Handle<Object>(Heap::null_value()));
+      scope_info_length++;
+
+      outer_scope = outer_scope->outer_scope();
+    } while (outer_scope != NULL);
+
+    return *scope_info_list;
+  }
+
+ public:
+  // Saves only the function code, because for a script function we
+  // may never create a SharedFunctionInfo object.
+  void FunctionCode(Handle<Code> function_code) {
+    FunctionInfoWrapper info =
+        FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
+    info.SetFunctionCode(function_code);
+  }
+
+  // Saves full information about a function: its code, its scope info
+  // and a SharedFunctionInfo object.
+  void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope) {
+    if (!shared->IsSharedFunctionInfo()) {
+      return;
+    }
+    FunctionInfoWrapper info =
+        FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
+    info.SetFunctionCode(Handle<Code>(shared->code()));
+    info.SetSharedFunctionInfo(shared);
+
+    Handle<Object> scope_info_list(SerializeFunctionScope(scope));
+    info.SetScopeInfo(scope_info_list);
+  }
+
+  Handle<JSArray> GetResult() {
+    return result_;
+  }
+
+ private:
+  Handle<JSArray> result_;
+  int len_;
+  int current_parent_index_;
+};
+
 static FunctionInfoListener* active_function_info_listener = NULL;
 
+JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
+                                     Handle<String> source) {
+  CompilationZoneScope zone_scope(DELETE_ON_EXIT);
+
+  FunctionInfoListener listener;
+  Handle<Object> original_source = Handle<Object>(script->source());
+  script->set_source(*source);
+  active_function_info_listener = &listener;
+  CompileScriptForTracker(script);
+  active_function_info_listener = NULL;
+  script->set_source(*original_source);
+
+  return *(listener.GetResult());
+}
+
+
+void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
+  HandleScope scope;
+  int len = Smi::cast(array->length())->value();
+  for (int i = 0; i < len; i++) {
+    Handle<SharedFunctionInfo> info(
+        SharedFunctionInfo::cast(array->GetElement(i)));
+    SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create();
+    Handle<String> name_handle(String::cast(info->name()));
+    info_wrapper.SetProperties(name_handle, info->start_position(),
+                               info->end_position(), info);
+    array->SetElement(i, *(info_wrapper.GetJSArray()));
+  }
+}
+
+
+// Visitor that collects all references to a particular code object,
+// including "CODE_TARGET" references in other code objects.
+// It works in the context of a ZoneScope.
+class ReferenceCollectorVisitor : public ObjectVisitor {
+ public:
+  explicit ReferenceCollectorVisitor(Code* original)
+      : original_(original), rvalues_(10), reloc_infos_(10) {
+  }
+
+  virtual void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) {
+      if (*p == original_) {
+        rvalues_.Add(p);
+      }
+    }
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) {
+    if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
+        Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
+      reloc_infos_.Add(*rinfo);
+    }
+  }
+
+  virtual void VisitDebugTarget(RelocInfo* rinfo) {
+    VisitCodeTarget(rinfo);
+  }
+
+  // Post-visiting method that iterates over all collected references and
+  // modifies them.
+  void Replace(Code* substitution) {
+    for (int i = 0; i < rvalues_.length(); i++) {
+      *(rvalues_[i]) = substitution;
+    }
+    for (int i = 0; i < reloc_infos_.length(); i++) {
+      reloc_infos_[i].set_target_address(substitution->instruction_start());
+    }
+  }
+
+ private:
+  Code* original_;
+  ZoneList<Object**> rvalues_;
+  ZoneList<RelocInfo> reloc_infos_;
+};
+
+
+class FrameCookingThreadVisitor : public ThreadVisitor {
+ public:
+  void VisitThread(ThreadLocalTop* top) {
+    StackFrame::CookFramesForThread(top);
+  }
+};
+
+class FrameUncookingThreadVisitor : public ThreadVisitor {
+ public:
+  void VisitThread(ThreadLocalTop* top) {
+    StackFrame::UncookFramesForThread(top);
+  }
+};
+
+static void IterateAllThreads(ThreadVisitor* visitor) {
+  Top::IterateThread(visitor);
+  ThreadManager::IterateThreads(visitor);
+}
+
+// Finds all references to original and replaces them with substitution.
+static void ReplaceCodeObject(Code* original, Code* substitution) {
+  ASSERT(!Heap::InNewSpace(substitution));
+
+  AssertNoAllocation no_allocations_please;
+
+  // A zone scope for ReferenceCollectorVisitor.
+  ZoneScope scope(DELETE_ON_EXIT);
+
+  ReferenceCollectorVisitor visitor(original);
+
+  // Iterate over all roots. Stack frames may have pointers into the original
+  // code, so temporarily replace the pointers with offset numbers in the
+  // prologue/epilogue.
+  {
+    FrameCookingThreadVisitor cooking_visitor;
+    IterateAllThreads(&cooking_visitor);
+
+    Heap::IterateStrongRoots(&visitor, VISIT_ALL);
+
+    FrameUncookingThreadVisitor uncooking_visitor;
+    IterateAllThreads(&uncooking_visitor);
+  }
+
+  // Now iterate over all pointers of all objects, including code_target
+  // implicit pointers.
+  HeapIterator iterator;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+    obj->Iterate(&visitor);
+  }
+
+  visitor.Replace(substitution);
+}
+
+
+// Checks whether the code is natural function code (not a lazy-compile
+// stub).
+static bool IsJSFunctionCode(Code* code) {
+  return code->kind() == Code::FUNCTION;
+}
+
+
+Object* LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
+                                      Handle<JSArray> shared_info_array) {
+  HandleScope scope;
+
+  if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
+    return Top::ThrowIllegalOperation();
+  }
+
+  FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
+  SharedInfoWrapper shared_info_wrapper(shared_info_array);
+
+  Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
+
+  if (IsJSFunctionCode(shared_info->code())) {
+    ReplaceCodeObject(shared_info->code(),
+                      *(compile_info_wrapper.GetFunctionCode()));
+  }
+
+  if (shared_info->debug_info()->IsDebugInfo()) {
+    Handle<DebugInfo> debug_info(DebugInfo::cast(shared_info->debug_info()));
+    Handle<Code> new_original_code =
+        Factory::CopyCode(compile_info_wrapper.GetFunctionCode());
+    debug_info->set_original_code(*new_original_code);
+  }
+
+  shared_info->set_start_position(compile_info_wrapper.GetStartPosition());
+  shared_info->set_end_position(compile_info_wrapper.GetEndPosition());
+
+  shared_info->set_construct_stub(
+      Builtins::builtin(Builtins::JSConstructStubGeneric));
+
+  return Heap::undefined_value();
+}
+
+
+// TODO(635): Eval caches its scripts (same text -- same compiled info).
+// Make sure we clear such caches.
+void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
+                                 Handle<Object> script_handle) {
+  Handle<SharedFunctionInfo> shared_info =
+      Handle<SharedFunctionInfo>::cast(UnwrapJSValue(function_wrapper));
+  shared_info->set_script(*script_handle);
+}
+
+
+// For a script text change (defined as position_change_array), translates a
+// position in the unchanged text to a position in the changed text.
+// A text change is a set of non-overlapping regions in the text that have
+// changed their contents and length. It is specified as an array of groups
+// of 3 numbers: (change_begin, change_end, change_end_new_position).
+// Each group describes a change in the text; groups are sorted by
+// change_begin. Only positions beyond any changes may be successfully
+// translated. If a position is inside some region that changed, the result
+// is currently undefined.
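+// Example: with position_change_array [10, 15, 18] (the old range [10, 15)
+// was replaced by text ending at position 18), position 20 translates to 23.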
+static int TranslatePosition(int original_position,
+                             Handle<JSArray> position_change_array) {
+  int position_diff = 0;
+  int array_len = Smi::cast(position_change_array->length())->value();
+  // TODO(635): binary search may be used here
+  for (int i = 0; i < array_len; i += 3) {
+    int chunk_start =
+        Smi::cast(position_change_array->GetElement(i))->value();
+    if (original_position < chunk_start) {
+      break;
+    }
+    int chunk_end =
+        Smi::cast(position_change_array->GetElement(i + 1))->value();
+    // Position mustn't be inside a chunk.
+    ASSERT(original_position >= chunk_end);
+    int chunk_changed_end =
+        Smi::cast(position_change_array->GetElement(i + 2))->value();
+    position_diff = chunk_changed_end - chunk_end;
+  }
+
+  return original_position + position_diff;
+}
+
+
+// Auto-growing buffer for writing the relocation info code section. This
+// buffer is a simplified version of the buffer from Assembler. Unlike
+// Assembler, this class is platform-independent and works without dealing
+// with instructions.
+// As specified by the RelocInfo format, the buffer is filled in reverse
+// order: from upper to lower addresses.
+// It uses NewArray/DeleteArray for memory management.
+class RelocInfoBuffer {
+ public:
+  RelocInfoBuffer(int buffer_initial_capacity, byte* pc) {
+    buffer_size_ = buffer_initial_capacity + kBufferGap;
+    buffer_ = NewArray<byte>(buffer_size_);
+
+    reloc_info_writer_.Reposition(buffer_ + buffer_size_, pc);
+  }
+  ~RelocInfoBuffer() {
+    DeleteArray(buffer_);
+  }
+
+  // As specified by the RelocInfo format, the buffer is filled in reverse
+  // order: from upper to lower addresses.
+  void Write(const RelocInfo* rinfo) {
+    if (buffer_ + kBufferGap >= reloc_info_writer_.pos()) {
+      Grow();
+    }
+    reloc_info_writer_.Write(rinfo);
+  }
+
+  Vector<byte> GetResult() {
+    // Return the bytes from pos up to end of buffer.
+    int result_size =
+        static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer_.pos());
+    return Vector<byte>(reloc_info_writer_.pos(), result_size);
+  }
+
+ private:
+  void Grow() {
+    // Compute new buffer size.
+    int new_buffer_size;
+    if (buffer_size_ < 2 * KB) {
+      new_buffer_size = 4 * KB;
+    } else {
+      new_buffer_size = 2 * buffer_size_;
+    }
+    // Some internal data structures overflow for very large buffers, so
+    // kMaximalBufferSize must not be too large.
+    if (new_buffer_size > kMaximalBufferSize) {
+      V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
+    }
+
+    // Set up the new buffer.
+    byte* new_buffer = NewArray<byte>(new_buffer_size);
+
+    // Copy the data.
+    int currently_used_size =
+        static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
+    memmove(new_buffer + new_buffer_size - currently_used_size,
+            reloc_info_writer_.pos(), currently_used_size);
+
+    reloc_info_writer_.Reposition(
+        new_buffer + new_buffer_size - currently_used_size,
+        reloc_info_writer_.last_pc());
+
+    DeleteArray(buffer_);
+    buffer_ = new_buffer;
+    buffer_size_ = new_buffer_size;
+  }
+
+  RelocInfoWriter reloc_info_writer_;
+  byte* buffer_;
+  int buffer_size_;
+
+  static const int kBufferGap = 8;
+  static const int kMaximalBufferSize = 512*MB;
+};
+
+// Patches positions in code (changes the relocation info section) and
+// possibly returns a new instance of the code object.
+static Handle<Code> PatchPositionsInCode(Handle<Code> code,
+    Handle<JSArray> position_change_array) {
+
+  RelocInfoBuffer buffer_writer(code->relocation_size(),
+                                code->instruction_start());
+
+  {
+    AssertNoAllocation no_allocations_please;
+    for (RelocIterator it(*code); !it.done(); it.next()) {
+      RelocInfo* rinfo = it.rinfo();
+      if (RelocInfo::IsPosition(rinfo->rmode())) {
+        int position = static_cast<int>(rinfo->data());
+        int new_position = TranslatePosition(position,
+                                             position_change_array);
+        if (position != new_position) {
+          RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position);
+          buffer_writer.Write(&info_copy);
+          continue;
+        }
+      }
+      buffer_writer.Write(it.rinfo());
+    }
+  }
+
+  Vector<byte> buffer = buffer_writer.GetResult();
+
+  if (buffer.length() == code->relocation_size()) {
+    // Simply patch relocation area of code.
+    memcpy(code->relocation_start(), buffer.start(), buffer.length());
+    return code;
+  } else {
+    // Relocation info section now has different size. We cannot simply
+    // rewrite it inside code object. Instead we have to create a new
+    // code object.
+    Handle<Code> result(Factory::CopyCode(code, buffer));
+    return result;
+  }
+}
+
+
+Object* LiveEdit::PatchFunctionPositions(
+    Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
+
+  if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
+    return Top::ThrowIllegalOperation();
+  }
+
+  SharedInfoWrapper shared_info_wrapper(shared_info_array);
+  Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo();
+
+  int old_function_start = info->start_position();
+  int new_function_start = TranslatePosition(old_function_start,
+                                             position_change_array);
+  info->set_start_position(new_function_start);
+  info->set_end_position(TranslatePosition(info->end_position(),
+                                           position_change_array));
+
+  info->set_function_token_position(
+      TranslatePosition(info->function_token_position(),
+      position_change_array));
+
+  if (IsJSFunctionCode(info->code())) {
+    // Patch relocation info section of the code.
+    Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
+                                                     position_change_array);
+    if (*patched_code != info->code()) {
+      // Replace all references to the code across the heap. In particular,
+      // some stubs may refer to this code and this code may currently be
+      // executing on the stack (it is safe to substitute the code object on
+      // the stack, because we only change the structure of rinfo and leave
+      // the instructions untouched).
+      ReplaceCodeObject(info->code(), *patched_code);
+    }
+  }
+
+  return Heap::undefined_value();
+}
+
+
+static Handle<Script> CreateScriptCopy(Handle<Script> original) {
+  Handle<String> original_source(String::cast(original->source()));
+
+  Handle<Script> copy = Factory::NewScript(original_source);
+
+  copy->set_name(original->name());
+  copy->set_line_offset(original->line_offset());
+  copy->set_column_offset(original->column_offset());
+  copy->set_data(original->data());
+  copy->set_type(original->type());
+  copy->set_context_data(original->context_data());
+  copy->set_compilation_type(original->compilation_type());
+  copy->set_eval_from_shared(original->eval_from_shared());
+  copy->set_eval_from_instructions_offset(
+      original->eval_from_instructions_offset());
+
+  return copy;
+}
+
+
+Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
+                                     Handle<String> new_source,
+                                     Handle<Object> old_script_name) {
+  Handle<Object> old_script_object;
+  if (old_script_name->IsString()) {
+    Handle<Script> old_script = CreateScriptCopy(original_script);
+    old_script->set_name(String::cast(*old_script_name));
+    old_script_object = old_script;
+    Debugger::OnAfterCompile(old_script, Debugger::SEND_WHEN_DEBUGGING);
+  } else {
+    old_script_object = Handle<Object>(Heap::null_value());
+  }
+
+  original_script->set_source(*new_source);
+
+  // Drop line ends so that they will be recalculated.
+  original_script->set_line_ends(Heap::undefined_value());
+
+  return *old_script_object;
+}
+
+
+
+void LiveEdit::ReplaceRefToNestedFunction(
+    Handle<JSValue> parent_function_wrapper,
+    Handle<JSValue> orig_function_wrapper,
+    Handle<JSValue> subst_function_wrapper) {
+
+  Handle<SharedFunctionInfo> parent_shared =
+      Handle<SharedFunctionInfo>::cast(UnwrapJSValue(parent_function_wrapper));
+  Handle<SharedFunctionInfo> orig_shared =
+      Handle<SharedFunctionInfo>::cast(UnwrapJSValue(orig_function_wrapper));
+  Handle<SharedFunctionInfo> subst_shared =
+      Handle<SharedFunctionInfo>::cast(UnwrapJSValue(subst_function_wrapper));
+
+  for (RelocIterator it(parent_shared->code()); !it.done(); it.next()) {
+    if (it.rinfo()->rmode() == RelocInfo::EMBEDDED_OBJECT) {
+      if (it.rinfo()->target_object() == *orig_shared) {
+        it.rinfo()->set_target_object(*subst_shared);
+      }
+    }
+  }
+}
+
+
+// Checks an activation against a list of functions. If there is a function
+// that matches, its status in the result array is changed to the status
+// argument value.
+static bool CheckActivation(Handle<JSArray> shared_info_array,
+                            Handle<JSArray> result, StackFrame* frame,
+                            LiveEdit::FunctionPatchabilityStatus status) {
+  if (!frame->is_java_script()) {
+    return false;
+  }
+  int len = Smi::cast(shared_info_array->length())->value();
+  for (int i = 0; i < len; i++) {
+    JSValue* wrapper = JSValue::cast(shared_info_array->GetElement(i));
+    Handle<SharedFunctionInfo> shared(
+        SharedFunctionInfo::cast(wrapper->value()));
+
+    if (frame->code() == shared->code()) {
+      SetElement(result, i, Handle<Smi>(Smi::FromInt(status)));
+      return true;
+    }
+  }
+  return false;
+}
+
+
+// Iterates over the handler chain and removes all elements that are inside
+// the frames being dropped.
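+// The handler chain is a linked list threaded through the stack: each
+// handler slot stores the address of the next (older) handler.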
+static bool FixTryCatchHandler(StackFrame* top_frame,
+                               StackFrame* bottom_frame) {
+  Address* pointer_address =
+      &Memory::Address_at(Top::get_address_from_id(Top::k_handler_address));
+
+  while (*pointer_address < top_frame->sp()) {
+    pointer_address = &Memory::Address_at(*pointer_address);
+  }
+  Address* above_frame_address = pointer_address;
+  while (*pointer_address < bottom_frame->fp()) {
+    pointer_address = &Memory::Address_at(*pointer_address);
+  }
+  bool change = *above_frame_address != *pointer_address;
+  *above_frame_address = *pointer_address;
+  return change;
+}
+
+
+// Removes the specified range of frames from the stack. There may be one or
+// more frames in the range. In any case the bottom frame is restarted rather
+// than dropped, and therefore has to be a JavaScript frame.
+// Returns an error message or NULL.
+static const char* DropFrames(Vector<StackFrame*> frames,
+                              int top_frame_index,
+                              int bottom_js_frame_index) {
+  StackFrame* pre_top_frame = frames[top_frame_index - 1];
+  StackFrame* top_frame = frames[top_frame_index];
+  StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
+
+  ASSERT(bottom_js_frame->is_java_script());
+
+  // Check the nature of the top frame.
+  if (pre_top_frame->code()->is_inline_cache_stub() &&
+      pre_top_frame->code()->ic_state() == DEBUG_BREAK) {
+    // OK, we can drop inline cache calls.
+  } else if (pre_top_frame->code() ==
+      Builtins::builtin(Builtins::FrameDropper_LiveEdit)) {
+    // OK, we can drop our own code.
+  } else if (pre_top_frame->code()->kind() == Code::STUB &&
+      pre_top_frame->code()->major_key()) {
+    // Unit test entry; this case is supported.
+  } else {
+    return "Unknown structure of stack above changing function";
+  }
+
+  Address unused_stack_top = top_frame->sp();
+  Address unused_stack_bottom = bottom_js_frame->fp()
+      - Debug::kFrameDropperFrameSize * kPointerSize  // Size of the new frame.
+      + kPointerSize;  // The upper end of the range is exclusive.
+
+  if (unused_stack_top > unused_stack_bottom) {
+    return "Not enough space for frame dropper frame";
+  }
+
+  // Committing now. After this point we should only return NULL.
+
+  FixTryCatchHandler(pre_top_frame, bottom_js_frame);
+  // Make sure FixTryCatchHandler is idempotent.
+  ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
+
+  Handle<Code> code(Builtins::builtin(Builtins::FrameDropper_LiveEdit));
+  top_frame->set_pc(code->entry());
+  pre_top_frame->SetCallerFp(bottom_js_frame->fp());
+
+  Debug::SetUpFrameDropperFrame(bottom_js_frame, code);
+
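+  // Fill the now-unused stack region with Smi zeros so that it holds no
+  // stale object pointers.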
+  for (Address a = unused_stack_top;
+      a < unused_stack_bottom;
+      a += kPointerSize) {
+    Memory::Object_at(a) = Smi::FromInt(0);
+  }
+
+  return NULL;
+}
+
+
+static bool IsDropableFrame(StackFrame* frame) {
+  return !frame->is_exit();
+}
+
+// Fills the result array with the statuses of the functions. Modifies the
+// stack, removing all listed functions if possible and if do_drop is true.
+static const char* DropActivationsInActiveThread(
+    Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
+
+  ZoneScope scope(DELETE_ON_EXIT);
+  Vector<StackFrame*> frames = CreateStackMap();
+
+  int array_len = Smi::cast(shared_info_array->length())->value();
+
+  int top_frame_index = -1;
+  int frame_index = 0;
+  for (; frame_index < frames.length(); frame_index++) {
+    StackFrame* frame = frames[frame_index];
+    if (frame->id() == Debug::break_frame_id()) {
+      top_frame_index = frame_index;
+      break;
+    }
+    if (CheckActivation(shared_info_array, result, frame,
+                        LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
+      // We are still above the break_frame. Finding a target function here
+      // is a problem.
+      return "Debugger mark-up on stack is not found";
+    }
+  }
+
+  if (top_frame_index == -1) {
+    // We haven't found the break frame, but no function is blocking us
+    // anyway.
+    return NULL;
+  }
+
+  bool target_frame_found = false;
+  int bottom_js_frame_index = top_frame_index;
+  bool c_code_found = false;
+
+  for (; frame_index < frames.length(); frame_index++) {
+    StackFrame* frame = frames[frame_index];
+    if (!IsDropableFrame(frame)) {
+      c_code_found = true;
+      break;
+    }
+    if (CheckActivation(shared_info_array, result, frame,
+                        LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
+      target_frame_found = true;
+      bottom_js_frame_index = frame_index;
+    }
+  }
+
+  if (c_code_found) {
+    // There are C frames on the stack. Check that there are no target
+    // frames below them.
+    for (; frame_index < frames.length(); frame_index++) {
+      StackFrame* frame = frames[frame_index];
+      if (frame->is_java_script()) {
+        if (CheckActivation(shared_info_array, result, frame,
+                            LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
+          // Cannot drop frame under C frames.
+          return NULL;
+        }
+      }
+    }
+  }
+
+  if (!do_drop) {
+    // We are in check-only mode.
+    return NULL;
+  }
+
+  if (!target_frame_found) {
+    // Nothing to drop.
+    return NULL;
+  }
+
+  const char* error_message = DropFrames(frames, top_frame_index,
+                                         bottom_js_frame_index);
+
+  if (error_message != NULL) {
+    return error_message;
+  }
+
+  // Adjust break_frame after some frames have been dropped.
+  StackFrame::Id new_id = StackFrame::NO_ID;
+  for (int i = bottom_js_frame_index + 1; i < frames.length(); i++) {
+    if (frames[i]->type() == StackFrame::JAVA_SCRIPT) {
+      new_id = frames[i]->id();
+      break;
+    }
+  }
+  Debug::FramesHaveBeenDropped(new_id);
+
+  // Replace "blocked on active" with "replaced on active" status.
+  for (int i = 0; i < array_len; i++) {
+    if (result->GetElement(i) ==
+        Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
+      result->SetElement(i, Smi::FromInt(
+          LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
+    }
+  }
+  return NULL;
+}
+
+
+class InactiveThreadActivationsChecker : public ThreadVisitor {
+ public:
+  InactiveThreadActivationsChecker(Handle<JSArray> shared_info_array,
+                                   Handle<JSArray> result)
+      : shared_info_array_(shared_info_array), result_(result),
+        has_blocked_functions_(false) {
+  }
+  void VisitThread(ThreadLocalTop* top) {
+    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
+      has_blocked_functions_ |= CheckActivation(
+          shared_info_array_, result_, it.frame(),
+          LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
+    }
+  }
+  bool HasBlockedFunctions() {
+    return has_blocked_functions_;
+  }
+
+ private:
+  Handle<JSArray> shared_info_array_;
+  Handle<JSArray> result_;
+  bool has_blocked_functions_;
+};
+
+
+Handle<JSArray> LiveEdit::CheckAndDropActivations(
+    Handle<JSArray> shared_info_array, bool do_drop) {
+  int len = Smi::cast(shared_info_array->length())->value();
+
+  Handle<JSArray> result = Factory::NewJSArray(len);
+
+  // Fill the default values.
+  for (int i = 0; i < len; i++) {
+    SetElement(result, i,
+               Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH)));
+  }
+
+  // First check inactive threads. Fail if some functions are blocked there.
+  InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
+                                                            result);
+  ThreadManager::IterateThreads(&inactive_threads_checker);
+  if (inactive_threads_checker.HasBlockedFunctions()) {
+    return result;
+  }
+
+  // Try to drop activations from the current stack.
+  const char* error_message =
+      DropActivationsInActiveThread(shared_info_array, result, do_drop);
+  if (error_message != NULL) {
+    // Add error message as an array extra element.
+    Vector<const char> vector_message(error_message, StrLength(error_message));
+    Handle<String> str = Factory::NewStringFromAscii(vector_message);
+    SetElement(result, len, str);
+  }
+  return result;
+}
+
+
 LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
   if (active_function_info_listener != NULL) {
     active_function_info_listener->FunctionStarted(fun);
   }
 }
+
+
 LiveEditFunctionTracker::~LiveEditFunctionTracker() {
   if (active_function_info_listener != NULL) {
     active_function_info_listener->FunctionDone();
   }
 }
-void LiveEditFunctionTracker::RecordFunctionCode(Handle<Code> code) {
+
+
+void LiveEditFunctionTracker::RecordFunctionInfo(
+    Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
   if (active_function_info_listener != NULL) {
-    active_function_info_listener->FunctionCode(code);
+    active_function_info_listener->FunctionInfo(info, lit->scope());
   }
 }
-void LiveEditFunctionTracker::RecordFunctionScope(Scope* scope) {
-  if (active_function_info_listener != NULL) {
-    active_function_info_listener->FunctionScope(scope);
-  }
+
+
+void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
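+  // Note: unlike the methods above, this one does not check the listener for
+  // NULL; it must only be called while the tracker is active.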
+  active_function_info_listener->FunctionCode(code);
 }
+
+
 bool LiveEditFunctionTracker::IsActive() {
   return active_function_info_listener != NULL;
 }
 
+
+#else  // ENABLE_DEBUGGER_SUPPORT
+
+// This ifdef-else-endif section provides a working or a stub implementation
+// of LiveEditFunctionTracker.
+LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
+}
+
+
+LiveEditFunctionTracker::~LiveEditFunctionTracker() {
+}
+
+
+void LiveEditFunctionTracker::RecordFunctionInfo(
+    Handle<SharedFunctionInfo> info, FunctionLiteral* lit) {
+}
+
+
+void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
+}
+
+
+bool LiveEditFunctionTracker::IsActive() {
+  return false;
+}
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
 } }  // namespace v8::internal
diff --git a/src/liveedit.h b/src/liveedit.h
index 73aa7d3..d8e2a13 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -67,12 +67,104 @@
  public:
   explicit LiveEditFunctionTracker(FunctionLiteral* fun);
   ~LiveEditFunctionTracker();
-  void RecordFunctionCode(Handle<Code> code);
-  void RecordFunctionScope(Scope* scope);
+  void RecordFunctionInfo(Handle<SharedFunctionInfo> info,
+                          FunctionLiteral* lit);
+  void RecordRootFunctionInfo(Handle<Code> code);
 
   static bool IsActive();
 };
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+class LiveEdit : AllStatic {
+ public:
+  static JSArray* GatherCompileInfo(Handle<Script> script,
+                                    Handle<String> source);
+
+  static void WrapSharedFunctionInfos(Handle<JSArray> array);
+
+  static Object* ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
+                                     Handle<JSArray> shared_info_array);
+
+  // Updates the script field in SharedFunctionInfo.
+  static void SetFunctionScript(Handle<JSValue> function_wrapper,
+                                Handle<Object> script_handle);
+
+  static Object* PatchFunctionPositions(
+      Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array);
+
+  // Updates the source field of a script. If old_script_name is provided
+  // (i.e. is a String), also creates a copy of the script with its original
+  // source and sends a notification to the debugger.
+  static Object* ChangeScriptSource(Handle<Script> original_script,
+                                    Handle<String> new_source,
+                                    Handle<Object> old_script_name);
+
+  // In the code of a parent function, replaces the original function
+  // (embedded as an object) with a substitution one.
+  static void ReplaceRefToNestedFunction(Handle<JSValue> parent_function_shared,
+                                         Handle<JSValue> orig_function_shared,
+                                         Handle<JSValue> subst_function_shared);
+
+  // Checks the listed functions on the stack and returns an array with the
+  // corresponding FunctionPatchabilityStatus statuses; an extra array element
+  // may contain a general error message. If do_drop is true, modifies the
+  // current stack: restarts the lowest found frame and drops all other
+  // frames above it if possible.
+  static Handle<JSArray> CheckAndDropActivations(
+      Handle<JSArray> shared_info_array, bool do_drop);
+
+  // A copy of this is in liveedit-debugger.js.
+  enum FunctionPatchabilityStatus {
+    FUNCTION_AVAILABLE_FOR_PATCH = 1,
+    FUNCTION_BLOCKED_ON_ACTIVE_STACK = 2,
+    FUNCTION_BLOCKED_ON_OTHER_STACK = 3,
+    FUNCTION_BLOCKED_UNDER_NATIVE_CODE = 4,
+    FUNCTION_REPLACED_ON_ACTIVE_STACK = 5
+  };
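+  // For example, if the second of three functions being patched is currently
+  // executing on the active stack and do_drop succeeds, the returned array
+  // is [1, 5, 1]; a general error message, when present, is stored as an
+  // extra trailing string element.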
+
+  // Compares two strings line by line and returns the diff in the form of an
+  // array of triplets (pos1, pos1_end, pos2_end) describing the diff chunks.
+  static Handle<JSArray> CompareStringsLinewise(Handle<String> s1,
+                                                Handle<String> s2);
+};
+
+
+// A general-purpose comparator between 2 arrays.
+class Comparator {
+ public:
+
+  // Holds two arrays of elements and allows comparing any element of the
+  // first array with any element of the second array.
+  class Input {
+   public:
+    virtual int getLength1() = 0;
+    virtual int getLength2() = 0;
+    virtual bool equals(int index1, int index2) = 0;
+
+   protected:
+    virtual ~Input() {}
+  };
+
+  // Receives the compare result as a series of chunks.
+  class Output {
+   public:
+    // Puts another chunk in the result list. Note that, technically speaking,
+    // only 3 of the arguments are actually needed, since the remaining one
+    // can be derived from them.
+    virtual void AddChunk(int pos1, int pos2, int len1, int len2) = 0;
+
+   protected:
+    virtual ~Output() {}
+  };
+
+  // Finds the difference between 2 arrays of elements.
+  static void CalculateDifference(Input* input,
+                                  Output* result_writer);
+};
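+// For example, an Input implementation might wrap two arrays of source
+// lines and implement equals() by comparing the lines at the given indices,
+// while an Output implementation might collect the resulting chunks into
+// the triplets returned by LiveEdit::CompareStringsLinewise.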
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
 } }  // namespace v8::internal
 
 #endif /* V8_LIVEEDIT_H_ */
diff --git a/src/log-inl.h b/src/log-inl.h
index 1500252..02238fe 100644
--- a/src/log-inl.h
+++ b/src/log-inl.h
@@ -29,96 +29,29 @@
 #define V8_LOG_INL_H_
 
 #include "log.h"
+#include "cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
 
-//
-// VMState class implementation.  A simple stack of VM states held by the
-// logger and partially threaded through the call stack.  States are pushed by
-// VMState construction and popped by destruction.
-//
 #ifdef ENABLE_LOGGING_AND_PROFILING
-inline const char* StateToString(StateTag state) {
-  switch (state) {
-    case JS:
-      return "JS";
-    case GC:
-      return "GC";
-    case COMPILER:
-      return "COMPILER";
-    case OTHER:
-      return "OTHER";
-    default:
-      UNREACHABLE();
-      return NULL;
+
+Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
+                                                  Script* script) {
+  if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG)
+      && script->type()->value() == Script::TYPE_NATIVE) {
+    switch (tag) {
+      case FUNCTION_TAG: return NATIVE_FUNCTION_TAG;
+      case LAZY_COMPILE_TAG: return NATIVE_LAZY_COMPILE_TAG;
+      case SCRIPT_TAG: return NATIVE_SCRIPT_TAG;
+      default: return tag;
+    }
+  } else {
+    return tag;
   }
 }
 
-VMState::VMState(StateTag state) : disabled_(true), external_callback_(NULL) {
-  if (!Logger::is_logging()) {
-    return;
-  }
-
-  disabled_ = false;
-#if !defined(ENABLE_HEAP_PROTECTION)
-  // When not protecting the heap, there is no difference between
-  // EXTERNAL and OTHER.  As an optimization in that case, we will not
-  // perform EXTERNAL->OTHER transitions through the API.  We thus
-  // compress the two states into one.
-  if (state == EXTERNAL) state = OTHER;
-#endif
-  state_ = state;
-  previous_ = Logger::current_state_;
-  Logger::current_state_ = this;
-
-  if (FLAG_log_state_changes) {
-    LOG(UncheckedStringEvent("Entering", StateToString(state_)));
-    if (previous_ != NULL) {
-      LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
-    }
-  }
-
-#ifdef ENABLE_HEAP_PROTECTION
-  if (FLAG_protect_heap && previous_ != NULL) {
-    if (state_ == EXTERNAL) {
-      // We are leaving V8.
-      ASSERT(previous_->state_ != EXTERNAL);
-      Heap::Protect();
-    } else if (previous_->state_ == EXTERNAL) {
-      // We are entering V8.
-      Heap::Unprotect();
-    }
-  }
-#endif
-}
-
-
-VMState::~VMState() {
-  if (disabled_) return;
-  Logger::current_state_ = previous_;
-
-  if (FLAG_log_state_changes) {
-    LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
-    if (previous_ != NULL) {
-      LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
-    }
-  }
-
-#ifdef ENABLE_HEAP_PROTECTION
-  if (FLAG_protect_heap && previous_ != NULL) {
-    if (state_ == EXTERNAL) {
-      // We are reentering V8.
-      ASSERT(previous_->state_ != EXTERNAL);
-      Heap::Unprotect();
-    } else if (previous_->state_ == EXTERNAL) {
-      // We are leaving V8.
-      Heap::Protect();
-    }
-  }
-#endif
-}
-#endif
+#endif  // ENABLE_LOGGING_AND_PROFILING
 
 
 } }  // namespace v8::internal
diff --git a/src/log-utils.cc b/src/log-utils.cc
index 722e0fc..62f0ca6 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -196,6 +196,9 @@
   char* end_pos = dest_buf + actual_size - 1;
   while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
   actual_size = static_cast<int>(end_pos - dest_buf + 1);
+  // If the assertion below is hit, it means that no line end was found:
+  // something has gone wrong.
+  ASSERT(actual_size > 0);
   ASSERT(actual_size <= max_size);
   return actual_size;
 }
diff --git a/src/log-utils.h b/src/log-utils.h
index b769e90..8889f1b 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -115,7 +115,7 @@
   }
 
   // Size of buffer used for formatting log messages.
-  static const int kMessageBufferSize = 2048;
+  static const int kMessageBufferSize = v8::V8::kMinimumSizeForLogLinesBuffer;
 
  private:
   typedef int (*WritePtr)(const char* msg, int length);
diff --git a/src/log.cc b/src/log.cc
index a3fef73..e1ebc87 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -143,15 +143,14 @@
 // StackTracer implementation
 //
 void StackTracer::Trace(TickSample* sample) {
-  if (sample->state == GC) {
-    sample->frames_count = 0;
-    return;
-  }
+  sample->function = NULL;
+  sample->frames_count = 0;
+
+  if (sample->state == GC) return;
 
   const Address js_entry_sp = Top::js_entry_sp(Top::GetCurrentThread());
   if (js_entry_sp == 0) {
     // Not executing JS now.
-    sample->frames_count = 0;
     return;
   }
 
@@ -163,8 +162,7 @@
   }
 
   int i = 0;
-  const Address callback = Logger::current_state_ != NULL ?
-      Logger::current_state_->external_callback() : NULL;
+  const Address callback = VMState::external_callback();
   if (callback != NULL) {
     sample->stack[i++] = callback;
   }
@@ -324,12 +322,10 @@
 //
 Ticker* Logger::ticker_ = NULL;
 Profiler* Logger::profiler_ = NULL;
-VMState* Logger::current_state_ = NULL;
-VMState Logger::bottom_state_(EXTERNAL);
 SlidingStateWindow* Logger::sliding_state_window_ = NULL;
 const char** Logger::log_events_ = NULL;
 CompressionHelper* Logger::compression_helper_ = NULL;
-bool Logger::is_logging_ = false;
+int Logger::logging_nesting_ = 0;
 int Logger::cpu_profiler_nesting_ = 0;
 int Logger::heap_profiler_nesting_ = 0;
 
@@ -389,12 +385,19 @@
 
 void Logger::IntEvent(const char* name, int value) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::IsEnabled() || !FLAG_log) return;
+  if (FLAG_log) UncheckedIntEvent(name, value);
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::UncheckedIntEvent(const char* name, int value) {
+  if (!Log::IsEnabled()) return;
   LogMessageBuilder msg;
   msg.Append("%s,%d\n", name, value);
   msg.WriteToLogFile();
-#endif
 }
+#endif
 
 
 void Logger::HandleEvent(const char* name, Object** location) {
@@ -1169,19 +1172,18 @@
         // Must be the same message as Log::kDynamicBufferSeal.
         LOG(UncheckedStringEvent("profiler", "pause"));
       }
+      --logging_nesting_;
     }
   }
   if (flags &
       (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
     if (--heap_profiler_nesting_ == 0) {
       FLAG_log_gc = false;
+      --logging_nesting_;
     }
   }
   if (tag != 0) {
-    IntEvent("close-tag", tag);
-  }
-  if (GetActiveProfilerModules() == PROFILER_MODULE_NONE) {
-    is_logging_ = false;
+    UncheckedIntEvent("close-tag", tag);
   }
 }
 
@@ -1189,11 +1191,11 @@
 void Logger::ResumeProfiler(int flags, int tag) {
   if (!Log::IsEnabled()) return;
   if (tag != 0) {
-    IntEvent("open-tag", tag);
+    UncheckedIntEvent("open-tag", tag);
   }
   if (flags & PROFILER_MODULE_CPU) {
     if (cpu_profiler_nesting_++ == 0) {
-      is_logging_ = true;
+      ++logging_nesting_;
       if (FLAG_prof_lazy) {
         profiler_->Engage();
         LOG(UncheckedStringEvent("profiler", "resume"));
@@ -1209,7 +1211,7 @@
   if (flags &
       (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
     if (heap_profiler_nesting_++ == 0) {
-      is_logging_ = true;
+      ++logging_nesting_;
       FLAG_log_gc = true;
     }
   }
@@ -1261,6 +1263,8 @@
     switch (code_object->kind()) {
       case Code::FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
+      case Code::BINARY_OP_IC:
+        // fall through
       case Code::STUB:
         description = CodeStub::MajorName(code_object->major_key(), true);
         if (description == NULL)
@@ -1292,7 +1296,7 @@
         tag = Logger::CALL_IC_TAG;
         break;
     }
-    LOG(CodeCreateEvent(tag, code_object, description));
+    PROFILE(CodeCreateEvent(tag, code_object, description));
   }
 }
 
@@ -1326,31 +1330,33 @@
         Handle<String> script_name(String::cast(script->name()));
         int line_num = GetScriptLineNumber(script, shared->start_position());
         if (line_num > 0) {
-          LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
-                              shared->code(), *func_name,
-                              *script_name, line_num + 1));
+          PROFILE(CodeCreateEvent(
+              Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
+              shared->code(), *func_name,
+              *script_name, line_num + 1));
         } else {
-          // Can't distinguish enum and script here, so always use Script.
-          LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
-                              shared->code(), *script_name));
+          // Can't distinguish eval and script here, so always use Script.
+          PROFILE(CodeCreateEvent(
+              Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
+              shared->code(), *script_name));
         }
       } else {
-        LOG(CodeCreateEvent(
-            Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
+        PROFILE(CodeCreateEvent(
+            Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
+            shared->code(), *func_name));
       }
-    } else if (shared->function_data()->IsFunctionTemplateInfo()) {
+    } else if (shared->IsApiFunction()) {
       // API function.
-      FunctionTemplateInfo* fun_data =
-          FunctionTemplateInfo::cast(shared->function_data());
+      FunctionTemplateInfo* fun_data = shared->get_api_func_data();
       Object* raw_call_data = fun_data->call_code();
       if (!raw_call_data->IsUndefined()) {
         CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
         Object* callback_obj = call_data->callback();
         Address entry_point = v8::ToCData<Address>(callback_obj);
-        LOG(CallbackEvent(*func_name, entry_point));
+        PROFILE(CallbackEvent(*func_name, entry_point));
       }
     } else {
-      LOG(CodeCreateEvent(
+      PROFILE(CodeCreateEvent(
           Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
     }
   }
@@ -1366,7 +1372,7 @@
     if (!obj->IsJSFunction()) continue;
     JSFunction* jsf = JSFunction::cast(obj);
     if (!jsf->is_compiled()) continue;
-    LOG(FunctionCreateEvent(jsf));
+    PROFILE(FunctionCreateEvent(jsf));
   }
 }
 
@@ -1381,11 +1387,11 @@
     String* name = String::cast(ai->name());
     Address getter_entry = v8::ToCData<Address>(ai->getter());
     if (getter_entry != 0) {
-      LOG(GetterCallbackEvent(name, getter_entry));
+      PROFILE(GetterCallbackEvent(name, getter_entry));
     }
     Address setter_entry = v8::ToCData<Address>(ai->setter());
     if (setter_entry != 0) {
-      LOG(SetterCallbackEvent(name, setter_entry));
+      PROFILE(SetterCallbackEvent(name, setter_entry));
     }
   }
 }
@@ -1468,7 +1474,7 @@
     }
   }
 
-  current_state_ = &bottom_state_;
+  ASSERT(VMState::is_outermost_external());
 
   ticker_ = new Ticker(kSamplingIntervalMs);
 
@@ -1482,14 +1488,16 @@
     compression_helper_ = new CompressionHelper(kCompressionWindowSize);
   }
 
-  is_logging_ = start_logging;
+  if (start_logging) {
+    logging_nesting_ = 1;
+  }
 
   if (FLAG_prof) {
     profiler_ = new Profiler();
     if (!FLAG_prof_auto) {
       profiler_->pause();
     } else {
-      is_logging_ = true;
+      logging_nesting_ = 1;
     }
     if (!FLAG_prof_lazy) {
       profiler_->Engage();
@@ -1549,5 +1557,4 @@
 #endif
 }
 
-
 } }  // namespace v8::internal
diff --git a/src/log.h b/src/log.h
index eb8369c..a1441ac 100644
--- a/src/log.h
+++ b/src/log.h
@@ -87,31 +87,6 @@
 #define LOG(Call) ((void) 0)
 #endif
 
-
-class VMState BASE_EMBEDDED {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- public:
-  inline VMState(StateTag state);
-  inline ~VMState();
-
-  StateTag state() { return state_; }
-  Address external_callback() { return external_callback_; }
-  void set_external_callback(Address external_callback) {
-    external_callback_ = external_callback;
-  }
-
- private:
-  bool disabled_;
-  StateTag state_;
-  VMState* previous_;
-  Address external_callback_;
-#else
- public:
-  explicit VMState(StateTag state) {}
-#endif
-};
-
-
 #define LOG_EVENTS_AND_TAGS_LIST(V) \
   V(CODE_CREATION_EVENT,            "code-creation",          "cc")       \
   V(CODE_MOVE_EVENT,                "code-move",              "cm")       \
@@ -141,7 +116,13 @@
   V(REG_EXP_TAG,                    "RegExp",                 "re")       \
   V(SCRIPT_TAG,                     "Script",                 "sc")       \
   V(STORE_IC_TAG,                   "StoreIC",                "sic")      \
-  V(STUB_TAG,                       "Stub",                   "s")
+  V(STUB_TAG,                       "Stub",                   "s")        \
+  V(NATIVE_FUNCTION_TAG,            "Function",               "f")        \
+  V(NATIVE_LAZY_COMPILE_TAG,        "LazyCompile",            "lc")       \
+  V(NATIVE_SCRIPT_TAG,              "Script",                 "sc")
+// Note that 'NATIVE_' cases for functions and scripts are mapped onto
+// original tags when writing to the log.
+
 
 class Logger {
  public:
@@ -260,12 +241,8 @@
   static void LogRuntime(Vector<const char> format, JSArray* args);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  static StateTag state() {
-    return current_state_ ? current_state_->state() : OTHER;
-  }
-
   static bool is_logging() {
-    return is_logging_;
+    return logging_nesting_ > 0;
   }
 
   // Pause/Resume collection of profiling data.
@@ -288,11 +265,14 @@
   // Used for logging stubs found in the snapshot.
   static void LogCodeObjects();
 
- private:
+  // Converts tag to a corresponding NATIVE_... if the script is native.
+  INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
 
   // Profiler's sampling interval (in milliseconds).
   static const int kSamplingIntervalMs = 1;
 
+ private:
+
   // Size of window used for log records compression.
   static const int kCompressionWindowSize = 4;
 
@@ -330,6 +310,9 @@
   // Logs a StringEvent regardless of whether FLAG_log is true.
   static void UncheckedStringEvent(const char* name, const char* value);
 
+  // Logs an IntEvent regardless of whether FLAG_log is true.
+  static void UncheckedIntEvent(const char* name, int value);
+
   // Stops logging and profiling in case of insufficient resources.
   static void StopLoggingAndProfiling();
 
@@ -344,12 +327,6 @@
   // of samples.
   static Profiler* profiler_;
 
-  // A stack of VM states.
-  static VMState* current_state_;
-
-  // Singleton bottom or default vm state.
-  static VMState bottom_state_;
-
   // SlidingStateWindow instance keeping a sliding window of the most
   // recent VM states.
   static SlidingStateWindow* sliding_state_window_;
@@ -372,9 +349,11 @@
 
   friend class LoggerTestHelper;
 
-  static bool is_logging_;
+  static int logging_nesting_;
   static int cpu_profiler_nesting_;
   static int heap_profiler_nesting_;
+
+  friend class CpuProfiler;
 #else
   static bool is_logging() { return false; }
 #endif
@@ -387,7 +366,7 @@
   static void Trace(TickSample* sample);
 };
 
-
 } }  // namespace v8::internal
 
+
 #endif  // V8_LOG_H_
diff --git a/src/macros.py b/src/macros.py
index 4751933..d6ba2ca 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -73,6 +73,19 @@
 const kYearShift          = 9;
 const kMonthShift         = 5;
 
+# Limits for parts of the date, so that we support all the dates that
+# ECMA 262 - 15.9.1.1 requires us to, while making sure that the date
+# (days since 1970) stays in SMI range.
+const kMinYear  = -1000000;
+const kMaxYear  = 1000000;
+const kMinMonth = -10000000;
+const kMaxMonth = 10000000;
+const kMinDate  = -100000000;
+const kMaxDate  = 100000000;
+
+# Native cache ids.
+const STRING_TO_REGEXP_CACHE_ID = 0;
+
 # Type query macros.
 #
 # Note: We have special support for typeof(foo) === 'bar' in the compiler.
@@ -118,13 +131,16 @@
 # REGEXP_NUMBER_OF_CAPTURES
 macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
 
+# Limit according to ECMA 262 15.9.1.1
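+# (8640000000000000 ms is exactly 100,000,000 days of 86,400,000 ms each.)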
+const MAX_TIME_MS = 8640000000000000;
+
 # Gets the value of a Date object. If arg is not a Date object
 # a type error is thrown.
 macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
 macro DAY(time) = ($floor(time / 86400000));
-macro MONTH_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).month);
-macro DATE_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).date);
-macro YEAR_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).year);
+macro MONTH_FROM_TIME(time) = (MonthFromTime(time));
+macro DATE_FROM_TIME(time) = (DateFromTime(time));
+macro YEAR_FROM_TIME(time) = (YearFromTime(time));
 macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
 macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
 macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 1f2c37d..e3cc6ab 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -53,13 +53,13 @@
 // Counters used for debugging the marking phase of mark-compact or mark-sweep
 // collection.
 int MarkCompactCollector::live_bytes_ = 0;
-int MarkCompactCollector::live_young_objects_ = 0;
-int MarkCompactCollector::live_old_data_objects_ = 0;
-int MarkCompactCollector::live_old_pointer_objects_ = 0;
-int MarkCompactCollector::live_code_objects_ = 0;
-int MarkCompactCollector::live_map_objects_ = 0;
-int MarkCompactCollector::live_cell_objects_ = 0;
-int MarkCompactCollector::live_lo_objects_ = 0;
+int MarkCompactCollector::live_young_objects_size_ = 0;
+int MarkCompactCollector::live_old_data_objects_size_ = 0;
+int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
+int MarkCompactCollector::live_code_objects_size_ = 0;
+int MarkCompactCollector::live_map_objects_size_ = 0;
+int MarkCompactCollector::live_cell_objects_size_ = 0;
+int MarkCompactCollector::live_lo_objects_size_ = 0;
 #endif
 
 void MarkCompactCollector::CollectGarbage() {
@@ -136,13 +136,13 @@
 
 #ifdef DEBUG
   live_bytes_ = 0;
-  live_young_objects_ = 0;
-  live_old_pointer_objects_ = 0;
-  live_old_data_objects_ = 0;
-  live_code_objects_ = 0;
-  live_map_objects_ = 0;
-  live_cell_objects_ = 0;
-  live_lo_objects_ = 0;
+  live_young_objects_size_ = 0;
+  live_old_pointer_objects_size_ = 0;
+  live_old_data_objects_size_ = 0;
+  live_code_objects_size_ = 0;
+  live_map_objects_size_ = 0;
+  live_cell_objects_size_ = 0;
+  live_lo_objects_size_ = 0;
 #endif
 }
 
@@ -742,21 +742,21 @@
 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
   live_bytes_ += obj->Size();
   if (Heap::new_space()->Contains(obj)) {
-    live_young_objects_++;
+    live_young_objects_size_ += obj->Size();
   } else if (Heap::map_space()->Contains(obj)) {
     ASSERT(obj->IsMap());
-    live_map_objects_++;
+    live_map_objects_size_ += obj->Size();
   } else if (Heap::cell_space()->Contains(obj)) {
     ASSERT(obj->IsJSGlobalPropertyCell());
-    live_cell_objects_++;
+    live_cell_objects_size_ += obj->Size();
   } else if (Heap::old_pointer_space()->Contains(obj)) {
-    live_old_pointer_objects_++;
+    live_old_pointer_objects_size_ += obj->Size();
   } else if (Heap::old_data_space()->Contains(obj)) {
-    live_old_data_objects_++;
+    live_old_data_objects_size_ += obj->Size();
   } else if (Heap::code_space()->Contains(obj)) {
-    live_code_objects_++;
+    live_code_objects_size_ += obj->Size();
   } else if (Heap::lo_space()->Contains(obj)) {
-    live_lo_objects_++;
+    live_lo_objects_size_ += obj->Size();
   } else {
     UNREACHABLE();
   }
@@ -1055,6 +1055,7 @@
   PageIterator it(space, PageIterator::PAGES_IN_USE);
   while (it.has_next()) {
     Page* p = it.next();
+
     // The offset of each live object in the page from the first live object
     // in the page.
     int offset = 0;
@@ -1068,36 +1069,238 @@
 }
 
 
-static void SweepSpace(NewSpace* space) {
+// We scavenge new space simultaneously with sweeping. This is done in two
+// passes.
+// The first pass migrates all live objects from one semispace to the other
+// or promotes them to old space. The forwarding address is written directly
+// into the first word of the object, without any encoding. If the object is
+// dead, we write NULL as the forwarding address.
+// The second pass updates pointers to new space in all spaces. It is
+// possible to encounter pointers to dead objects during traversal of the
+// remembered set for map space, because remembered set bits corresponding to
+// dead maps are cleared later, during map space sweeping.
+static void MigrateObject(Address dst, Address src, int size) {
+  Heap::CopyBlock(reinterpret_cast<Object**>(dst),
+                  reinterpret_cast<Object**>(src),
+                  size);
+
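+  // Leave the forwarding address in the first word of the old (from-space)
+  // copy.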
+  Memory::Address_at(src) = dst;
+}
+
+
+// Visitor for updating pointers from live objects in old spaces to new space.
+// It does not expect to encounter pointers to dead objects.
+class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
+ public:
+  void VisitPointer(Object** p) {
+    UpdatePointer(p);
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    VisitPointer(&target);
+    rinfo->set_target_address(Code::cast(target)->instruction_start());
+  }
+
+  void VisitDebugTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
+           rinfo->IsPatchedReturnSequence());
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    VisitPointer(&target);
+    rinfo->set_call_address(Code::cast(target)->instruction_start());
+  }
+
+ private:
+  void UpdatePointer(Object** p) {
+    if (!(*p)->IsHeapObject()) return;
+
+    HeapObject* obj = HeapObject::cast(*p);
+    Address old_addr = obj->address();
+
+    if (Heap::new_space()->Contains(obj)) {
+      ASSERT(Heap::InFromSpace(*p));
+      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
+    }
+  }
+};
+
+// Updates pointers from live objects in old spaces to new space. Unlike the
+// visitor above, it can encounter pointers to dead objects in new space when
+// traversing map space (see the comment for MigrateObject).
+static void UpdatePointerToNewGen(HeapObject** p) {
+  if (!(*p)->IsHeapObject()) return;
+
+  Address old_addr = (*p)->address();
+  ASSERT(Heap::InFromSpace(*p));
+
+  Address new_addr = Memory::Address_at(old_addr);
+
+  // The object pointed to by *p is dead. No update is required.
+  if (new_addr == NULL) return;
+
+  *p = HeapObject::FromAddress(new_addr);
+}
+
+
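+// Returns the new location of a string from the external string table. It
+// assumes the string survived the first pass, which stored the forwarding
+// address in its first word.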
+static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
+  Address old_addr = HeapObject::cast(*p)->address();
+  Address new_addr = Memory::Address_at(old_addr);
+  return String::cast(HeapObject::FromAddress(new_addr));
+}
+
+
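+// Tries to promote a live new-space object into the large object space or an
+// old space. Returns false if allocation there fails; the caller then copies
+// the object within new space instead.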
+static bool TryPromoteObject(HeapObject* object, int object_size) {
+  Object* result;
+
+  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
+    result = Heap::lo_space()->AllocateRawFixedArray(object_size);
+    if (!result->IsFailure()) {
+      HeapObject* target = HeapObject::cast(result);
+      MigrateObject(target->address(), object->address(), object_size);
+      Heap::UpdateRSet(target);
+      return true;
+    }
+  } else {
+    OldSpace* target_space = Heap::TargetSpace(object);
+
+    ASSERT(target_space == Heap::old_pointer_space() ||
+           target_space == Heap::old_data_space());
+    result = target_space->AllocateRaw(object_size);
+    if (!result->IsFailure()) {
+      HeapObject* target = HeapObject::cast(result);
+      MigrateObject(target->address(), object->address(), object_size);
+      if (target_space == Heap::old_pointer_space()) {
+        Heap::UpdateRSet(target);
+      }
+      return true;
+    }
+  }
+
+  return false;
+}
+
+
+static void SweepNewSpace(NewSpace* space) {
+  Heap::CheckNewSpaceExpansionCriteria();
+
+  Address from_bottom = space->bottom();
+  Address from_top = space->top();
+
+  // Flip the semispaces.  After flipping, to space is empty, from space has
+  // live objects.
+  space->Flip();
+  space->ResetAllocationInfo();
+
+  int size = 0;
+  int survivors_size = 0;
+
+  // First pass: traverse all objects in inactive semispace, remove marks,
+  // migrate live objects and write forwarding addresses.
+  for (Address current = from_bottom; current < from_top; current += size) {
+    HeapObject* object = HeapObject::FromAddress(current);
+
+    if (object->IsMarked()) {
+      object->ClearMark();
+      MarkCompactCollector::tracer()->decrement_marked_count();
+
+      size = object->Size();
+      survivors_size += size;
+
+      // Aggressively promote young survivors to the old space.
+      if (TryPromoteObject(object, size)) {
+        continue;
+      }
+
+      // Promotion either failed or was not required.
+      // Copy the content of the object.
+      Object* target = space->AllocateRaw(size);
+
+      // Allocation cannot fail at this point: semispaces are of equal size.
+      ASSERT(!target->IsFailure());
+
+      MigrateObject(HeapObject::cast(target)->address(), current, size);
+    } else {
+      size = object->Size();
+      Memory::Address_at(current) = NULL;
+    }
+  }
+
+  // Second pass: find pointers to new space and update them.
+  PointersToNewGenUpdatingVisitor updating_visitor;
+
+  // Update pointers in to space.
   HeapObject* object;
   for (Address current = space->bottom();
        current < space->top();
        current += object->Size()) {
     object = HeapObject::FromAddress(current);
-    if (object->IsMarked()) {
-      object->ClearMark();
-      MarkCompactCollector::tracer()->decrement_marked_count();
-    } else {
-      // We give non-live objects a map that will correctly give their size,
-      // since their existing map might not be live after the collection.
-      int size = object->Size();
-      if (size >= ByteArray::kHeaderSize) {
-        object->set_map(Heap::raw_unchecked_byte_array_map());
-        ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
-      } else {
-        ASSERT(size == kPointerSize);
-        object->set_map(Heap::raw_unchecked_one_pointer_filler_map());
-      }
-      ASSERT(object->Size() == size);
-    }
-    // The object is now unmarked for the call to Size() at the top of the
-    // loop.
+
+    object->IterateBody(object->map()->instance_type(),
+                        object->Size(),
+                        &updating_visitor);
   }
+
+  // Update roots.
+  Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
+
+  // Update pointers in old spaces.
+  Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen);
+  Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen);
+  Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen);
+
+  // Update pointers from cells.
+  HeapObjectIterator cell_iterator(Heap::cell_space());
+  for (HeapObject* cell = cell_iterator.next();
+       cell != NULL;
+       cell = cell_iterator.next()) {
+    if (cell->IsJSGlobalPropertyCell()) {
+      Address value_address =
+          reinterpret_cast<Address>(cell) +
+          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
+      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+    }
+  }
+
+  // Update pointers from external string table.
+  Heap::UpdateNewSpaceReferencesInExternalStringTable(
+      &UpdateNewSpaceReferenceInExternalStringTableEntry);
+
+  // All pointers were updated. Update auxiliary allocation info.
+  Heap::IncrementYoungSurvivorsCounter(survivors_size);
+  space->set_age_mark(space->top());
 }
 
 
 static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
   PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+  // During sweeping of a paged space we try to find the longest sequences of
+  // pages without live objects and free them (instead of putting them on the
+  // free list).
+
+  // Page preceding current.
+  Page* prev = Page::FromAddress(NULL);
+
+  // First empty page in a sequence.
+  Page* first_empty_page = Page::FromAddress(NULL);
+
+  // Page preceding first empty page.
+  Page* prec_first_empty_page = Page::FromAddress(NULL);
+
+  // If the last used page of the space ends with a sequence of dead objects,
+  // we can adjust the allocation top instead of putting this free area into
+  // the free list. Thus during sweeping we keep track of such areas and
+  // defer their deallocation until the sweeping of the next page is done: if
+  // one of the next pages contains live objects, we have to put such an area
+  // into the free list.
+  Address last_free_start = NULL;
+  int last_free_size = 0;
+
   while (it.has_next()) {
     Page* p = it.next();
 
@@ -1112,8 +1315,9 @@
       if (object->IsMarked()) {
         object->ClearMark();
         MarkCompactCollector::tracer()->decrement_marked_count();
+
         if (!is_previous_alive) {  // Transition from free to live.
-          dealloc(free_start, static_cast<int>(current - free_start));
+          dealloc(free_start, static_cast<int>(current - free_start), true);
           is_previous_alive = true;
         }
       } else {
@@ -1127,39 +1331,113 @@
       // loop.
     }
 
-    // If the last region was not live we need to deallocate from
-    // free_start to the allocation top in the page.
-    if (!is_previous_alive) {
-      int free_size = static_cast<int>(p->AllocationTop() - free_start);
-      if (free_size > 0) {
-        dealloc(free_start, free_size);
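+    // A page is empty if nothing was ever allocated on it or if every object
+    // allocated on it is dead.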
+    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
+        || (!is_previous_alive && free_start == p->ObjectAreaStart());
+
+    if (page_is_empty) {
+      // This page is empty. Check whether we are in the middle of a sequence
+      // of empty pages and start one if not.
+      if (!first_empty_page->is_valid()) {
+        first_empty_page = p;
+        prec_first_empty_page = prev;
+      }
+
+      if (!is_previous_alive) {
+        // There are dead objects on this page. Update space accounting stats
+        // without putting anything into the free list.
+        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
+        if (size_in_bytes > 0) {
+          dealloc(free_start, size_in_bytes, false);
+        }
+      }
+    } else {
+      // This page is not empty. Sequence of empty pages ended on the previous
+      // one.
+      if (first_empty_page->is_valid()) {
+        space->FreePages(prec_first_empty_page, prev);
+        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
+      }
+
+      // If there is a free ending area on one of the previous pages, we have
+      // to deallocate that area and put it on the free list.
+      if (last_free_size > 0) {
+        dealloc(last_free_start, last_free_size, true);
+        last_free_start = NULL;
+        last_free_size  = 0;
+      }
+
+      // If the last region of this page was not live, we remember it.
+      if (!is_previous_alive) {
+        ASSERT(last_free_size == 0);
+        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
+        last_free_start = free_start;
       }
     }
+
+    prev = p;
+  }
+
+  // We reached the end of the space. See if we need to adjust the
+  // allocation top.
+  Address new_allocation_top = NULL;
+
+  if (first_empty_page->is_valid()) {
+    // Last used pages in space are empty. We can move allocation top backwards
+    // to the beginning of first empty page.
+    ASSERT(prev == space->AllocationTopPage());
+
+    new_allocation_top = first_empty_page->ObjectAreaStart();
+  }
+
+  if (last_free_size > 0) {
+    // There was a free ending area on the previous page.
+    // Deallocate it without putting it into the free list and move the
+    // allocation top to the beginning of this free area.
+    dealloc(last_free_start, last_free_size, false);
+    new_allocation_top = last_free_start;
+  }
+
+  if (new_allocation_top != NULL) {
+#ifdef DEBUG
+    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
+    if (!first_empty_page->is_valid()) {
+      ASSERT(new_allocation_top_page == space->AllocationTopPage());
+    } else if (last_free_size > 0) {
+      ASSERT(new_allocation_top_page == prec_first_empty_page);
+    } else {
+      ASSERT(new_allocation_top_page == first_empty_page);
+    }
+#endif
+
+    space->SetTop(new_allocation_top);
   }
 }
 
 
 void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
-                                                     int size_in_bytes) {
+                                                     int size_in_bytes,
+                                                     bool add_to_freelist) {
   Heap::ClearRSetRange(start, size_in_bytes);
-  Heap::old_pointer_space()->Free(start, size_in_bytes);
+  Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
 }
 
 
 void MarkCompactCollector::DeallocateOldDataBlock(Address start,
-                                                  int size_in_bytes) {
-  Heap::old_data_space()->Free(start, size_in_bytes);
+                                                  int size_in_bytes,
+                                                  bool add_to_freelist) {
+  Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
 }
 
 
 void MarkCompactCollector::DeallocateCodeBlock(Address start,
-                                               int size_in_bytes) {
-  Heap::code_space()->Free(start, size_in_bytes);
+                                               int size_in_bytes,
+                                               bool add_to_freelist) {
+  Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
 }
 
 
 void MarkCompactCollector::DeallocateMapBlock(Address start,
-                                              int size_in_bytes) {
+                                              int size_in_bytes,
+                                              bool add_to_freelist) {
   // Objects in map space are assumed to have size Map::kSize and a
   // valid map in their first word.  Thus, we break the free block up into
   // chunks and free them separately.
@@ -1167,13 +1445,14 @@
   Heap::ClearRSetRange(start, size_in_bytes);
   Address end = start + size_in_bytes;
   for (Address a = start; a < end; a += Map::kSize) {
-    Heap::map_space()->Free(a);
+    Heap::map_space()->Free(a, add_to_freelist);
   }
 }
 
 
 void MarkCompactCollector::DeallocateCellBlock(Address start,
-                                               int size_in_bytes) {
+                                               int size_in_bytes,
+                                               bool add_to_freelist) {
   // Free-list elements in cell space are assumed to have a fixed size.
   // We break the free block into chunks and add them to the free list
   // individually.
@@ -1182,7 +1461,7 @@
   Heap::ClearRSetRange(start, size_in_bytes);
   Address end = start + size_in_bytes;
   for (Address a = start; a < end; a += size) {
-    Heap::cell_space()->Free(a);
+    Heap::cell_space()->Free(a, add_to_freelist);
   }
 }
 
@@ -1382,10 +1661,12 @@
     ASSERT(FreeListNode::IsFreeListNode(vacant_map));
     ASSERT(map_to_evacuate->IsMap());
 
-    memcpy(
-        reinterpret_cast<void*>(vacant_map->address()),
-        reinterpret_cast<void*>(map_to_evacuate->address()),
-        Map::kSize);
+    ASSERT(Map::kSize % 4 == 0);
+
+    Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()),
+                    reinterpret_cast<Object**>(map_to_evacuate->address()),
+                    Map::kSize);
+
-    ASSERT(vacant_map->IsMap());  // Due to memcpy above.
+    ASSERT(vacant_map->IsMap());  // Due to CopyBlock above.
 
     MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
@@ -1465,10 +1746,11 @@
   SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
   SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
   SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
-  SweepSpace(Heap::new_space());
+  SweepNewSpace(Heap::new_space());
   SweepSpace(Heap::map_space(), &DeallocateMapBlock);
-  int live_maps = Heap::map_space()->Size() / Map::kSize;
-  ASSERT(live_map_objects_ == live_maps);
+  int live_maps_size = Heap::map_space()->Size();
+  int live_maps = live_maps_size / Map::kSize;
+  ASSERT(live_map_objects_size_ == live_maps_size);
 
   if (Heap::map_space()->NeedsCompaction(live_maps)) {
     MapCompact map_compact(live_maps);
@@ -1500,7 +1782,7 @@
     Address start,
     Address end,
     HeapObjectCallback size_func) {
-  int live_objects = 0;
+  int live_objects_size = 0;
   Address current = start;
   while (current < end) {
     uint32_t encoded_map = Memory::uint32_at(current);
@@ -1509,11 +1791,12 @@
     } else if (encoded_map == kMultiFreeEncoding) {
       current += Memory::int_at(current + kIntSize);
     } else {
-      live_objects++;
-      current += size_func(HeapObject::FromAddress(current));
+      int size = size_func(HeapObject::FromAddress(current));
+      current += size;
+      live_objects_size += size;
     }
   }
-  return live_objects;
+  return live_objects_size;
 }
 
 
@@ -1639,36 +1922,36 @@
   Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
   GlobalHandles::IterateWeakRoots(&updating_visitor);
 
-  int live_maps = IterateLiveObjects(Heap::map_space(),
-                                     &UpdatePointersInOldObject);
-  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
-                                             &UpdatePointersInOldObject);
-  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
+  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                           &UpdatePointersInOldObject);
-  int live_codes = IterateLiveObjects(Heap::code_space(),
-                                      &UpdatePointersInOldObject);
-  int live_cells = IterateLiveObjects(Heap::cell_space(),
-                                      &UpdatePointersInOldObject);
-  int live_news = IterateLiveObjects(Heap::new_space(),
-                                     &UpdatePointersInNewObject);
+  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
+                                                  &UpdatePointersInOldObject);
+  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
+                                               &UpdatePointersInOldObject);
+  int live_codes_size = IterateLiveObjects(Heap::code_space(),
+                                           &UpdatePointersInOldObject);
+  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
+                                           &UpdatePointersInOldObject);
+  int live_news_size = IterateLiveObjects(Heap::new_space(),
+                                          &UpdatePointersInNewObject);
 
   // Large objects do not move, the map word can be updated directly.
   LargeObjectIterator it(Heap::lo_space());
   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
     UpdatePointersInNewObject(obj);
 
-  USE(live_maps);
-  USE(live_pointer_olds);
-  USE(live_data_olds);
-  USE(live_codes);
-  USE(live_cells);
-  USE(live_news);
-  ASSERT(live_maps == live_map_objects_);
-  ASSERT(live_data_olds == live_old_data_objects_);
-  ASSERT(live_pointer_olds == live_old_pointer_objects_);
-  ASSERT(live_codes == live_code_objects_);
-  ASSERT(live_cells == live_cell_objects_);
-  ASSERT(live_news == live_young_objects_);
+  USE(live_maps_size);
+  USE(live_pointer_olds_size);
+  USE(live_data_olds_size);
+  USE(live_codes_size);
+  USE(live_cells_size);
+  USE(live_news_size);
+  ASSERT(live_maps_size == live_map_objects_size_);
+  ASSERT(live_data_olds_size == live_old_data_objects_size_);
+  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
+  ASSERT(live_codes_size == live_code_objects_size_);
+  ASSERT(live_cells_size == live_cell_objects_size_);
+  ASSERT(live_news_size == live_young_objects_size_);
 }
 
 
@@ -1783,27 +2066,31 @@
 #endif
   // Relocates objects, always relocate map objects first. Relocating
   // objects in other space relies on map objects to get object size.
-  int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
-  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
-                                             &RelocateOldPointerObject);
-  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
-                                          &RelocateOldDataObject);
-  int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
-  int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject);
-  int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
+  int live_maps_size = IterateLiveObjects(Heap::map_space(),
+                                          &RelocateMapObject);
+  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
+                                                  &RelocateOldPointerObject);
+  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
+                                               &RelocateOldDataObject);
+  int live_codes_size = IterateLiveObjects(Heap::code_space(),
+                                           &RelocateCodeObject);
+  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
+                                           &RelocateCellObject);
+  int live_news_size = IterateLiveObjects(Heap::new_space(),
+                                          &RelocateNewObject);
 
-  USE(live_maps);
-  USE(live_data_olds);
-  USE(live_pointer_olds);
-  USE(live_codes);
-  USE(live_cells);
-  USE(live_news);
-  ASSERT(live_maps == live_map_objects_);
-  ASSERT(live_data_olds == live_old_data_objects_);
-  ASSERT(live_pointer_olds == live_old_pointer_objects_);
-  ASSERT(live_codes == live_code_objects_);
-  ASSERT(live_cells == live_cell_objects_);
-  ASSERT(live_news == live_young_objects_);
+  USE(live_maps_size);
+  USE(live_pointer_olds_size);
+  USE(live_data_olds_size);
+  USE(live_codes_size);
+  USE(live_cells_size);
+  USE(live_news_size);
+  ASSERT(live_maps_size == live_map_objects_size_);
+  ASSERT(live_data_olds_size == live_old_data_objects_size_);
+  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
+  ASSERT(live_codes_size == live_code_objects_size_);
+  ASSERT(live_cells_size == live_cell_objects_size_);
+  ASSERT(live_news_size == live_young_objects_size_);
 
   // Flip from and to spaces
   Heap::new_space()->Flip();
@@ -1821,6 +2108,9 @@
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
     space->MCCommitRelocationInfo();
+
+  Heap::CheckNewSpaceExpansionCriteria();
+  Heap::IncrementYoungSurvivorsCounter(live_news_size);
 }
 
 
@@ -1840,7 +2130,10 @@
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
-    memmove(new_addr, old_addr, Map::kSize);  // copy contents
+    // Move contents.
+    Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
+                    reinterpret_cast<Object**>(old_addr),
+                    Map::kSize);
   }
 
 #ifdef DEBUG
@@ -1896,14 +2189,17 @@
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
-    memmove(new_addr, old_addr, obj_size);  // Copy contents
+    // Move contents.
+    Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
+                    reinterpret_cast<Object**>(old_addr),
+                    obj_size);
   }
 
   ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
-    LOG(FunctionMoveEvent(old_addr, new_addr));
+    PROFILE(FunctionMoveEvent(old_addr, new_addr));
   }
 
   return obj_size;
@@ -1940,7 +2236,10 @@
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
-    memmove(new_addr, old_addr, obj_size);  // Copy contents.
+    // Move contents.
+    Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
+                    reinterpret_cast<Object**>(old_addr),
+                    obj_size);
   }
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@@ -1948,7 +2247,7 @@
     // May also update inline cache target.
     Code::cast(copied_to)->Relocate(new_addr - old_addr);
     // Notify the logger that compiled code has moved.
-    LOG(CodeMoveEvent(old_addr, new_addr));
+    PROFILE(CodeMoveEvent(old_addr, new_addr));
   }
 
   return obj_size;
@@ -1976,9 +2275,9 @@
 #endif
 
   // New and old addresses cannot overlap.
-  memcpy(reinterpret_cast<void*>(new_addr),
-         reinterpret_cast<void*>(old_addr),
-         obj_size);
+  Heap::CopyBlock(reinterpret_cast<Object**>(new_addr),
+                  reinterpret_cast<Object**>(old_addr),
+                  obj_size);
 
 #ifdef DEBUG
   if (FLAG_gc_verbose) {
@@ -1988,7 +2287,7 @@
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
-    LOG(FunctionMoveEvent(old_addr, new_addr));
+    PROFILE(FunctionMoveEvent(old_addr, new_addr));
   }
 
   return obj_size;
@@ -2010,9 +2309,9 @@
 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (obj->IsCode()) {
-    LOG(CodeDeleteEvent(obj->address()));
+    PROFILE(CodeDeleteEvent(obj->address()));
   } else if (obj->IsJSFunction()) {
-    LOG(FunctionDeleteEvent(obj->address()));
+    PROFILE(FunctionDeleteEvent(obj->address()));
   }
 #endif
 }
diff --git a/src/mark-compact.h b/src/mark-compact.h
index ab572f6..3950e75 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -37,7 +37,11 @@
 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
 
 // Callback function for non-live blocks in the old generation.
-typedef void (*DeallocateFunction)(Address start, int size_in_bytes);
+// If add_to_freelist is false, then only accounting stats are updated and no
+// attempt is made to add the area to the free list.
+typedef void (*DeallocateFunction)(Address start,
+                                   int size_in_bytes,
+                                   bool add_to_freelist);
 
 
 // Forward declarations.
@@ -313,11 +317,25 @@
 
   // Callback functions for deallocating non-live blocks in the old
   // generation.
-  static void DeallocateOldPointerBlock(Address start, int size_in_bytes);
-  static void DeallocateOldDataBlock(Address start, int size_in_bytes);
-  static void DeallocateCodeBlock(Address start, int size_in_bytes);
-  static void DeallocateMapBlock(Address start, int size_in_bytes);
-  static void DeallocateCellBlock(Address start, int size_in_bytes);
+  static void DeallocateOldPointerBlock(Address start,
+                                        int size_in_bytes,
+                                        bool add_to_freelist);
+
+  static void DeallocateOldDataBlock(Address start,
+                                     int size_in_bytes,
+                                     bool add_to_freelist);
+
+  static void DeallocateCodeBlock(Address start,
+                                  int size_in_bytes,
+                                  bool add_to_freelist);
+
+  static void DeallocateMapBlock(Address start,
+                                 int size_in_bytes,
+                                 bool add_to_freelist);
+
+  static void DeallocateCellBlock(Address start,
+                                  int size_in_bytes,
+                                  bool add_to_freelist);
 
   // If we are not compacting the heap, we simply sweep the spaces except
   // for the large object space, clearing mark bits and adding unmarked
@@ -407,26 +425,26 @@
   // Counters used for debugging the marking phase of mark-compact or
   // mark-sweep collection.
 
-  // Number of live objects in Heap::to_space_.
-  static int live_young_objects_;
+  // Size of live objects in Heap::to_space_.
+  static int live_young_objects_size_;
 
-  // Number of live objects in Heap::old_pointer_space_.
-  static int live_old_pointer_objects_;
+  // Size of live objects in Heap::old_pointer_space_.
+  static int live_old_pointer_objects_size_;
 
-  // Number of live objects in Heap::old_data_space_.
-  static int live_old_data_objects_;
+  // Size of live objects in Heap::old_data_space_.
+  static int live_old_data_objects_size_;
 
-  // Number of live objects in Heap::code_space_.
-  static int live_code_objects_;
+  // Size of live objects in Heap::code_space_.
+  static int live_code_objects_size_;
 
-  // Number of live objects in Heap::map_space_.
-  static int live_map_objects_;
+  // Size of live objects in Heap::map_space_.
+  static int live_map_objects_size_;
 
-  // Number of live objects in Heap::cell_space_.
-  static int live_cell_objects_;
+  // Size of live objects in Heap::cell_space_.
+  static int live_cell_objects_size_;
 
-  // Number of live objects in Heap::lo_space_.
-  static int live_lo_objects_;
+  // Size of live objects in Heap::lo_space_.
+  static int live_lo_objects_size_;
 
   // Number of live bytes in this collection.
   static int live_bytes_;
diff --git a/src/math.js b/src/math.js
index 4c9de67..fc3b132 100644
--- a/src/math.js
+++ b/src/math.js
@@ -45,7 +45,8 @@
 function MathAbs(x) {
   if (%_IsSmi(x)) return x >= 0 ? x : -x;
   if (!IS_NUMBER(x)) x = ToNumber(x);
-  return %Math_abs(x);
+  if (x === 0) return 0;  // To handle -0.
+  return x > 0 ? x : -x;
 }
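The explicit zero check exists because of IEEE-754 signed zero. A minimal
standalone C++ illustration of the property relied on here:

#include <cmath>
#include <cstdio>

int main() {
  double nz = -0.0;
  // -0.0 compares equal to +0.0, so the "x === 0" guard above catches it
  // and returns a canonical +0 without inspecting the sign bit.
  std::printf("%d %d\n", nz == 0.0, std::signbit(nz));  // prints: 1 1
  return 0;
}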
 
 // ECMA 262 - 15.8.2.2
@@ -84,7 +85,7 @@
 // ECMA 262 - 15.8.2.7
 function MathCos(x) {
   if (!IS_NUMBER(x)) x = ToNumber(x);
-  return %_Math_cos(x);
+  return %_MathCos(x);
 }
 
 // ECMA 262 - 15.8.2.8
@@ -159,30 +160,30 @@
 function MathPow(x, y) {
   if (!IS_NUMBER(x)) x = ToNumber(x);
   if (!IS_NUMBER(y)) y = ToNumber(y);
-  return %Math_pow(x, y);
+  return %_MathPow(x, y);
 }
 
 // ECMA 262 - 15.8.2.14
 function MathRandom() {
-  return %_RandomPositiveSmi() / 0x40000000;
+  return %_RandomHeapNumber();
 }
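The old expression could produce only 2^30 evenly spaced values:
%_RandomPositiveSmi() yields an integer below 0x40000000, so every result was
a multiple of 1/0x40000000 (about 9.3e-10). %_RandomHeapNumber() can instead
fill the full mantissa of a double.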
 
 // ECMA 262 - 15.8.2.15
 function MathRound(x) {
   if (!IS_NUMBER(x)) x = ToNumber(x);
-  return %Math_round(x);
+  return %RoundNumber(x);
 }
 
 // ECMA 262 - 15.8.2.16
 function MathSin(x) {
   if (!IS_NUMBER(x)) x = ToNumber(x);
-  return %_Math_sin(x);
+  return %_MathSin(x);
 }
 
 // ECMA 262 - 15.8.2.17
 function MathSqrt(x) {
   if (!IS_NUMBER(x)) x = ToNumber(x);
-  return %Math_sqrt(x);
+  return %_MathSqrt(x);
 }
 
 // ECMA 262 - 15.8.2.18
diff --git a/src/messages.cc b/src/messages.cc
index e16b1b2..7cb1d20 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -30,6 +30,7 @@
 
 #include "api.h"
 #include "execution.h"
+#include "messages.h"
 #include "spaces-inl.h"
 #include "top.h"
 
diff --git a/src/messages.js b/src/messages.js
index 7c939ca..de6a362 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -137,6 +137,7 @@
       malformed_regexp:             "Invalid regular expression: /%0/: %1",
       unterminated_regexp:          "Invalid regular expression: missing /",
       regexp_flags:                 "Cannot supply flags when constructing one RegExp from another",
+      incompatible_method_receiver: "Method %0 called on incompatible receiver %1",
       invalid_lhs_in_assignment:    "Invalid left-hand side in assignment",
       invalid_lhs_in_for_in:        "Invalid left-hand side in for-in",
       invalid_lhs_in_postfix_op:    "Invalid left-hand side expression in postfix operation",
@@ -191,7 +192,8 @@
       invalid_json:                 "String '%0' is not valid JSON",
       circular_structure:           "Converting circular structure to JSON",
       obj_ctor_property_non_object: "Object.%0 called on non-object",
-      array_indexof_not_defined:    "Array.getIndexOf: Argument undefined"
+      array_indexof_not_defined:    "Array.getIndexOf: Argument undefined",
+      illegal_access:               "illegal access"
     };
   }
   var format = kMessages[message.type];
@@ -431,6 +433,30 @@
 
 
 /**
+ * Returns the name of the script if available, or the contents of the
+ * sourceURL comment otherwise. See
+ * http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
+ * for details on using the //@ sourceURL comment to identify scripts that
+ * don't have a name.
+ *
+ * @return {?string} script name if present, value of the //@ sourceURL
+ * comment otherwise.
+ */
+Script.prototype.nameOrSourceURL = function() {
+  if (this.name)
+    return this.name;
+  // TODO(608): the spaces in a regexp below had to be escaped as \040 
+  // because this file is being processed by js2c whose handling of spaces
+  // in regexps is broken. Also, ['"] are excluded from allowed URLs to
+  // avoid matches against sources that invoke evals with sourceURL.
+  var sourceUrlPattern =
+    /\/\/@[\040\t]sourceURL=[\040\t]*([^\s'"]*)[\040\t]*$/m;
+  var match = sourceUrlPattern.exec(this.source);
+  return match ? match[1] : this.name;
+}
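For example, a script compiled without a name but whose source ends in a line
like //@ sourceURL=dynamic.js will report "dynamic.js" from this accessor.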
+
+
+/**
  * Class for source location. A source location is a position within some
  * source with the following properties:
  *   script   : script object for the source
@@ -741,7 +767,7 @@
   } else {
     eval_origin +=  "<anonymous>";
   }
-  
+
   var eval_from_script = script.eval_from_script;
   if (eval_from_script) {
     if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
@@ -762,7 +788,7 @@
       }
     }
   }
-  
+
   return eval_origin;
 };
 
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 4f5ae3e..cc730f2 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -522,7 +522,9 @@
 
   int32_t pc_offset() const { return pc_ - buffer_; }
   int32_t current_position() const { return current_position_; }
-  int32_t current_statement_position() const { return current_position_; }
+  int32_t current_statement_position() const {
+    return current_statement_position_;
+  }
 
   // Check if there is less than kGap bytes available in the buffer.
   // If this is the case, we need to grow the buffer before emitting
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 3bd42ed..04bcfeb 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -74,7 +74,99 @@
 
 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                              bool is_construct) {
-  UNIMPLEMENTED_MIPS();
+  // Called from JSEntryStub::GenerateBody
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // arguments slots
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  // Clear the context before we push it when entering the JS frame.
+  __ li(cp, Operand(0));
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Set up the context from the function argument.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Set up the roots register.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ li(s6, Operand(roots_address));
+
+  // Push the function and the receiver onto the stack.
+  __ MultiPushReversed(a1.bit() | a2.bit());
+
+  // Copy arguments to the stack in a loop.
+  // a3: argc
+  // s0: argv, i.e. points to the first arg
+  Label loop, entry;
+  __ sll(t0, a3, kPointerSizeLog2);
+  __ add(t2, s0, t0);
+  __ b(&entry);
+  __ nop();   // Branch delay slot nop.
+  // t2 points past last arg.
+  __ bind(&loop);
+  __ lw(t0, MemOperand(s0));  // Read next parameter.
+  __ addiu(s0, s0, kPointerSize);
+  __ lw(t0, MemOperand(t0));  // Dereference handle.
+  __ Push(t0);  // Push parameter.
+  __ bind(&entry);
+  __ Branch(ne, &loop, s0, Operand(t2));
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  // s6: roots_address
+  //
+  // Stack:
+  // arguments
+  // receiver
+  // function
+  // arguments slots
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  // Initialize all JavaScript callee-saved registers, since they will be seen
+  // by the garbage collector as part of handlers.
+  __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
+  __ mov(s1, t4);
+  __ mov(s2, t4);
+  __ mov(s3, t4);
+  __ mov(s4, t4);
+  __ mov(s5, t4);
+  // s6 holds the root address. Do not clobber.
+  // s7 is cp. Do not init.
+
+  // Invoke the code and pass argc as a0.
+  __ mov(a0, a3);
+  if (is_construct) {
+    UNIMPLEMENTED_MIPS();
+    __ break_(0x164);
+  } else {
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, actual, CALL_FUNCTION);
+  }
+
+  __ LeaveInternalFrame();
+
+  __ Jump(ra);
 }
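The argument-copy loop amounts to the following C++ sketch. Object and the
push callback are illustrative stand-ins; each argv slot holds a handle, that
is, a pointer to an object pointer, hence the double load:

struct Object;  // stand-in for V8's Object

// Hedged sketch of the generated loop: t2 = argv + argc * kPointerSize
// is the end pointer; every slot is dereferenced once before the push.
void CopyArguments(Object*** argv, int argc, void (*push)(Object*)) {
  Object*** end = argv + argc;      // t2 in the generated code
  for (Object*** p = argv; p != end; ++p) {
    push(**p);                      // lw t0,(s0); lw t0,(t0); Push(t0)
  }
}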
 
 
@@ -100,6 +192,7 @@
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
+  __ break_(0x201);
 }
 
 
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
index 2a77715..3a511b8 100644
--- a/src/mips/codegen-mips-inl.h
+++ b/src/mips/codegen-mips-inl.h
@@ -36,15 +36,29 @@
 
 // Platform-specific inline functions.
 
-void DeferredCode::Jump() { __ b(&entry_label_); }
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+void DeferredCode::Jump() {
+  __ b(&entry_label_);
+  __ nop();
 }
 
 
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  UNIMPLEMENTED_MIPS();
+void Reference::GetValueAndSpill() {
+  GetValue();
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+  Visit(statement);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+  VisitStatements(statements);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression) {
+  Load(expression);
 }
 
 
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 2de45f6..ca1edd4 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -30,12 +30,14 @@
 
 #include "bootstrapper.h"
 #include "codegen-inl.h"
+#include "compiler.h"
 #include "debug.h"
+#include "ic-inl.h"
 #include "parser.h"
 #include "register-allocator-inl.h"
 #include "runtime.h"
 #include "scopes.h"
-#include "compiler.h"
+#include "virtual-frame-inl.h"
 
 
 
@@ -46,7 +48,7 @@
 
 
 
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
 // Platform-specific DeferredCode functions.
 
 
@@ -60,13 +62,41 @@
 }
 
 
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
+// CodeGenState implementation.
+
+CodeGenState::CodeGenState(CodeGenerator* owner)
+    : owner_(owner),
+      true_target_(NULL),
+      false_target_(NULL),
+      previous_(NULL) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::CodeGenState(CodeGenerator* owner,
+                           JumpTarget* true_target,
+                           JumpTarget* false_target)
+    : owner_(owner),
+      true_target_(true_target),
+      false_target_(false_target),
+      previous_(owner->state()) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::~CodeGenState() {
+  ASSERT(owner_->state() == this);
+  owner_->set_state(previous_);
+}
+
+
+// -----------------------------------------------------------------------------
 // CodeGenerator implementation
 
 CodeGenerator::CodeGenerator(MacroAssembler* masm)
     : deferred_(8),
       masm_(masm),
-      scope_(NULL),
       frame_(NULL),
       allocator_(NULL),
       cc_reg_(cc_always),
@@ -76,18 +106,362 @@
 
 
 // Calling conventions:
-// s8_fp: caller's frame pointer
+// fp: caller's frame pointer
 // sp: stack pointer
 // a1: called JS function
 // cp: callee's context
 
-void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
-  UNIMPLEMENTED_MIPS();
+void CodeGenerator::Generate(CompilationInfo* info) {
+  // Record the position for debugging purposes.
+  CodeForFunctionPosition(info->function());
+
+  // Initialize state.
+  info_ = info;
+  ASSERT(allocator_ == NULL);
+  RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
+  ASSERT(frame_ == NULL);
+  frame_ = new VirtualFrame();
+  cc_reg_ = cc_always;
+
+  {
+    CodeGenState state(this);
+
+    // Registers:
+    // a1: called JS function
+    // ra: return address
+    // fp: caller's frame pointer
+    // sp: stack pointer
+    // cp: callee's context
+    //
+    // Stack:
+    // arguments
+    // receiver
+
+    frame_->Enter();
+
+    // Allocate space for locals and initialize them.
+    frame_->AllocateStackSlots();
+
+    // Initialize the function return target.
+    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+    function_return_is_shadowed_ = false;
+
+    VirtualFrame::SpilledScope spilled_scope;
+    if (scope()->num_heap_slots() > 0) {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    {
+      Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+      // Note that iteration order is relevant here! If we have the same
+      // parameter twice (e.g., function (x, y, x)), and that parameter
+      // needs to be copied into the context, it must be the last argument
+      // passed to the parameter that needs to be copied. This is a rare
+      // case so we don't check for it, instead we rely on the copying
+      // order: such a parameter is copied repeatedly into the same
+      // context location and thus the last value is what is seen inside
+      // the function.
+      for (int i = 0; i < scope()->num_parameters(); i++) {
+        UNIMPLEMENTED_MIPS();
+      }
+    }
+
+    // Store the arguments object.  This must happen after context
+    // initialization because the arguments object may be stored in the
+    // context.
+    if (scope()->arguments() != NULL) {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    // Generate code to 'execute' declarations and initialize functions
+    // (source elements). In case of an illegal redeclaration we need to
+    // handle that instead of processing the declarations.
+    if (scope()->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ illegal redeclarations");
+      scope()->VisitIllegalRedeclaration(this);
+    } else {
+      Comment cmnt(masm_, "[ declarations");
+      ProcessDeclarations(scope()->declarations());
+      // Bail out if a stack-overflow exception occurred when processing
+      // declarations.
+      if (HasStackOverflow()) return;
+    }
+
+    if (FLAG_trace) {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    // Compile the body of the function in a vanilla state. Don't
+    // bother compiling all the code if the scope has an illegal
+    // redeclaration.
+    if (!scope()->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+      bool is_builtin = Bootstrapper::IsActive();
+      bool should_trace =
+          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+      if (should_trace) {
+        UNIMPLEMENTED_MIPS();
+      }
+#endif
+      VisitStatementsAndSpill(info->function()->body());
+    }
+  }
+
+  if (has_valid_frame() || function_return_.is_linked()) {
+    if (!function_return_.is_linked()) {
+      CodeForReturnPosition(info->function());
+    }
+    // Registers:
+    // v0: result
+    // sp: stack pointer
+    // fp: frame pointer
+    // cp: callee's context
+
+    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+
+    function_return_.Bind();
+    if (FLAG_trace) {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    masm_->bind(&check_exit_codesize);
+
+    masm_->mov(sp, fp);
+    masm_->lw(fp, MemOperand(sp, 0));
+    masm_->lw(ra, MemOperand(sp, 4));
+    masm_->addiu(sp, sp, 8);
+
+    // Here we use masm_-> instead of the __ macro to prevent the code
+    // coverage tool from instrumenting these lines, as we rely on the code
+    // size here.
+    // TODO(MIPS): Should we be able to use more than 0x1ffe parameters?
+    masm_->addiu(sp, sp, (scope()->num_parameters() + 1) * kPointerSize);
+    masm_->Jump(ra);
+    // The Jump automatically generates a nop in the branch delay slot.
+
+    // Check that the size of the code used for returning matches what is
+    // expected by the debugger.
+    ASSERT_EQ(kJSReturnSequenceLength,
+              masm_->InstructionsGeneratedSince(&check_exit_codesize));
+  }
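For the record, the sequence after check_exit_codesize is the mov of fp into
sp, the two lw loads of fp and ra, the addiu by 8, the parameter-popping
addiu, the jump through ra, and the branch-delay-slot nop that Jump emits:
seven instructions, matching the kJSReturnSequenceLength constant raised from
6 to 7 later in this patch (codegen-mips.h).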
+
+  // Code generation state must be reset.
+  ASSERT(!has_cc());
+  ASSERT(state_ == NULL);
+  ASSERT(!function_return_is_shadowed_);
+  function_return_.Unuse();
+  DeleteFrame();
+
+  // Process any deferred code using the register allocator.
+  if (!HasStackOverflow()) {
+    ProcessDeferred();
+  }
+
+  allocator_ = NULL;
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ LoadReference");
+  Expression* e = ref->expression();
+  Property* property = e->AsProperty();
+  Variable* var = e->AsVariableProxy()->AsVariable();
+
+  if (property != NULL) {
+    UNIMPLEMENTED_MIPS();
+  } else if (var != NULL) {
+    // The expression is a variable proxy that does not rewrite to a
+    // property.  Global variables are treated as named property references.
+    if (var->is_global()) {
+      LoadGlobal();
+      ref->set_type(Reference::NAMED);
+    } else {
+      ASSERT(var->slot() != NULL);
+      ref->set_type(Reference::SLOT);
+    }
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+  VirtualFrame::SpilledScope spilled_scope;
+  // Pop a reference from the stack while preserving TOS.
+  Comment cmnt(masm_, "[ UnloadReference");
+  int size = ref->size();
+  if (size > 0) {
+    frame_->EmitPop(a0);
+    frame_->Drop(size);
+    frame_->EmitPush(a0);
+  }
+  ref->set_unloaded();
+}
+
+
+MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+  // Currently, this assertion will fail if we try to assign to
+  // a constant variable that is constant because it is read-only
+  // (such as the variable referring to a named function expression).
+  // We need to implement assignments to read-only variables.
+  // Ideally, we should do this during AST generation (by converting
+  // such assignments into expression statements); however, in general
+  // we may not be able to make the decision until past AST generation,
+  // that is when the entire program is known.
+  ASSERT(slot != NULL);
+  int index = slot->index();
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+      UNIMPLEMENTED_MIPS();
+      return MemOperand(no_reg, 0);
+
+    case Slot::LOCAL:
+      return frame_->LocalAt(index);
+
+    case Slot::CONTEXT: {
+      UNIMPLEMENTED_MIPS();
+      return MemOperand(no_reg, 0);
+    }
+
+    default:
+      UNREACHABLE();
+      return MemOperand(no_reg, 0);
+  }
+}
+
+
+// Loads a value on TOS. If it is a boolean value, the result may have been
+// (partially) translated into branches, or it may have set the condition
+// code register. If force_cc is set, the value is forced to set the
+// condition code register and no value is pushed. If the condition code
+// register was set, has_cc() is true and cc_reg_ contains the condition to
+// test for 'true'.
+void CodeGenerator::LoadCondition(Expression* x,
+                                  JumpTarget* true_target,
+                                  JumpTarget* false_target,
+                                  bool force_cc) {
+  ASSERT(!has_cc());
+  int original_height = frame_->height();
+
+  { CodeGenState new_state(this, true_target, false_target);
+    Visit(x);
+
+    // If we hit a stack overflow, we may not have actually visited
+    // the expression. In that case, we ensure that we have a
+    // valid-looking frame state because we will continue to generate
+    // code as we unwind the C++ stack.
+    //
+    // It's possible to have both a stack overflow and a valid frame
+    // state (eg, a subexpression overflowed, visiting it returned
+    // with a dummied frame state, and visiting this expression
+    // returned with a normal-looking state).
+    if (HasStackOverflow() &&
+        has_valid_frame() &&
+        !has_cc() &&
+        frame_->height() == original_height) {
+      true_target->Jump();
+    }
+  }
+  if (force_cc && frame_ != NULL && !has_cc()) {
+    // Convert the TOS value to a boolean in the condition code register.
+    UNIMPLEMENTED_MIPS();
+  }
+  ASSERT(!force_cc || !has_valid_frame() || has_cc());
+  ASSERT(!has_valid_frame() ||
+         (has_cc() && frame_->height() == original_height) ||
+         (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+void CodeGenerator::Load(Expression* x) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  JumpTarget true_target;
+  JumpTarget false_target;
+  LoadCondition(x, &true_target, &false_target, false);
+
+  if (has_cc()) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  if (true_target.is_linked() || false_target.is_linked()) {
+    UNIMPLEMENTED_MIPS();
+  }
+  ASSERT(has_valid_frame());
+  ASSERT(!has_cc());
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadGlobal() {
+  VirtualFrame::SpilledScope spilled_scope;
+  __ lw(a0, GlobalObject());
+  frame_->EmitPush(a0);
+}
+
+
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+  VirtualFrame::SpilledScope spilled_scope;
+  if (slot->type() == Slot::LOOKUP) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    __ lw(a0, SlotOperand(slot, a2));
+    frame_->EmitPush(a0);
+    if (slot->var()->mode() == Variable::CONST) {
+      UNIMPLEMENTED_MIPS();
+    }
+  }
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+  ASSERT(slot != NULL);
+  if (slot->type() == Slot::LOOKUP) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    ASSERT(!slot->var()->is_dynamic());
+
+    JumpTarget exit;
+    if (init_state == CONST_INIT) {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    // We must execute the store. Storing a variable must keep the
+    // (new) value on the stack. This is necessary for compiling
+    // assignment expressions.
+    //
+    // Note: We will reach here even with slot->var()->mode() ==
+    // Variable::CONST because of const declarations which will
+    // initialize consts to 'the hole' value and by doing so, end up
+    // calling this code. a2 may be loaded with context; used below in
+    // RecordWrite.
+    frame_->EmitPop(a0);
+    __ sw(a0, SlotOperand(slot, a2));
+    frame_->EmitPush(a0);
+    if (slot->type() == Slot::CONTEXT) {
+      UNIMPLEMENTED_MIPS();
+    }
+    // If we definitely did not jump over the assignment, we do not need
+    // to bind the exit label. Doing so can defeat peephole
+    // optimization.
+    if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
+      exit.Bind();
+    }
+  }
 }
 
 
 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-  UNIMPLEMENTED_MIPS();
+  VirtualFrame::SpilledScope spilled_scope;
+  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
+    VisitAndSpill(statements->at(i));
+  }
 }
 
 
@@ -97,7 +471,14 @@
 
 
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  UNIMPLEMENTED_MIPS();
+  VirtualFrame::SpilledScope spilled_scope;
+  frame_->EmitPush(cp);
+  __ li(t0, Operand(pairs));
+  frame_->EmitPush(t0);
+  __ li(t0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+  frame_->EmitPush(t0);
+  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+  // The result is discarded.
 }
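Note that the argument count of 3 passed to CallRuntime matches the three
pushes above: the context, the pairs array, and the is_eval flag.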
 
 
@@ -107,7 +488,17 @@
 
 
 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  CodeForStatementPosition(node);
+  Expression* expression = node->expression();
+  expression->MarkAsStatement();
+  LoadAndSpill(expression);
+  frame_->Drop();
+  ASSERT(frame_->height() == original_height);
 }
 
 
@@ -132,7 +523,22 @@
 
 
 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  UNIMPLEMENTED_MIPS();
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ReturnStatement");
+
+  CodeForStatementPosition(node);
+  LoadAndSpill(node->expression());
+  if (function_return_is_shadowed_) {
+    frame_->EmitPop(v0);
+    function_return_.Jump();
+  } else {
+    // Pop the result from the frame and prepare the frame for
+    // returning thus making it easier to merge.
+    frame_->EmitPop(v0);
+    frame_->PrepareForReturn();
+
+    function_return_.Jump();
+  }
 }
 
 
@@ -191,8 +597,8 @@
 }
 
 
-void CodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void CodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -203,17 +609,45 @@
 
 
 void CodeGenerator::VisitSlot(Slot* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Slot");
+  LoadFromSlot(node, typeof_state());
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ VariableProxy");
+
+  Variable* var = node->var();
+  Expression* expr = var->rewrite();
+  if (expr != NULL) {
+    Visit(expr);
+  } else {
+    ASSERT(var->is_global());
+    Reference ref(this, node);
+    ref.GetValueAndSpill();
+  }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitLiteral(Literal* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Literal");
+  __ li(t0, Operand(node->handle()));
+  frame_->EmitPush(t0);
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
@@ -238,7 +672,47 @@
 
 
 void CodeGenerator::VisitAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Assignment");
+
+  { Reference target(this, node->target());
+    if (target.is_illegal()) {
+      // Fool the virtual frame into thinking that we left the assignment's
+      // value on the frame.
+      frame_->EmitPush(zero_reg);
+      ASSERT(frame_->height() == original_height + 1);
+      return;
+    }
+
+    if (node->op() == Token::ASSIGN ||
+        node->op() == Token::INIT_VAR ||
+        node->op() == Token::INIT_CONST) {
+      LoadAndSpill(node->value());
+    } else {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    Variable* var = node->target()->AsVariableProxy()->AsVariable();
+    if (var != NULL &&
+        (var->mode() == Variable::CONST) &&
+        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+      // Assignment ignored - leave the value on the stack.
+    } else {
+      CodeForSourcePosition(node->position());
+      if (node->op() == Token::INIT_CONST) {
+        // Dynamic constant initializations must use the function context
+        // and initialize the actual constant declared. Dynamic variable
+        // initializations are simply assignments and use SetValue.
+        target.SetValue(CONST_INIT);
+      } else {
+        target.SetValue(NOT_CONST_INIT);
+      }
+    }
+  }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
@@ -253,7 +727,73 @@
 
 
 void CodeGenerator::VisitCall(Call* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Call");
+
+  Expression* function = node->expression();
+  ZoneList<Expression*>* args = node->arguments();
+
+  // Standard function call.
+  // Check if the function is a variable or a property.
+  Variable* var = function->AsVariableProxy()->AsVariable();
+  Property* property = function->AsProperty();
+
+  // ------------------------------------------------------------------------
+  // Fast-case: Use inline caching.
+  // ---
+  // According to ECMA-262, section 11.2.3, page 44, the function to call
+  // must be resolved after the arguments have been evaluated. The IC code
+  // automatically handles this by loading the arguments before the function
+  // is resolved in cache misses (this also holds for megamorphic calls).
+  // ------------------------------------------------------------------------
+
+  if (var != NULL && var->is_possibly_eval()) {
+    UNIMPLEMENTED_MIPS();
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
+    // ----------------------------------
+
+    int arg_count = args->length();
+
+    // We need sp to be 8-byte aligned when calling the stub.
+    __ SetupAlignedCall(t0, arg_count);
+
+    // Pass the global object as the receiver and let the IC stub
+    // patch the stack to use the global proxy as 'this' in the
+    // invoked function.
+    LoadGlobal();
+
+    // Load the arguments.
+    for (int i = 0; i < arg_count; i++) {
+      LoadAndSpill(args->at(i));
+    }
+
+    // Setup the receiver register and call the IC initialization code.
+    __ li(a2, Operand(var->name()));
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
+    CodeForSourcePosition(node->position());
+    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
+                           arg_count + 1);
+    __ ReturnFromAlignedCall();
+    __ lw(cp, frame_->Context());
+    // Push the result.
+    frame_->EmitPush(v0);
+
+  } else if (var != NULL && var->slot() != NULL &&
+             var->slot()->type() == Slot::LOOKUP) {
+    UNIMPLEMENTED_MIPS();
+  } else if (property != NULL) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
@@ -292,6 +832,26 @@
 }
 
 
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 // This should generate code that performs a charCodeAt() call or returns
 // undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
 // It is not yet implemented on ARM, so it always goes to the slow case.
@@ -300,6 +860,11 @@
 }
 
 
+void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
   UNIMPLEMENTED_MIPS();
 }
@@ -320,12 +885,12 @@
 }
 
 
-void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateRandomHeapNumber(ZoneList<Expression*>* args) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -413,8 +978,108 @@
 #undef __
 #define __ ACCESS_MASM(masm)
 
+// -----------------------------------------------------------------------------
+// Reference support
 
-// On entry a0 and a1 are the things to be compared.  On exit v0 is 0,
+Reference::Reference(CodeGenerator* cgen,
+                     Expression* expression,
+                     bool persist_after_get)
+    : cgen_(cgen),
+      expression_(expression),
+      type_(ILLEGAL),
+      persist_after_get_(persist_after_get) {
+  cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+  ASSERT(is_unloaded() || is_illegal());
+}
+
+
+Handle<String> Reference::GetName() {
+  ASSERT(type_ == NAMED);
+  Property* property = expression_->AsProperty();
+  if (property == NULL) {
+    // Global variable reference treated as a named property reference.
+    VariableProxy* proxy = expression_->AsVariableProxy();
+    ASSERT(proxy->AsVariable() != NULL);
+    ASSERT(proxy->AsVariable()->is_global());
+    return proxy->name();
+  } else {
+    Literal* raw_name = property->key()->AsLiteral();
+    ASSERT(raw_name != NULL);
+    return Handle<String>(String::cast(*raw_name->handle()));
+  }
+}
+
+
+void Reference::GetValue() {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_illegal());
+  ASSERT(!cgen_->has_cc());
+  Property* property = expression_->AsProperty();
+  if (property != NULL) {
+    cgen_->CodeForSourcePosition(property->position());
+  }
+
+  switch (type_) {
+    case SLOT: {
+      UNIMPLEMENTED_MIPS();
+      break;
+    }
+
+    case NAMED: {
+      UNIMPLEMENTED_MIPS();
+      break;
+    }
+
+    case KEYED: {
+      UNIMPLEMENTED_MIPS();
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+  ASSERT(!is_illegal());
+  ASSERT(!cgen_->has_cc());
+  MacroAssembler* masm = cgen_->masm();
+  Property* property = expression_->AsProperty();
+  if (property != NULL) {
+    cgen_->CodeForSourcePosition(property->position());
+  }
+
+  switch (type_) {
+    case SLOT: {
+      Comment cmnt(masm, "[ Store to Slot");
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      cgen_->StoreToSlot(slot, init_state);
+      cgen_->UnloadReference(this);
+      break;
+    }
+
+    case NAMED: {
+      UNIMPLEMENTED_MIPS();
+      break;
+    }
+
+    case KEYED: {
+      UNIMPLEMENTED_MIPS();
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
 // positive or negative to indicate the result of the comparison.
 void CompareStub::Generate(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
@@ -422,6 +1087,12 @@
 }
 
 
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  UNIMPLEMENTED_MIPS();
+  return Handle<Code>::null();
+}
+
+
 void StackCheckStub::Generate(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
   __ break_(0x790);
@@ -446,27 +1117,274 @@
                               Label* throw_out_of_memory_exception,
                               bool do_gc,
                               bool always_allocate) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x826);
+  // s0: number of arguments including receiver (C callee-saved)
+  // s1: pointer to the first argument          (C callee-saved)
+  // s2: pointer to builtin function            (C callee-saved)
+
+  if (do_gc) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth();
+  if (always_allocate) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  // Call C built-in.
+  // a0 = argc, a1 = argv
+  __ mov(a0, s0);
+  __ mov(a1, s1);
+
+  __ CallBuiltin(s2);
+
+  if (always_allocate) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  __ addiu(a2, v0, 1);
+  __ andi(t0, a2, kFailureTagMask);
+  __ Branch(eq, &failure_returned, t0, Operand(zero_reg));
+
+  // Exit C frame and return.
+  // v0:v1: result
+  // sp: stack pointer
+  // fp: frame pointer
+  __ LeaveExitFrame(mode_);
+
+  // Check if we should retry or throw exception.
+  Label retry;
+  __ bind(&failure_returned);
+  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
+  __ Branch(eq, &retry, t0, Operand(zero_reg));
+
+  // Special handling of out of memory exceptions.
+  Failure* out_of_memory = Failure::OutOfMemoryException();
+  __ Branch(eq, throw_out_of_memory_exception,
+            v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+
+  // Retrieve the pending exception and clear the variable.
+  __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
+  __ lw(a3, MemOperand(t0));
+  __ LoadExternalReference(t0,
+      ExternalReference(Top::k_pending_exception_address));
+  __ lw(v0, MemOperand(t0));
+  __ sw(a3, MemOperand(t0));
+
+  // Special handling of termination exceptions which are uncatchable
+  // by javascript code.
+  __ Branch(eq, throw_termination_exception,
+            v0, Operand(Factory::termination_exception()));
+
+  // Handle normal exception.
+  __ b(throw_normal_exception);
+  __ nop();   // Branch delay slot nop.
+
+  __ bind(&retry);  // Pass the last failure (in v0) on to the next retry.
 }
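The addiu/andi pair works because Failure pointers carry a tag whose bits are
all ones, so adding one clears the whole tag field. A standalone sketch,
assuming this era's values kFailureTag == 3 and kFailureTagMask == 3
(assumptions, not quoted from the patch):

#include <cstdint>

const intptr_t kFailureTag = 3;      // assumed: both tag bits set
const intptr_t kFailureTagMask = 3;  // assumed: two-bit tag field

bool IsFailureResult(intptr_t value) {
  // (value + 1) has zero low bits exactly when value was failure-tagged,
  // mirroring the addiu/andi pair in the generated code.
  return ((value + 1) & kFailureTagMask) == 0;
}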
 
 void CEntryStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x831);
+  // Called from JavaScript; parameters are on stack as if calling JS function
+  // a0: number of arguments including receiver
+  // a1: pointer to builtin function
+  // fp: frame pointer    (restored after C call)
+  // sp: stack pointer    (restored as callee's sp after C call)
+  // cp: current context  (C callee-saved)
+
+  // NOTE: Invocations of builtins may return failure objects
+  // instead of a proper result. The builtin entry handles
+  // this by performing a garbage collection and retrying the
+  // builtin once.
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  __ EnterExitFrame(mode_, s0, s1, s2);
+
+  // s0: number of arguments (C callee-saved)
+  // s1: pointer to first argument (C callee-saved)
+  // s2: pointer to builtin function (C callee-saved)
+
+  Label throw_normal_exception;
+  Label throw_termination_exception;
+  Label throw_out_of_memory_exception;
+
+  // Call into the runtime system.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               false,
+               false);
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  Failure* failure = Failure::InternalError();
+  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               true,
+               true);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+  __ bind(&throw_termination_exception);
+  GenerateThrowUncatchable(masm, TERMINATION);
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
 }
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  UNIMPLEMENTED_MIPS();
-  // Load a result.
-  __ li(v0, Operand(0x1234));
-  __ jr(ra);
-  // Return
-  __ nop();
+  Label invoke, exit;
+
+  // Registers:
+  // a0: entry address
+  // a1: function
+  // a2: receiver
+  // a3: argc
+  //
+  // Stack:
+  // 4 args slots
+  // args
+
+  // Save callee saved registers on the stack.
+  __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
+
+  // We build an EntryFrame.
+  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  __ li(t2, Operand(Smi::FromInt(marker)));
+  __ li(t1, Operand(Smi::FromInt(marker)));
+  __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+  __ lw(t0, MemOperand(t0));
+  __ MultiPush(t0.bit() | t1.bit() | t2.bit() | t3.bit());
+
+  // Setup frame pointer for the frame to be pushed.
+  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Load argv in s0 register.
+  __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize +
+                           StandardFrameConstants::kCArgsSlotsSize));
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // caller fp          |
+  // function slot      | entry frame
+  // context slot       |
+  // bad fp (0xff...f)  |
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  // Call a faked try-block that does the invoke.
+  __ bal(&invoke);
+  __ nop();   // Branch delay slot nop.
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  // Coming in here the fp will be invalid because the PushTryHandler below
+  // sets it to 0 to signal the existence of the JSEntry frame.
+  __ LoadExternalReference(t0,
+      ExternalReference(Top::k_pending_exception_address));
+  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
+  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+  __ b(&exit);
+  __ nop();   // Branch delay slot nop.
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the bal(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+
+  // Clear any pending exceptions.
+  __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
+  __ lw(t1, MemOperand(t0));
+  __ LoadExternalReference(t0,
+      ExternalReference(Top::k_pending_exception_address));
+  __ sw(t1, MemOperand(t0));
+
+  // Invoke the function by calling through JS entry trampoline builtin.
+  // Notice that we cannot store a reference to the trampoline code directly in
+  // this stub, because runtime stubs are not traversed when doing GC.
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+    __ LoadExternalReference(t0, construct_entry);
+  } else {
+    ExternalReference entry(Builtins::JSEntryTrampoline);
+    __ LoadExternalReference(t0, entry);
+  }
+  __ lw(t9, MemOperand(t0));  // deref address
+
+  // Call JSEntryTrampoline.
+  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+  __ CallBuiltin(t9);
+
+  // Unlink this frame from the handler chain. When reading the
+  // address of the next handler, there is no need to use the address
+  // displacement since the current stack pointer (sp) points directly
+  // to the stack handler.
+  __ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
+  __ LoadExternalReference(t0, ExternalReference(Top::k_handler_address));
+  __ sw(t1, MemOperand(t0));
+
+  // This restores sp to its position before PushTryHandler.
+  __ addiu(sp, sp, StackHandlerConstants::kSize);
+
+  __ bind(&exit);  // v0 holds result
+  // Restore the top frame descriptors from the stack.
+  __ Pop(t1);
+  __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+  __ sw(t1, MemOperand(t0));
+
+  // Reset the stack to the callee saved registers.
+  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Restore callee saved registers from the stack.
+  __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
+  // Return.
+  __ Jump(ra);
 }
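MultiPush and MultiPop take a bitmask with one bit per register, which is why
the save set composes with plain bitwise operators. A minimal sketch;
RegList's width and the helper are assumptions for illustration:

#include <cstdint>

typedef uint32_t RegList;  // assumed: one bit per register

// The save set above, symbolically: (kCalleeSaved | ra_bit) & ~sp_bit.
inline RegList WithoutRegister(RegList set, RegList reg_bit) {
  return set & ~reg_bit;
}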
 
 
 // This stub performs an instanceof, calling the builtin function if
-// necessary.  Uses a1 for the object, a0 for the function that it may
+// necessary. Uses a1 for the object, a0 for the function that it may
 // be an instance of (these are fetched from the stack).
 void InstanceofStub::Generate(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 147b872..0f0a746 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -42,7 +42,77 @@
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+class Reference BASE_EMBEDDED {
+ public:
+  // The values of the types are important, see size().
+  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+  Reference(CodeGenerator* cgen,
+            Expression* expression,
+            bool persist_after_get = false);
+  ~Reference();
+
+  Expression* expression() const { return expression_; }
+  Type type() const { return type_; }
+  void set_type(Type value) {
+    ASSERT_EQ(ILLEGAL, type_);
+    type_ = value;
+  }
+
+  void set_unloaded() {
+    ASSERT_NE(ILLEGAL, type_);
+    ASSERT_NE(UNLOADED, type_);
+    type_ = UNLOADED;
+  }
+  // The size the reference takes up on the stack.
+  int size() const {
+    return (type_ < SLOT) ? 0 : type_;
+  }
+
+  bool is_illegal() const { return type_ == ILLEGAL; }
+  bool is_slot() const { return type_ == SLOT; }
+  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+  bool is_unloaded() const { return type_ == UNLOADED; }
+
+  // Return the name. Only valid for named property references.
+  Handle<String> GetName();
+
+  // Generate code to push the value of the reference on top of the
+  // expression stack.  The reference is expected to be already on top of
+  // the expression stack, and it is consumed by the call unless the
+  // reference is for a compound assignment.
+  // If the reference is not consumed, it is left in place under its value.
+  void GetValue();
+
+  // Generate code to pop a reference, push the value of the reference,
+  // and then spill the stack frame.
+  inline void GetValueAndSpill();
+
+  // Generate code to store the value on top of the expression stack in the
+  // reference.  The reference is expected to be immediately below the value
+  // on the expression stack.  The value is stored in the location specified
+  // by the reference, and is left on top of the stack, after the reference
+  // is popped from beneath it (unloaded).
+  void SetValue(InitState init_state);
+
+ private:
+  CodeGenerator* cgen_;
+  Expression* expression_;
+  Type type_;
+  // Keep the reference on the stack after get, so it can be used by set later.
+  bool persist_after_get_;
+};
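The enum values double as stack-slot counts, which is what lets size() return
the type directly for property references. A self-contained sketch of that
relationship:

// Mirrors Reference::size(): negative types occupy no stack slots.
enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };

int StackSize(Type type) {
  // SLOT -> 0, NAMED -> 1 (the receiver), KEYED -> 2 (receiver and key).
  return (type < SLOT) ? 0 : type;
}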
+
+
+// -----------------------------------------------------------------------------
 // Code generation state
 
 // The state is passed down the AST by the code generator (and back up, in
@@ -89,7 +159,7 @@
 
 
 
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
 // CodeGenerator
 
 class CodeGenerator: public AstVisitor {
@@ -152,16 +222,19 @@
 
   // Number of instructions used for the JS return sequence. The constant is
   // used by the debugger to patch the JS return sequence.
-  static const int kJSReturnSequenceLength = 6;
+  static const int kJSReturnSequenceLength = 7;
+
+  // If the name is an inline runtime function call return the number of
+  // expected arguments. Otherwise return -1.
+  static int InlineRuntimeCallArgumentsCount(Handle<String> name);
 
  private:
   // Construction/Destruction.
   explicit CodeGenerator(MacroAssembler* masm);
-  virtual ~CodeGenerator() { delete masm_; }
 
   // Accessors.
   inline bool is_eval();
-  Scope* scope() const { return scope_; }
+  inline Scope* scope();
 
   // Generating deferred code.
   void ProcessDeferred();
@@ -183,12 +256,55 @@
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
+  // Visit a statement and then spill the virtual frame if control flow can
+  // reach the end of the statement (ie, it does not exit via break,
+  // continue, return, or throw).  This function is used temporarily while
+  // the code generator is being transformed.
+  inline void VisitAndSpill(Statement* statement);
+
+  // Visit a list of statements and then spill the virtual frame if control
+  // flow can reach the end of the list.
+  inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+
   // Main code generation function
-  void Generate(CompilationInfo* info, Mode mode);
+  void Generate(CompilationInfo* info);
+
+  // The following are used by class Reference.
+  void LoadReference(Reference* ref);
+  void UnloadReference(Reference* ref);
+
+  MemOperand ContextOperand(Register context, int index) const {
+    return MemOperand(context, Context::SlotOffset(index));
+  }
+
+  MemOperand SlotOperand(Slot* slot, Register tmp);
+
+  // Expressions
+  MemOperand GlobalObject() const {
+    return ContextOperand(cp, Context::GLOBAL_INDEX);
+  }
+
+  void LoadCondition(Expression* x,
+                     JumpTarget* true_target,
+                     JumpTarget* false_target,
+                     bool force_cc);
+  void Load(Expression* x);
+  void LoadGlobal();
+
+  // Generate code to push the value of an expression on top of the frame
+  // and then spill the frame fully to memory.  This function is used
+  // temporarily while the code generator is being transformed.
+  inline void LoadAndSpill(Expression* expression);
+
+  // Read a value from a slot and leave it on top of the expression stack.
+  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  // Store the value on top of the stack to a slot.
+  void StoreToSlot(Slot* slot, InitState init_state);
 
   struct InlineRuntimeLUT {
     void (CodeGenerator::*method)(ZoneList<Expression*>*);
     const char* name;
+    int nargs;
   };
 
   static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
@@ -217,7 +333,7 @@
 
   // Support for arguments.length and arguments[?].
   void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+  void GenerateArguments(ZoneList<Expression*>* args);
 
   // Support for accessing the class and value fields of an object.
   void GenerateClassOf(ZoneList<Expression*>* args);
@@ -227,13 +343,16 @@
   // Fast support for charCodeAt(n).
   void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
 
+  // Fast support for string.charAt(n) and string[n].
+  void GenerateCharFromCode(ZoneList<Expression*>* args);
+
   // Fast support for object equality testing.
   void GenerateObjectEquals(ZoneList<Expression*>* args);
 
   void GenerateLog(ZoneList<Expression*>* args);
 
   // Fast support for Math.random().
-  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
 
   void GenerateIsObject(ZoneList<Expression*>* args);
   void GenerateIsFunction(ZoneList<Expression*>* args);
@@ -244,10 +363,11 @@
   void GenerateRegExpExec(ZoneList<Expression*>* args);
   void GenerateNumberToString(ZoneList<Expression*>* args);
 
-
-  // Fast support for Math.sin and Math.cos.
-  inline void GenerateMathSin(ZoneList<Expression*>* args);
-  inline void GenerateMathCos(ZoneList<Expression*>* args);
+  // Fast call to math functions.
+  void GenerateMathPow(ZoneList<Expression*>* args);
+  void GenerateMathSin(ZoneList<Expression*>* args);
+  void GenerateMathCos(ZoneList<Expression*>* args);
+  void GenerateMathSqrt(ZoneList<Expression*>* args);
 
   // Simple condition analysis.
   enum ConditionAnalysis {
@@ -282,7 +402,6 @@
   CompilationInfo* info_;
 
   // Code generation state
-  Scope* scope_;
   VirtualFrame* frame_;
   RegisterAllocator* allocator_;
   Condition cc_reg_;
@@ -302,6 +421,7 @@
   friend class JumpTarget;
   friend class Reference;
   friend class FastCodeGenerator;
+  friend class FullCodeGenerator;
   friend class FullCodeGenSyntaxChecker;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index 772bcc0..cdb35ae 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -104,8 +104,24 @@
 }
 
 
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+  masm->Abort("LiveEdit frame dropping is not supported on mips");
+}
+
+void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+  masm->Abort("LiveEdit frame dropping is not supported on mips");
+}
+
 #undef __
 
+
+void Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
+                                   Handle<Code> code) {
+  UNREACHABLE();
+}
+const int Debug::kFrameDropperFrameSize = -1;
+
+
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 } }  // namespace v8::internal
diff --git a/src/mips/fast-codegen-mips.cc b/src/mips/fast-codegen-mips.cc
index c47f632..48a0ce6 100644
--- a/src/mips/fast-codegen-mips.cc
+++ b/src/mips/fast-codegen-mips.cc
@@ -35,6 +35,14 @@
 
 #define __ ACCESS_MASM(masm_)
 
+Register FastCodeGenerator::accumulator0() { return no_reg; }
+Register FastCodeGenerator::accumulator1() { return no_reg; }
+Register FastCodeGenerator::scratch0() { return no_reg; }
+Register FastCodeGenerator::scratch1() { return no_reg; }
+Register FastCodeGenerator::receiver_reg() { return no_reg; }
+Register FastCodeGenerator::context_reg() { return no_reg; }
+
+
 void FastCodeGenerator::Generate(CompilationInfo* info) {
   UNIMPLEMENTED_MIPS();
 }
@@ -45,7 +53,17 @@
 }
 
 
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCodeGenerator::EmitBitOr() {
   UNIMPLEMENTED_MIPS();
 }
 
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index d2c717c..cdc880d 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -91,8 +91,7 @@
 
 
 Address InternalFrame::GetCallerStackPointer() const {
-  UNIMPLEMENTED_MIPS();
-  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
+  return fp() + StandardFrameConstants::kCallerSPOffset;
 }
 
 
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index ec1949d..06e9979 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -104,7 +104,7 @@
   static const int kCallerPCOffset = +1 * kPointerSize;
 
   // FP-relative displacement of the caller's SP.
-  static const int kCallerSPDisplacement = +4 * kPointerSize;
+  static const int kCallerSPDisplacement = +3 * kPointerSize;
 };
 
 
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 920329e..3c29e99 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -146,6 +146,11 @@
 }
 
 
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   UNIMPLEMENTED_MIPS();
 }
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 5598cdf..8c90921 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -74,6 +74,47 @@
 
 void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
   UNIMPLEMENTED_MIPS();
+  // Registers:
+  // a2: name
+  // ra: return address
+
+  // Get the receiver of the function from the stack.
+  __ lw(a3, MemOperand(sp, argc * kPointerSize));
+
+  __ EnterInternalFrame();
+
+  // Push the receiver and the name of the function.
+  __ MultiPush(a2.bit() | a3.bit());
+
+  // Call the entry.
+  __ li(a0, Operand(2));
+  __ li(a1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+
+  // Move result to a1 and leave the internal frame.
+  __ mov(a1, v0);
+  __ LeaveInternalFrame();
+
+  // Check if the receiver is a global object of some sort.
+  Label invoke, global;
+  __ lw(a2, MemOperand(sp, argc * kPointerSize));
+  __ andi(t0, a2, kSmiTagMask);
+  __ Branch(eq, &invoke, t0, Operand(zero_reg));
+  __ GetObjectType(a2, a3, a3);
+  __ Branch(eq, &global, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ Branch(ne, &invoke, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
+
+  // Patch the receiver on the stack.
+  __ bind(&global);
+  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+  __ sw(a2, MemOperand(sp, argc * kPointerSize));
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ bind(&invoke);
+  __ InvokeFunction(a1, actual, JUMP_FUNCTION);
 }
 
 // Defined in ic.cc.
@@ -90,11 +131,6 @@
 
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
-void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -120,11 +156,6 @@
 
 
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
-}
-
-
-void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -145,12 +176,6 @@
 }
 
 
-void KeyedStoreIC::Generate(MacroAssembler* masm,
-                            const ExternalReference& f) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
 }
@@ -162,7 +187,12 @@
 }
 
 
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+  UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -172,12 +202,12 @@
 }
 
 
-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
 }
 
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
index 3301d19..4bd9102 100644
--- a/src/mips/jump-target-mips.cc
+++ b/src/mips/jump-target-mips.cc
@@ -31,6 +31,7 @@
 #include "codegen-inl.h"
 #include "jump-target-inl.h"
 #include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -41,7 +42,37 @@
 #define __ ACCESS_MASM(cgen()->masm())
 
 void JumpTarget::DoJump() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(cgen()->has_valid_frame());
+  // Live non-frame registers are not allowed at unconditional jumps
+  // because we have no way of invalidating the corresponding results
+  // which are still live in the C++ code.
+  ASSERT(cgen()->HasValidEntryRegisters());
+
+  if (is_bound()) {
+    // Backward jump.  There is already a frame expectation at the target.
+    ASSERT(direction_ == BIDIRECTIONAL);
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
+  } else {
+    // Use the current frame as the expected one at the target if necessary.
+    if (entry_frame_ == NULL) {
+      entry_frame_ = cgen()->frame();
+      RegisterFile empty;
+      cgen()->SetFrame(NULL, &empty);
+    } else {
+      cgen()->frame()->MergeTo(entry_frame_);
+      cgen()->DeleteFrame();
+    }
+
+    // The predicate is_linked() should be made true.  Its implementation
+    // detects the presence of a frame pointer in the reaching_frames_ list.
+    if (!is_linked()) {
+      reaching_frames_.Add(NULL);
+      ASSERT(is_linked());
+    }
+  }
+  __ b(&entry_label_);
+  __ nop();   // Branch delay slot nop.
 }
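
The NULL frame pushed onto reaching_frames_ above exists only to flip the
is_linked() predicate, which infers linkage from the list's contents; DoBind()
later clears it again. A reduced sketch of that trick (the types here are invented
stand-ins for the jump-target machinery):

    #include <cassert>
    #include <vector>

    struct VirtualFrame;  // Opaque here.

    struct JumpTarget {
      std::vector<VirtualFrame*> reaching_frames_;
      bool bound_ = false;

      bool is_bound() const { return bound_; }
      // Linked means: not yet bound, but at least one jump reaches here.
      bool is_linked() const { return !bound_ && !reaching_frames_.empty(); }

      void RecordForwardJump() {
        // A NULL entry is enough to make is_linked() true; binding clears it.
        if (!is_linked()) reaching_frames_.push_back(nullptr);
      }
    };

    int main() {
      JumpTarget t;
      assert(!t.is_linked());
      t.RecordForwardJump();
      assert(t.is_linked());
      return 0;
    }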
 
 
@@ -56,12 +87,47 @@
 
 
 void JumpTarget::DoBind() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!is_bound());
+
+  // Live non-frame registers are not allowed at the start of a basic
+  // block.
+  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+  if (cgen()->has_valid_frame()) {
+    // If there is a current frame we can use it on the fall through.
+    if (entry_frame_ == NULL) {
+      entry_frame_ = new VirtualFrame(cgen()->frame());
+    } else {
+      ASSERT(cgen()->frame()->Equals(entry_frame_));
+    }
+  } else {
+    // If there is no current frame we must have an entry frame which we can
+    // copy.
+    ASSERT(entry_frame_ != NULL);
+    RegisterFile empty;
+    cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+  }
+
+  // The predicate is_linked() should be made false.  Its implementation
+  // detects the presence (or absence) of frame pointers in the
+  // reaching_frames_ list.  If we inserted a bogus frame to make
+  // is_linked() true, remove it now.
+  if (is_linked()) {
+    reaching_frames_.Clear();
+  }
+
+  __ bind(&entry_label_);
 }
 
 
 void BreakTarget::Jump() {
-  UNIMPLEMENTED_MIPS();
+  // On MIPS we do not currently emit merge code for jumps, so we need to do
+  // it explicitly here.  The only merging necessary is to drop extra
+  // statement state from the stack.
+  ASSERT(cgen()->has_valid_frame());
+  int count = cgen()->frame()->height() - expected_height_;
+  cgen()->frame()->Drop(count);
+  DoJump();
 }
 
 
@@ -71,7 +137,26 @@
 
 
 void BreakTarget::Bind() {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  // All the forward-reaching frames should have been adjusted at the
+  // jumps to this target.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL ||
+           reaching_frames_[i]->height() == expected_height_);
+  }
+#endif
+  // Drop leftover statement state from the frame before merging, even
+  // on the fall through.  This is so we can bind the return target
+  // with state on the frame.
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    // On MIPS we do not currently emit merge code at binding sites, so we need
+    // to do it explicitly here.  The only merging necessary is to drop extra
+    // statement state from the stack.
+    cgen()->frame()->Drop(count);
+  }
+
+  DoBind();
 }
 
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index b733bdd..c276af5 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -55,7 +55,7 @@
 
 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond, Register r1, const Operand& r2) {
-  Jump(Operand(target), cond, r1, r2);
+  Jump(Operand(target, rmode), cond, r1, r2);
 }
 
 
@@ -81,7 +81,7 @@
 
 void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond, Register r1, const Operand& r2) {
-  Call(Operand(target), cond, r1, r2);
+  Call(Operand(target, rmode), cond, r1, r2);
 }
 
 
@@ -106,7 +106,7 @@
 
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index) {
-  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
 }
 
 void MacroAssembler::LoadRoot(Register destination,
@@ -114,8 +114,7 @@
                               Condition cond,
                               Register src1, const Operand& src2) {
   Branch(NegateCondition(cond), 2, src1, src2);
-  nop();
-  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
 }
 
 
@@ -320,7 +319,6 @@
 }
 
 
-// load wartd in a register
 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
   ASSERT(!j.is_reg());
 
@@ -372,7 +370,7 @@
   int16_t NumToPush = NumberOfBitsSet(regs);
 
   addiu(sp, sp, -4 * NumToPush);
-  for (int16_t i = 0; i < kNumRegisters; i++) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
     }
@@ -385,7 +383,7 @@
   int16_t NumToPush = NumberOfBitsSet(regs);
 
   addiu(sp, sp, -4 * NumToPush);
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = 0; i < kNumRegisters; i++) {
     if ((regs & (1 << i)) != 0) {
       sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
     }
@@ -396,7 +394,7 @@
 void MacroAssembler::MultiPop(RegList regs) {
   int16_t NumSaved = 0;
 
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = 0; i < kNumRegisters; i++) {
     if ((regs & (1 << i)) != 0) {
       lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
     }
@@ -408,7 +406,7 @@
 void MacroAssembler::MultiPopReversed(RegList regs) {
   int16_t NumSaved = 0;
 
-  for (int16_t i = 0; i < kNumRegisters; i++) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
     }
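
The loop-direction swaps change which register lands where: MultiPush now scans the
register set from high to low, so lower-numbered registers end up nearer the stack
top and higher-numbered registers at higher addresses, matching the updated header
comment. A host-side sketch of the slot assignment (RegList modeled as a plain
bitmask; the helper is illustrative):

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t RegList;

    // Print the stack slot (0 = lowest address) each register would occupy,
    // mirroring the descending MultiPush loop above.
    void DescribeMultiPush(RegList regs) {
      int num_to_push = __builtin_popcount(regs);
      int num_saved = 0;
      for (int i = 31; i >= 0; i--) {              // High to low, as patched.
        if (regs & (1u << i)) {
          int slot = num_to_push - ++num_saved;    // Highest reg, highest slot.
          std::printf("r%d -> slot %d\n", i, slot);
        }
      }
    }

    int main() {
      DescribeMultiPush((1u << 4) | (1u << 5) | (1u << 31));  // a0, a1, ra.
      return 0;
    }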
@@ -422,7 +420,7 @@
 // Trashes the at register if no scratch register is provided.
 void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
                             const Operand& rt, Register scratch) {
-  Register r2;
+  Register r2 = no_reg;
   if (rt.is_reg()) {
     // We don't want any other register but scratch clobbered.
     ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
@@ -484,12 +482,14 @@
     default:
       UNREACHABLE();
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 
 void MacroAssembler::Branch(Condition cond,  Label* L, Register rs,
                             const Operand& rt, Register scratch) {
-  Register r2;
+  Register r2 = no_reg;
   if (rt.is_reg()) {
     r2 = rt.rm_;
   } else if (cond != cc_always) {
@@ -550,6 +550,8 @@
     default:
       UNREACHABLE();
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
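
Every MIPS branch has a delay slot: the instruction immediately following the
branch executes unconditionally. The hunks above move the filler nop into Branch()
itself, which is why later hunks delete the explicit nop() at each call site. A toy
emitter showing the call-site effect (not the real MacroAssembler API):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy emitter: the delay-slot nop is appended centrally, so callers no
    // longer emit it themselves.
    struct ToyMasm {
      std::vector<std::string> code;
      void Branch(const std::string& target) {
        code.push_back("b " + target);
        code.push_back("nop");  // Delay-slot filler, emitted here once.
      }
    };

    int main() {
      ToyMasm m;
      m.Branch("entry");  // No trailing m.nop() at the call site any more.
      for (const std::string& insn : m.code) std::printf("%s\n", insn.c_str());
      return 0;
    }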
 
 
@@ -559,7 +561,7 @@
 // cases, so we keep slt and add an intermediate third instruction.
 void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
                                    const Operand& rt, Register scratch) {
-  Register r2;
+  Register r2 = no_reg;
   if (rt.is_reg()) {
     r2 = rt.rm_;
   } else if (cond != cc_always) {
@@ -629,12 +631,14 @@
     default:
       UNREACHABLE();
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 
 void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
                                    const Operand& rt, Register scratch) {
-  Register r2;
+  Register r2 = no_reg;
   if (rt.is_reg()) {
     r2 = rt.rm_;
   } else if (cond != cc_always) {
@@ -704,6 +708,8 @@
     default:
       UNREACHABLE();
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 
@@ -714,7 +720,6 @@
       jr(target.rm());
     } else {
       Branch(NegateCondition(cond), 2, rs, rt);
-      nop();
       jr(target.rm());
     }
   } else {    // !target.is_reg()
@@ -723,20 +728,20 @@
         j(target.imm32_);
       } else {
         Branch(NegateCondition(cond), 2, rs, rt);
-        nop();
-        j(target.imm32_);  // will generate only one instruction.
+        j(target.imm32_);  // Will generate only one instruction.
       }
     } else {  // MustUseAt(target)
-      li(at, rt);
+      li(at, target);
       if (cond == cc_always) {
         jr(at);
       } else {
         Branch(NegateCondition(cond), 2, rs, rt);
-        nop();
-        jr(at);  // will generate only one instruction.
+        jr(at);  // Will generate only one instruction.
       }
     }
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 
@@ -747,7 +752,6 @@
       jalr(target.rm());
     } else {
       Branch(NegateCondition(cond), 2, rs, rt);
-      nop();
       jalr(target.rm());
     }
   } else {    // !target.is_reg()
@@ -756,20 +760,20 @@
         jal(target.imm32_);
       } else {
         Branch(NegateCondition(cond), 2, rs, rt);
-        nop();
-        jal(target.imm32_);  // will generate only one instruction.
+        jal(target.imm32_);  // Will generate only one instruction.
       }
     } else {  // MustUseAt(target)
-      li(at, rt);
+      li(at, target);
       if (cond == cc_always) {
         jalr(at);
       } else {
         Branch(NegateCondition(cond), 2, rs, rt);
-        nop();
-        jalr(at);  // will generate only one instruction.
+        jalr(at);  // Will generate only one instruction.
       }
     }
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
@@ -787,12 +791,73 @@
 }
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// ---------------------------------------------------------------------------
+// Debugger Support
+
+void MacroAssembler::DebugBreak() {
+  UNIMPLEMENTED_MIPS();
+}
+#endif
+
+
 // ---------------------------------------------------------------------------
 // Exception handling
 
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
-  UNIMPLEMENTED_MIPS();
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  // The return address is passed in register ra.
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      li(t0, Operand(StackHandler::TRY_CATCH));
+    } else {
+      li(t0, Operand(StackHandler::TRY_FINALLY));
+    }
+    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+           && StackHandlerConstants::kPCOffset == 3 * kPointerSize
+           && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+    // Save the current handler as the next handler.
+    LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+    lw(t1, MemOperand(t2));
+
+    addiu(sp, sp, -StackHandlerConstants::kSize);
+    sw(ra, MemOperand(sp, 12));
+    sw(fp, MemOperand(sp, 8));
+    sw(t0, MemOperand(sp, 4));
+    sw(t1, MemOperand(sp, 0));
+
+    // Link this handler as the new current one.
+    sw(sp, MemOperand(t2));
+
+  } else {
+    // Must preserve a0-a3, and s0 (argv).
+    ASSERT(try_location == IN_JS_ENTRY);
+    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+           && StackHandlerConstants::kPCOffset == 3 * kPointerSize
+           && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for fp. We expect the code throwing an exception to check fp
+    // before dereferencing it to restore the context.
+    li(t0, Operand(StackHandler::ENTRY));
+
+    // Save the current handler as the next handler.
+    LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+    lw(t1, MemOperand(t2));
+
+    addiu(sp, sp, -StackHandlerConstants::kSize);
+    sw(ra, MemOperand(sp, 12));
+    sw(zero_reg, MemOperand(sp, 8));
+    sw(t0, MemOperand(sp, 4));
+    sw(t1, MemOperand(sp, 0));
+
+    // Link this handler as the new current one.
+    sw(sp, MemOperand(t2));
+  }
 }
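
The four stores lay out a StackHandler exactly as the ASSERT demands: the
next-handler link at sp+0, the state word at +4, fp at +8, and the return address
at +12. The same layout restated as a plain struct (a sketch; the generated code
addresses the fields by offset, not through a struct):

    #include <cstddef>
    #include <cstdint>

    // Mirror of the layout checked by the ASSERT in PushTryHandler above.
    struct StackHandler {
      uint32_t next;   // sp + 0:  previous handler (Top::k_handler_address).
      uint32_t state;  // sp + 4:  TRY_CATCH, TRY_FINALLY or ENTRY.
      uint32_t fp;     // sp + 8:  frame pointer (0 for JS entry frames).
      uint32_t pc;     // sp + 12: return address (ra).
    };

    static_assert(offsetof(StackHandler, state) == 1 * sizeof(uint32_t), "");
    static_assert(offsetof(StackHandler, fp) == 2 * sizeof(uint32_t), "");
    static_assert(offsetof(StackHandler, pc) == 3 * sizeof(uint32_t), "");

    int main() { return 0; }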
 
 
@@ -802,12 +867,233 @@
 
 
 
 // ---------------------------------------------------------------------------
 // Activation frames
 
+void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) {
+  Label extra_push, end;
+
+  andi(scratch, sp, 7);
+
+  // We check the number of arguments and the receiver on the stack, all of
+  // them word sized.  We add one for sp, which we also want to store on the
+  // stack.
+  if (((arg_count + 1) % 2) == 0) {
+    Branch(ne, &extra_push, scratch, Operand(zero_reg));
+  } else {  // ((arg_count + 1) % 2) == 1
+    Branch(eq, &extra_push, scratch, Operand(zero_reg));
+  }
+
+  // Save sp on the stack.
+  mov(scratch, sp);
+  Push(scratch);
+  b(&end);
+
+  // Align before saving sp on the stack.
+  bind(&extra_push);
+  mov(scratch, sp);
+  addiu(sp, sp, -8);
+  sw(scratch, MemOperand(sp));
+
+  // The stack is aligned and sp is stored on the top.
+  bind(&end);
+}
+
+
+void MacroAssembler::ReturnFromAlignedCall() {
+  lw(sp, MemOperand(sp));
+}
+
+
+// ---------------------------------------------------------------------------
+// JavaScript invokes
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_reg,
+                                    Label* done,
+                                    InvokeFlag flag) {
+  bool definitely_matches = false;
+  Label regular_invoke;
+
+  // Check whether the expected and actual argument counts match. If not,
+  // set up registers according to the ArgumentsAdaptorTrampoline contract:
+  //  a0: actual arguments count
+  //  a1: function (passed through to callee)
+  //  a2: expected arguments count
+  //  a3: callee code entry
+
+  // The code below is made a lot easier because the calling code already sets
+  // up actual and expected registers according to the contract if values are
+  // passed in registers.
+  ASSERT(actual.is_immediate() || actual.reg().is(a0));
+  ASSERT(expected.is_immediate() || expected.reg().is(a2));
+  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
+
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      li(a0, Operand(actual.immediate()));
+      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+      if (expected.immediate() == sentinel) {
+        // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip the adaptation code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        li(a2, Operand(expected.immediate()));
+      }
+    }
+  } else if (actual.is_immediate()) {
+    Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
+    li(a0, Operand(actual.immediate()));
+  } else {
+    Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg()));
+  }
+
+  if (!definitely_matches) {
+    if (!code_constant.is_null()) {
+      li(a3, Operand(code_constant));
+      addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
+    }
+
+    ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
+    if (flag == CALL_FUNCTION) {
+      CallBuiltin(adaptor);
+      b(done);
+      nop();
+    } else {
+      JumpToBuiltin(adaptor);
+    }
+    bind(&regular_invoke);
+  }
+}
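+
+// InvokePrologue reduces to a three-way decision: counts equal, call
+// directly; expected equals the don't-adapt sentinel, pretend they match;
+// otherwise go through ArgumentsAdaptorTrampoline with a0 = actual,
+// a2 = expected, a3 = code entry.  The decision in isolation (the sentinel
+// value below is illustrative only):
+//
+//     #include <cstdio>
+//
+//     const int kDontAdaptArgumentsSentinel = -1;  // Illustrative value.
+//
+//     enum InvokePath { kDirect, kThroughAdaptor };
+//
+//     InvokePath ChooseInvokePath(int expected, int actual) {
+//       if (expected == actual) return kDirect;
+//       if (expected == kDontAdaptArgumentsSentinel) return kDirect;
+//       return kThroughAdaptor;  // a0 = actual, a2 = expected, a3 = entry.
+//     }
+//
+//     int main() {
+//       std::printf("%d %d %d\n",
+//                   ChooseInvokePath(2, 2),    // kDirect
+//                   ChooseInvokePath(-1, 5),   // kDirect (sentinel)
+//                   ChooseInvokePath(2, 3));   // kThroughAdaptor
+//       return 0;
+//     }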
+
+void MacroAssembler::InvokeCode(Register code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                InvokeFlag flag) {
+  Label done;
+
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    Call(code);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    Jump(code);
+  }
+  // Continue here if InvokePrologue handled the invocation through the
+  // arguments adaptor instead (mismatched parameter counts).
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                RelocInfo::Mode rmode,
+                                InvokeFlag flag) {
+  Label done;
+
+  InvokePrologue(expected, actual, code, no_reg, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    Call(code, rmode);
+  } else {
+    Jump(code, rmode);
+  }
+  // Continue here if InvokePrologue handled the invocation through the
+  // arguments adaptor instead (mismatched parameter counts).
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  // The contract with called JS functions requires the function to be
+  // passed in a1.
+  ASSERT(function.is(a1));
+  Register expected_reg = a2;
+  Register code_reg = a3;
+
+  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  lw(expected_reg,
+      FieldMemOperand(code_reg,
+                      SharedFunctionInfo::kFormalParameterCountOffset));
+  lw(code_reg,
+      MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+  addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
+
+  ParameterCount expected(expected_reg);
+  InvokeCode(code_reg, expected, actual, flag);
+}
+
+
+// ---------------------------------------------------------------------------
+// Support functions.
+
+void MacroAssembler::GetObjectType(Register function,
+                                   Register map,
+                                   Register type_reg) {
+  lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
+  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
+
+
+void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
+  // Load builtin address.
+  LoadExternalReference(t9, builtin_entry);
+  lw(t9, MemOperand(t9));  // Deref address.
+  addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+  // Call and allocate argument slots.
+  jalr(t9);
+  // Use the branch delay slot to allocate argument slots.
+  addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+  addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+}
+
+
+void MacroAssembler::CallBuiltin(Register target) {
+  // Target already holds the target address.
+  // Call and allocate argument slots.
+  jalr(target);
+  // Use the branch delay slot to allocate argument slots.
+  addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+  addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+}
+
+
+void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
+  // Load builtin address.
+  LoadExternalReference(t9, builtin_entry);
+  lw(t9, MemOperand(t9));  // Deref address.
+  addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+  // Jump and allocate argument slots.
+  jr(t9);
+  // Use the branch delay slot to allocate argument slots.
+  addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+}
+
+
+void MacroAssembler::JumpToBuiltin(Register target) {
+  // Target already holds the target address.
+  // Jump and allocate argument slots.
+  jr(target);
+  // Use the branch delay slot to allocate argument slots.
+  addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+}
+
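+
+// CallBuiltin and JumpToBuiltin lean on the delay slot again: the addiu
+// that carves out the o32 argument slots sits right after jalr/jr, so it
+// executes before the callee runs, and the matching addiu after jalr runs
+// only when the call returns.  A toy model of that ordering:
+//
+//     #include <cstdio>
+//
+//     // Toy model of the jalr delay slot: the instruction issued right
+//     // after the call still executes before the callee runs.
+//     static int sp = 0;
+//
+//     void Callee() { std::printf("callee sees sp = %d\n", sp); }  // -16.
+//
+//     int main() {
+//       // jalr t9 ...
+//       sp -= 16;    // ... addiu in the delay slot: takes effect first.
+//       Callee();
+//       sp += 16;    // The addiu after the call: releases the slots.
+//       std::printf("after return, sp = %d\n", sp);  // Back to 0.
+//       return 0;
+//     }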
+
+// ---------------------------------------------------------------------------
+// Runtime calls
+
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                               Register r1, const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
 }
 
 
@@ -816,24 +1102,56 @@
 }
 
 
+void MacroAssembler::IllegalOperation(int num_arguments) {
+  if (num_arguments > 0) {
+    addiu(sp, sp, num_arguments * kPointerSize);
+  }
+  LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+}
+
+
 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
-  UNIMPLEMENTED_MIPS();
+  // All parameters are on the stack. v0 has the return value after call.
+
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments match the
+  // expectation.
+  if (f->nargs >= 0 && f->nargs != num_arguments) {
+    IllegalOperation(num_arguments);
+    return;
+  }
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  li(a0, num_arguments);
+  LoadExternalReference(a1, ExternalReference(f));
+  CEntryStub stub(1);
+  CallStub(&stub);
 }
 
 
 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+                                               int num_arguments,
+                                               int result_size) {
   UNIMPLEMENTED_MIPS();
 }
 
 
-void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
-  UNIMPLEMENTED_MIPS();
+  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
 }
 
 
-void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -874,6 +1192,8 @@
 }
 
 
+// ---------------------------------------------------------------------------
+// Debugging
 
 void MacroAssembler::Assert(Condition cc, const char* msg,
                             Register rs, Operand rt) {
@@ -891,5 +1211,113 @@
   UNIMPLEMENTED_MIPS();
 }
 
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+  addiu(sp, sp, -5 * kPointerSize);
+  li(t0, Operand(Smi::FromInt(type)));
+  li(t1, Operand(CodeObject()));
+  sw(ra, MemOperand(sp, 4 * kPointerSize));
+  sw(fp, MemOperand(sp, 3 * kPointerSize));
+  sw(cp, MemOperand(sp, 2 * kPointerSize));
+  sw(t0, MemOperand(sp, 1 * kPointerSize));
+  sw(t1, MemOperand(sp, 0 * kPointerSize));
+  addiu(fp, sp, 3 * kPointerSize);
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+  mov(sp, fp);
+  lw(fp, MemOperand(sp, 0 * kPointerSize));
+  lw(ra, MemOperand(sp, 1 * kPointerSize));
+  addiu(sp, sp, 2 * kPointerSize);
+}
+
+
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
+                                    Register hold_argc,
+                                    Register hold_argv,
+                                    Register hold_function) {
+  // Compute the argv pointer and keep it in a callee-saved register.
+  // a0 is argc.
+  sll(t0, a0, kPointerSizeLog2);
+  addu(hold_argv, sp, t0);
+  addiu(hold_argv, hold_argv, -kPointerSize);
+
+  // Compute callee's stack pointer before making changes and save it as
+  // t1 register so that it is restored as sp register on exit, thereby
+  // popping the args.
+  // t1 = sp + kPointerSize * #args
+  addu(t1, sp, t0);
+
+  // Align the stack at this point.
+  AlignStack(0);
+
+  // Save registers.
+  addiu(sp, sp, -12);
+  sw(t1, MemOperand(sp, 8));
+  sw(ra, MemOperand(sp, 4));
+  sw(fp, MemOperand(sp, 0));
+  mov(fp, sp);  // Set up the new frame pointer.
+
+  // Push debug marker.
+  if (mode == ExitFrame::MODE_DEBUG) {
+    Push(zero_reg);
+  } else {
+    li(t0, Operand(CodeObject()));
+    Push(t0);
+  }
+
+  // Save the frame pointer and the context in top.
+  LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+  sw(fp, MemOperand(t0));
+  LoadExternalReference(t0, ExternalReference(Top::k_context_address));
+  sw(cp, MemOperand(t0));
+
+  // Set up argc and the builtin function in callee-saved registers.
+  mov(hold_argc, a0);
+  mov(hold_function, a1);
+}
+
+
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
+  // Clear top frame.
+  LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+  sw(zero_reg, MemOperand(t0));
+
+  // Restore current context from top and clear it in debug mode.
+  LoadExternalReference(t0, ExternalReference(Top::k_context_address));
+  lw(cp, MemOperand(t0));
+#ifdef DEBUG
+  sw(a3, MemOperand(t0));
+#endif
+
+  // Pop the arguments, restore registers, and return.
+  mov(sp, fp);  // Respect ABI stack constraint.
+  lw(fp, MemOperand(sp, 0));
+  lw(ra, MemOperand(sp, 4));
+  lw(sp, MemOperand(sp, 8));
+  jr(ra);
+  nop();  // Branch delay slot nop.
+}
+
+
+void MacroAssembler::AlignStack(int offset) {
+  // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
+  //     and an offset of 1 aligns to 4 modulo 8 bytes.
+  int activation_frame_alignment = OS::ActivationFrameAlignment();
+  if (activation_frame_alignment != kPointerSize) {
+    // This code needs to be made more general if this assert doesn't hold.
+    ASSERT(activation_frame_alignment == 2 * kPointerSize);
+    if (offset == 0) {
+      andi(t0, sp, activation_frame_alignment - 1);
+      Push(zero_reg, eq, t0, zero_reg);
+    } else {
+      andi(t0, sp, activation_frame_alignment - 1);
+      addiu(t0, t0, -4);
+      Push(zero_reg, eq, t0, zero_reg);
+    }
+  }
+}
+
 } }  // namespace v8::internal
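
AlignStack(0) forces sp to 0 modulo 8 and AlignStack(1) to 4 modulo 8 by
conditionally pushing one filler word. The decision is plain modular arithmetic,
isolated here (the helper is invented for illustration):

    #include <cassert>

    // Does AlignStack(offset) need to push one 4-byte filler word?
    // Target: sp % 8 == 0 when offset == 0, sp % 8 == 4 when offset == 1.
    bool NeedsFillerPush(unsigned sp, int offset) {
      unsigned target = (offset == 0) ? 0u : 4u;
      return (sp & 7u) != target;
    }

    int main() {
      assert(!NeedsFillerPush(0x1000, 0));  // Already 8-aligned.
      assert(NeedsFillerPush(0x1004, 0));   // One 4-byte push fixes it.
      assert(!NeedsFillerPush(0x1004, 1));
      return 0;
    }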
 
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index aea9836..0f0365b 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -41,6 +41,7 @@
 // unless we know exactly what we do.
 
 // Registers aliases
+// cp is assumed to be a callee-saved register.
 const Register cp = s7;     // JavaScript context pointer
 const Register fp = s8_fp;  // Alias fp
 
@@ -102,10 +103,10 @@
   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it is used by v8, for example
   // in CodeGenerator::ProcessDeferred().
+  // Currently the branch delay slot is filled by the MacroAssembler.
   // Prefer b(Label) for code generation.
   void jmp(Label* L) {
     Branch(cc_always, L);
-    nop();
   }
 
   // Load an object from the root table.
@@ -115,11 +116,12 @@
                 Heap::RootListIndex index,
                 Condition cond, Register src1, const Operand& src2);
 
-  // Sets the remembered set bit for [address+offset], where address is the
-  // address of the heap object 'object'.  The address must be in the first 8K
-  // of an allocated page. The 'scratch' register is used in the
-  // implementation and all 3 registers are clobbered by the operation, as
-  // well as the ip register.
+  // Load an external reference.
+  void LoadExternalReference(Register reg, ExternalReference ext) {
+    li(reg, Operand(ext));
+  }
+
+  // Sets the remembered set bit for [address+offset].
   void RecordWrite(Register object, Register offset, Register scratch);
 
 
@@ -182,19 +184,8 @@
 
 
   // Push multiple registers on the stack.
-  // With MultiPush, lower registers are pushed first on the stack.
-  // For example if you push t0, t1, s0, and ra you get:
-  // |                       |
-  // |-----------------------|
-  // |         t0            |                     +
-  // |-----------------------|                    |
-  // |         t1            |                    |
-  // |-----------------------|                    |
-  // |         s0            |                    v
-  // |-----------------------|                     -
-  // |         ra            |
-  // |-----------------------|
-  // |                       |
+  // Registers are saved in numerical order, with higher-numbered registers
+  // saved at higher memory addresses.
   void MultiPush(RegList regs);
   void MultiPushReversed(RegList regs);
   void Push(Register src) {
@@ -206,7 +197,6 @@
   void Push(Register src, Condition cond, Register tst1, Register tst2) {
     // Since we don't have conditional execution we use a Branch.
     Branch(cond, 3, tst1, Operand(tst2));
-    nop();
     Addu(sp, sp, Operand(-kPointerSize));
     sw(src, MemOperand(sp, 0));
   }
@@ -225,11 +215,71 @@
 
 
   // ---------------------------------------------------------------------------
+  // Activation frames
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  // Enter a specific kind of exit frame, in either normal or debug mode.
+  // Expects the number of arguments in register a0 and
+  // the builtin function to call in register a1.
+  // On output hold_argc, hold_argv, and hold_function are set up.
+  void EnterExitFrame(ExitFrame::Mode mode,
+                      Register hold_argc,
+                      Register hold_argv,
+                      Register hold_function);
+
+  // Leave the current exit frame. Expects the return value in v0.
+  void LeaveExitFrame(ExitFrame::Mode mode);
+
+  // Align the stack by optionally pushing a Smi zero.
+  void AlignStack(int offset);
+
+  void SetupAlignedCall(Register scratch, int arg_count = 0);
+  void ReturnFromAlignedCall();
+
+
+  // ---------------------------------------------------------------------------
+  // JavaScript invokes
+
+  // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(Register code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag);
+
+  void InvokeCode(Handle<Code> code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  RelocInfo::Mode rmode,
+                  InvokeFlag flag);
+
+  // Invoke the JavaScript function in the given register. Changes the
+  // current context to the context in the function before invoking.
+  void InvokeFunction(Register function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // ---------------------------------------------------------------------------
+  // Debugger Support
+
+  void SaveRegistersToMemory(RegList regs);
+  void RestoreRegistersFromMemory(RegList regs);
+  void CopyRegistersFromMemoryToStack(Register base, RegList regs);
+  void CopyRegistersFromStackToMemory(Register base,
+                                      Register scratch,
+                                      RegList regs);
+  void DebugBreak();
+#endif
+
+
+  // ---------------------------------------------------------------------------
   // Exception handling
 
   // Push a new try handler and link into try handler chain.
-  // The return address must be passed in register lr.
-  // On exit, r0 contains TOS (code slot).
+  // The return address must be passed in register ra.
   void PushTryHandler(CodeLocation try_location, HandlerType type);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
@@ -240,6 +290,10 @@
   // ---------------------------------------------------------------------------
   // Support functions.
 
+  void GetObjectType(Register function,
+                     Register map,
+                     Register type_reg);
+
   inline void BranchOnSmi(Register value, Label* smi_label,
                           Register scratch = at) {
     ASSERT_EQ(0, kSmiTag);
@@ -255,6 +309,15 @@
     Branch(ne, not_smi_label, scratch, Operand(zero_reg));
   }
 
+  void CallBuiltin(ExternalReference builtin_entry);
+  void CallBuiltin(Register target);
+  void JumpToBuiltin(ExternalReference builtin_entry);
+  void JumpToBuiltin(Register target);
+
+  // Generates code for reporting that an illegal operation has
+  // occurred.
+  void IllegalOperation(int num_arguments);
+
 
   // ---------------------------------------------------------------------------
   // Runtime calls
@@ -268,21 +331,25 @@
   void StubReturn(int argc);
 
   // Call a runtime routine.
-  // Eventually this should be used for all C calls.
   void CallRuntime(Runtime::Function* f, int num_arguments);
 
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId fid, int num_arguments);
 
   // Tail call of a runtime routine (jump).
-  // Like JumpToRuntime, but also takes care of passing the number
+  // Like JumpToExternalReference, but also takes care of passing the number
   // of parameters.
-  void TailCallRuntime(const ExternalReference& ext,
+  void TailCallExternalReference(const ExternalReference& ext,
+                                 int num_arguments,
+                                 int result_size);
+
+  // Convenience function: tail call a runtime routine (jump).
+  void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
   // Jump to the builtin routine.
-  void JumpToRuntime(const ExternalReference& builtin);
+  void JumpToExternalReference(const ExternalReference& builtin);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
@@ -339,20 +406,33 @@
   bool allow_stub_calls() { return allow_stub_calls_; }
 
  private:
-  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-
-  // Get the code for the given builtin. Returns if able to resolve
-  // the function in the 'resolved' flag.
-  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
   List<Unresolved> unresolved_;
   bool generating_stub_;
   bool allow_stub_calls_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
+
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      Handle<Code> code_constant,
+                      Register code_reg,
+                      Label* done,
+                      InvokeFlag flag);
+
+  // Get the code for the given builtin. Returns if able to resolve
+  // the function in the 'resolved' flag.
+  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+  // Activation support.
+  // EnterFrame clobbers t0 and t1.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
 };
 
 
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 2e2dc86..bdb3b7f 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -31,7 +31,7 @@
 
 #include "disasm.h"
 #include "assembler.h"
-#include "globals.h"    // Need the bit_cast
+#include "globals.h"    // Need the BitCast
 #include "mips/constants-mips.h"
 #include "mips/simulator-mips.h"
 
@@ -139,7 +139,7 @@
   sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
   Debug();
 }
-#endif  // def GENERATED_CODE_COVERAGE
+#endif  // GENERATED_CODE_COVERAGE
 
 
 int32_t Debugger::GetRegisterValue(int regnum) {
@@ -604,7 +604,7 @@
 
 void Simulator::set_fpu_register_double(int fpureg, double value) {
   ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  *v8i::bit_cast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
+  *v8i::BitCast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
 }
 
 
@@ -625,7 +625,7 @@
 
 double Simulator::get_fpu_register_double(int fpureg) const {
   ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  return *v8i::bit_cast<double*, int32_t*>(
+  return *v8i::BitCast<double*, int32_t*>(
       const_cast<int32_t*>(&FPUregisters_[fpureg]));
 }
 
@@ -901,7 +901,7 @@
           break;
         case MFHC1:
           fp_out = get_fpu_register_double(fs_reg);
-          alu_out = *v8i::bit_cast<int32_t*, double*>(&fp_out);
+          alu_out = *v8i::BitCast<int32_t*, double*>(&fp_out);
           break;
         case MTC1:
         case MTHC1:
@@ -1644,5 +1644,5 @@
 
 } }  // namespace assembler::mips
 
-#endif  // !defined(__mips)
+#endif  // __mips
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index a87a49b..0b2d2c3 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -72,20 +72,6 @@
 }
 
 
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
-                                             Register receiver,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* miss) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x249);
-}
-
-
 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                                  Register receiver,
                                                  Register scratch1,
@@ -99,7 +85,6 @@
 // After executing generated code, the receiver_reg and name_reg
 // may be clobbered.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Builtins::Name storage_extend,
                                       JSObject* object,
                                       int index,
                                       Map* transition,
@@ -120,18 +105,6 @@
 #define __ ACCESS_MASM(masm())
 
 
-Register StubCompiler::CheckPrototypes(JSObject* object,
-                                       Register object_reg,
-                                       JSObject* holder,
-                                       Register holder_reg,
-                                       Register scratch,
-                                       String* name,
-                                       Label* miss) {
-  UNIMPLEMENTED_MIPS();
-  return at;    // UNIMPLEMENTED RETURN
-}
-
-
 void StubCompiler::GenerateLoadField(JSObject* object,
                                      JSObject* holder,
                                      Register receiver,
@@ -187,15 +160,58 @@
 
 
 Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+  // Registers:
+  // a1: function
+  // ra: return address
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+  // Preserve the function.
+  __ Push(a1);
+  // Set up the aligned call.
+  __ SetupAlignedCall(t0, 1);
+  // Push the function on the stack as the argument to the runtime function.
+  __ Push(a1);
+  // Call the runtime function.
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  __ ReturnFromAlignedCall();
+  // Calculate the entry point.
+  __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+  // Restore saved function.
+  __ Pop(a1);
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+  // Do a tail-call of the compiled function.
+  __ Jump(t9);
+
+  return GetCodeWithFlags(flags, "LazyCompileStub");
+}
+
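+
+// The addiu computing t9 above is the usual tagged-pointer arithmetic:
+// v0 holds a HeapObject pointer with low bit 1 (kHeapObjectTag), and the
+// machine code begins Code::kHeaderSize bytes into the object, so one add
+// both untags and skips the header.  In plain arithmetic (the header size
+// below is illustrative, not V8's real value):
+//
+//     #include <cassert>
+//     #include <cstdint>
+//
+//     const intptr_t kHeapObjectTag = 1;    // Heap pointers: low bit 1.
+//     const intptr_t kCodeHeaderSize = 32;  // Illustrative only.
+//
+//     // Untag the Code object pointer and skip its header in one add.
+//     intptr_t EntryPoint(intptr_t tagged_code_object) {
+//       return tagged_code_object + kCodeHeaderSize - kHeapObjectTag;
+//     }
+//
+//     int main() {
+//       intptr_t object = 0x2000;                   // Untagged address.
+//       intptr_t tagged = object + kHeapObjectTag;  // As held in v0.
+//       assert(EntryPoint(tagged) == object + kCodeHeaderSize);
+//       return 0;
+//     }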
+
+Object* CallStubCompiler::CompileCallField(JSObject* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
   UNIMPLEMENTED_MIPS();
   return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
 }
 
 
-Object* CallStubCompiler::CompileCallField(Object* object,
-                                           JSObject* holder,
-                                           int index,
-                                           String* name) {
+Object* CallStubCompiler::CompileArrayPushCall(Object* object,
+                                               JSObject* holder,
+                                               JSFunction* function,
+                                               String* name,
+                                               CheckType check) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileArrayPopCall(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              CheckType check) {
   UNIMPLEMENTED_MIPS();
   return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
 }
@@ -211,7 +227,7 @@
 }
 
 
-Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                  JSObject* holder,
                                                  String* name) {
   UNIMPLEMENTED_MIPS();
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
index fad7ec4..c2116de 100644
--- a/src/mips/virtual-frame-mips.cc
+++ b/src/mips/virtual-frame-mips.cc
@@ -32,6 +32,7 @@
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "scopes.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -41,17 +42,6 @@
 
 #define __ ACCESS_MASM(masm())
 
-
-// On entry to a function, the virtual frame already contains the
-// receiver and the parameters.  All initial frame elements are in
-// memory.
-VirtualFrame::VirtualFrame()
-    : elements_(parameter_count() + local_count() + kPreallocatedElements),
-      stack_pointer_(parameter_count()) {  // 0-based index of TOS.
-  UNIMPLEMENTED_MIPS();
-}
-
-
 void VirtualFrame::SyncElementBelowStackPointer(int index) {
   UNREACHABLE();
 }
@@ -63,7 +53,12 @@
 
 
 void VirtualFrame::SyncRange(int begin, int end) {
-  UNIMPLEMENTED_MIPS();
+  // All elements are in memory on MIPS (i.e. synced).
+#ifdef DEBUG
+  for (int i = begin; i <= end; i++) {
+    ASSERT(elements_[i].is_synced());
+  }
+#endif
 }
 
 
@@ -73,7 +68,13 @@
 
 
 void VirtualFrame::Enter() {
-  UNIMPLEMENTED_MIPS();
+  // TODO(MIPS): Implement DEBUG
+
+  // We are about to push four values to the frame.
+  Adjust(4);
+  __ MultiPush(ra.bit() | fp.bit() | cp.bit() | a1.bit());
+  // Adjust FP to point to saved FP.
+  __ addiu(fp, sp, 2 * kPointerSize);
 }
 
 
@@ -83,7 +84,17 @@
 
 
 void VirtualFrame::AllocateStackSlots() {
-  UNIMPLEMENTED_MIPS();
+  int count = local_count();
+  if (count > 0) {
+    Comment cmnt(masm(), "[ Allocate space for locals");
+    Adjust(count);
+    // Initialize stack slots with 'undefined' value.
+    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+    __ addiu(sp, sp, -count * kPointerSize);
+    for (int i = 0; i < count; i++) {
+      __ sw(t0, MemOperand(sp, (count - i - 1) * kPointerSize));
+    }
+  }
 }
 
 
@@ -138,12 +149,16 @@
 
 
 void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
-  UNIMPLEMENTED_MIPS();
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(f, arg_count);
 }
 
 
 void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  UNIMPLEMENTED_MIPS();
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(id, arg_count);
 }
 
 
@@ -165,16 +180,37 @@
 }
 
 
-void VirtualFrame::RawCallCodeObject(Handle<Code> code,
-                                       RelocInfo::Mode rmode) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
 void VirtualFrame::CallCodeObject(Handle<Code> code,
                                   RelocInfo::Mode rmode,
                                   int dropped_args) {
-  UNIMPLEMENTED_MIPS();
+  switch (code->kind()) {
+    case Code::CALL_IC:
+      break;
+    case Code::FUNCTION:
+      UNIMPLEMENTED_MIPS();
+      break;
+    case Code::KEYED_LOAD_IC:
+      UNIMPLEMENTED_MIPS();
+      break;
+    case Code::LOAD_IC:
+      UNIMPLEMENTED_MIPS();
+      break;
+    case Code::KEYED_STORE_IC:
+      UNIMPLEMENTED_MIPS();
+      break;
+    case Code::STORE_IC:
+      UNIMPLEMENTED_MIPS();
+      break;
+    case Code::BUILTIN:
+      UNIMPLEMENTED_MIPS();
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  Forget(dropped_args);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ Call(code, rmode);
 }
 
 
@@ -197,7 +233,24 @@
 
 
 void VirtualFrame::Drop(int count) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count >= 0);
+  ASSERT(height() >= count);
+  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
+
+  // Emit code to lower the stack pointer if necessary.
+  if (num_virtual_elements < count) {
+    int num_dropped = count - num_virtual_elements;
+    stack_pointer_ -= num_dropped;
+    __ addiu(sp, sp, num_dropped * kPointerSize);
+  }
+
+  // Discard elements from the virtual frame and free any registers.
+  for (int i = 0; i < count; i++) {
+    FrameElement dropped = elements_.RemoveLast();
+    if (dropped.is_register()) {
+      Unuse(dropped.reg());
+    }
+  }
 }
 
 
@@ -209,27 +262,50 @@
 Result VirtualFrame::Pop() {
   UNIMPLEMENTED_MIPS();
   Result res = Result();
-  return res;    // UNIMPLEMENTED RETUR
+  return res;    // UNIMPLEMENTED RETURN
 }
 
 
 void VirtualFrame::EmitPop(Register reg) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ Pop(reg);
 }
 
+
 void VirtualFrame::EmitMultiPop(RegList regs) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(stack_pointer_ == element_count() - 1);
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      stack_pointer_--;
+      elements_.RemoveLast();
+    }
+  }
+  __ MultiPop(regs);
 }
 
 
 void VirtualFrame::EmitPush(Register reg) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
+  stack_pointer_++;
+  __ Push(reg);
 }
 
+
 void VirtualFrame::EmitMultiPush(RegList regs) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(stack_pointer_ == element_count() - 1);
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
+      stack_pointer_++;
+    }
+  }
+  __ MultiPush(regs);
 }
 
+
 void VirtualFrame::EmitArgumentSlots(RegList reglist) {
   UNIMPLEMENTED_MIPS();
 }
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
index 79f973f..b32e2ae 100644
--- a/src/mips/virtual-frame-mips.h
+++ b/src/mips/virtual-frame-mips.h
@@ -39,18 +39,18 @@
 // -------------------------------------------------------------------------
 // Virtual frames
 //
-// The virtual frame is an abstraction of the physical stack frame.  It
+// The virtual frame is an abstraction of the physical stack frame. It
 // encapsulates the parameters, frame-allocated locals, and the expression
-// stack.  It supports push/pop operations on the expression stack, as well
+// stack. It supports push/pop operations on the expression stack, as well
 // as random access to the expression stack elements, locals, and
 // parameters.
 
 class VirtualFrame : public ZoneObject {
  public:
   // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled.  The constructor spills the code
+  // expected to remain spilled. The constructor spills the code
   // generator's current frame, but no attempt is made to require it
-  // to stay spilled.  It is intended as documentation while the code
+  // to stay spilled. It is intended as documentation while the code
   // generator is being transformed.
   class SpilledScope BASE_EMBEDDED {
    public:
@@ -61,16 +61,17 @@
   static const int kIllegalIndex = -1;
 
   // Construct an initial virtual frame on entry to a JS function.
-  VirtualFrame();
+  inline VirtualFrame();
 
   // Construct a virtual frame as a clone of an existing one.
-  explicit VirtualFrame(VirtualFrame* original);
+  explicit inline VirtualFrame(VirtualFrame* original);
 
   CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
   MacroAssembler* masm() { return cgen()->masm(); }
 
   // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index);
+  FrameElement CopyElementAt(int index,
+                             NumberInfo info = NumberInfo::Unknown());
 
   // The number of elements on the virtual frame.
   int element_count() { return elements_.length(); }
@@ -104,12 +105,12 @@
   }
 
   // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (eg, the frame after an exception handler is pushed).  No code is
+  // frame (eg, the frame after an exception handler is pushed). No code is
   // emitted.
   void Adjust(int count);
 
   // Forget elements from the top of the frame to match an actual frame (eg,
-  // the frame after a runtime call).  No code is emitted.
+  // the frame after a runtime call). No code is emitted.
   void Forget(int count) {
     ASSERT(count >= 0);
     ASSERT(stack_pointer_ == element_count() - 1);
@@ -120,7 +121,7 @@
   }
 
   // Forget count elements from the top of the frame and adjust the stack
-  // pointer downward.  This is used, for example, before merging frames at
+  // pointer downward. This is used, for example, before merging frames at
   // break, continue, and return targets.
   void ForgetElements(int count);
 
@@ -132,24 +133,24 @@
     if (is_used(reg)) SpillElementAt(register_location(reg));
   }
 
-  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // Spill all occurrences of an arbitrary register if possible. Return the
   // register spilled or no_reg if it was not possible to free any register
   // (ie, they all have frame-external references).
   Register SpillAnyRegister();
 
   // Prepare this virtual frame for merging to an expected frame by
   // performing some state changes that do not require generating
-  // code.  It is guaranteed that no code will be generated.
+  // code. It is guaranteed that no code will be generated.
   void PrepareMergeTo(VirtualFrame* expected);
 
   // Make this virtual frame have a state identical to an expected virtual
-  // frame.  As a side effect, code may be emitted to make this frame match
+  // frame. As a side effect, code may be emitted to make this frame match
   // the expected one.
   void MergeTo(VirtualFrame* expected);
 
-  // Detach a frame from its code generator, perhaps temporarily.  This
+  // Detach a frame from its code generator, perhaps temporarily. This
   // tells the register allocator that it is free to use frame-internal
-  // registers.  Used when the code generator's frame is switched from this
+  // registers. Used when the code generator's frame is switched from this
   // one to NULL by an unconditional jump.
   void DetachFromCodeGenerator() {
     RegisterAllocator* cgen_allocator = cgen()->allocator();
@@ -158,7 +159,7 @@
     }
   }
 
-  // (Re)attach a frame to its code generator.  This informs the register
+  // (Re)attach a frame to its code generator. This informs the register
   // allocator that the frame-internal register references are active again.
   // Used when a code generator's frame is switched from NULL to this one by
   // binding a label.
@@ -169,17 +170,17 @@
     }
   }
 
-  // Emit code for the physical JS entry and exit frame sequences.  After
+  // Emit code for the physical JS entry and exit frame sequences. After
   // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used.  Note that Enter does not allocate space in
+  // Exit it should not be used. Note that Enter does not allocate space in
   // the physical frame for storing frame-allocated locals.
   void Enter();
   void Exit();
 
   // Prepare for returning from the frame by spilling locals and
-  // dropping all non-locals elements in the virtual frame.  This
+  // dropping all non-locals elements in the virtual frame. This
   // avoids generating unnecessary merge code when jumping to the
-  // shared return site.  Emits code for spills.
+  // shared return site. Emits code for spills.
   void PrepareForReturn();
 
   // Allocate and initialize the frame-allocated locals.
@@ -193,11 +194,11 @@
     return MemOperand(sp, index * kPointerSize);
   }
 
-  // Random-access store to a frame-top relative frame element.  The result
+  // Random-access store to a frame-top relative frame element. The result
   // becomes owned by the frame and is invalidated.
   void SetElementAt(int index, Result* value);
 
-  // Set a frame element to a constant.  The index is frame-top relative.
+  // Set a frame element to a constant. The index is frame-top relative.
   void SetElementAt(int index, Handle<Object> value) {
     Result temp(value);
     SetElementAt(index, &temp);
@@ -220,13 +221,13 @@
   }
 
   // Push the value of a local frame slot on top of the frame and invalidate
-  // the local slot.  The slot should be written to before trying to read
+  // the local slot. The slot should be written to before trying to read
   // from it again.
   void TakeLocalAt(int index) {
     TakeFrameSlotAt(local0_index() + index);
   }
 
-  // Store the top value on the virtual frame into a local frame slot.  The
+  // Store the top value on the virtual frame into a local frame slot. The
   // value is left in place on top of the frame.
   void StoreToLocalAt(int index) {
     StoreToFrameSlotAt(local0_index() + index);
@@ -266,7 +267,7 @@
   }
 
   // Push the value of a parameter frame slot on top of the frame and
-  // invalidate the parameter slot.  The slot should be written to before
+  // invalidate the parameter slot. The slot should be written to before
   // trying to read from it again.
   void TakeParameterAt(int index) {
     TakeFrameSlotAt(param0_index() + index);
@@ -291,12 +292,8 @@
     RawCallStub(stub);
   }
 
-  // Call stub that expects its argument in r0.  The argument is given
-  // as a result which must be the register r0.
   void CallStub(CodeStub* stub, Result* arg);
 
-  // Call stub that expects its arguments in r1 and r0.  The arguments
-  // are given as results which must be the appropriate registers.
   void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
 
   // Call runtime given the number of arguments expected on (and
@@ -316,7 +313,7 @@
                      int arg_count);
 
   // Call into an IC stub given the number of arguments it removes
-  // from the stack.  Register arguments are passed as results and
+  // from the stack. Register arguments are passed as results and
   // consumed by the call.
   void CallCodeObject(Handle<Code> ic,
                       RelocInfo::Mode rmode,
@@ -332,8 +329,8 @@
                       int dropped_args,
                       bool set_auto_args_slots = false);
 
-  // Drop a number of elements from the top of the expression stack.  May
-  // emit code to affect the physical frame.  Does not clobber any registers
+  // Drop a number of elements from the top of the expression stack. May
+  // emit code to affect the physical frame. Does not clobber any registers
   // excepting possibly the stack pointer.
   void Drop(int count);
   // Similar to VirtualFrame::Drop but we don't modify the actual stack.
@@ -347,7 +344,7 @@
   // Duplicate the top element of the frame.
   void Dup() { PushFrameSlotAt(element_count() - 1); }
 
-  // Pop an element from the top of the expression stack.  Returns a
+  // Pop an element from the top of the expression stack. Returns a
   // Result, which may be a constant or a register.
   Result Pop();
 
@@ -355,20 +352,20 @@
   // emit a corresponding pop instruction.
   void EmitPop(Register reg);
   // Same but for multiple registers
-  void EmitMultiPop(RegList regs);  // higher indexed registers popped first
-  void EmitMultiPopReversed(RegList regs);  // lower first
+  void EmitMultiPop(RegList regs);
+  void EmitMultiPopReversed(RegList regs);
 
   // Push an element on top of the expression stack and emit a
   // corresponding push instruction.
   void EmitPush(Register reg);
   // Same but for multiple registers.
-  void EmitMultiPush(RegList regs);  // lower indexed registers are pushed first
-  void EmitMultiPushReversed(RegList regs);  // higher first
+  void EmitMultiPush(RegList regs);
+  void EmitMultiPushReversed(RegList regs);
 
   // Push an element on the virtual frame.
-  void Push(Register reg);
-  void Push(Handle<Object> value);
-  void Push(Smi* value) { Push(Handle<Object>(value)); }
+  inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
+  inline void Push(Handle<Object> value);
+  inline void Push(Smi* value);
 
   // Pushing a result invalidates it (its contents become owned by the frame).
   void Push(Result* result) {
@@ -383,13 +380,16 @@
 
   // Nip removes zero or more elements from immediately below the top
   // of the frame, leaving the previous top-of-frame value on top of
-  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
-  void Nip(int num_dropped);
+  // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  inline void Nip(int num_dropped);
 
   // This pushes 4 argument slots on the stack and saves the requested 'a'
   // registers ('a' registers are the argument registers a0 to a3).
   void EmitArgumentSlots(RegList reglist);
 
+  inline void SetTypeForLocalAt(int index, NumberInfo info);
+  inline void SetTypeForParamAt(int index, NumberInfo info);
+
  private:
   static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
   static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
@@ -413,23 +413,23 @@
   int local_count() { return cgen()->scope()->num_stack_slots(); }
 
   // The index of the element that is at the processor's frame pointer
-  // (the fp register).  The parameters, receiver, function, and context
+  // (the fp register). The parameters, receiver, function, and context
   // are below the frame pointer.
   int frame_pointer() { return parameter_count() + 3; }
 
-  // The index of the first parameter.  The receiver lies below the first
+  // The index of the first parameter. The receiver lies below the first
   // parameter.
   int param0_index() { return 1; }
 
-  // The index of the context slot in the frame.  It is immediately
+  // The index of the context slot in the frame. It is immediately
   // below the frame pointer.
   int context_index() { return frame_pointer() - 1; }
 
-  // The index of the function slot in the frame.  It is below the frame
+  // The index of the function slot in the frame. It is below the frame
   // pointer and context slot.
   int function_index() { return frame_pointer() - 2; }
 
-  // The index of the first local.  Between the frame pointer and the
+  // The index of the first local. Between the frame pointer and the
   // locals lies the return address.
   int local0_index() { return frame_pointer() + 2; }
 
@@ -444,7 +444,7 @@
     return (frame_pointer() - index) * kPointerSize;
   }
 
-  // Record an occurrence of a register in the virtual frame.  This has the
+  // Record an occurrence of a register in the virtual frame. This has the
   // effect of incrementing the register's external reference count and
   // of updating the index of the register's location in the frame.
   void Use(Register reg, int index) {
@@ -453,7 +453,7 @@
     cgen()->allocator()->Use(reg);
   }
 
-  // Record that a register reference has been dropped from the frame.  This
+  // Record that a register reference has been dropped from the frame. This
   // decrements the register's external reference count and invalidates the
   // index of the register's location in the frame.
   void Unuse(Register reg) {
@@ -467,7 +467,7 @@
   // constant.
   void SpillElementAt(int index);
 
-  // Sync the element at a particular index.  If it is a register or
+  // Sync the element at a particular index. If it is a register or
   // constant that disagrees with the value on the stack, write it to memory.
   // Keep the element type as register or constant, and clear the dirty bit.
   void SyncElementAt(int index);
@@ -483,7 +483,7 @@
 
   // Push a copy of a frame slot (typically a local or parameter) on top of
   // the frame.
-  void PushFrameSlotAt(int index);
+  inline void PushFrameSlotAt(int index);
 
   // Push the value of a frame slot (typically a local or parameter) on
   // top of the frame and invalidate the slot.
@@ -494,7 +494,7 @@
   void StoreToFrameSlotAt(int index);
 
   // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame.  Sync all other frame elements.
+  // on the frame. Sync all other frame elements.
   // Then drop dropped_args elements from the virtual frame, to match
   // the effect of an upcoming call that will drop them from the stack.
   void PrepareForCall(int spilled_args, int dropped_args);
@@ -515,14 +515,14 @@
   // Make the memory-to-register and constant-to-register moves
   // needed to make this frame equal the expected frame.
   // Called after all register-to-memory and register-to-register
-  // moves have been made.  After this function returns, the frames
+  // moves have been made. After this function returns, the frames
   // should be equal.
   void MergeMoveMemoryToRegisters(VirtualFrame* expected);
 
   // Invalidates a frame slot (puts an invalid frame element in it).
   // Copies on the frame are correctly handled, and if this slot was
   // the backing store of copies, the index of the new backing store
-  // is returned.  Otherwise, returns kIllegalIndex.
+  // is returned. Otherwise, returns kIllegalIndex.
   // Register counts are correctly updated.
   int InvalidateFrameSlotAt(int index);
 
@@ -534,7 +534,7 @@
   // (via PrepareForCall).
   void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
 
-  bool Equals(VirtualFrame* other);
+  inline bool Equals(VirtualFrame* other);
 
   // Classes that need raw access to the elements_ array.
   friend class DeferredCode;
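
The stack discipline promised by the comments above (in particular, Nip(k) being equivalent to x = Pop(), Drop(k), Push(x)) is compact enough to model outside V8. A minimal standalone sketch — ToyFrame and its int payload are stand-ins for VirtualFrame and FrameElement, with no register allocation or spilling:

    #include <cassert>
    #include <vector>

    class ToyFrame {
     public:
      void Push(int value) { elements_.push_back(value); }
      int Pop() { int v = elements_.back(); elements_.pop_back(); return v; }
      void Drop(int count) { elements_.resize(elements_.size() - count); }
      void Dup() { Push(elements_.back()); }
      // Nip(k) == x = Pop(), Drop(k), Push(x): remove k elements from just
      // below the top while keeping the previous top-of-frame value on top.
      void Nip(int num_dropped) {
        int top = Pop();
        Drop(num_dropped);
        Push(top);
      }
      size_t element_count() const { return elements_.size(); }
     private:
      std::vector<int> elements_;
    };

    int main() {
      ToyFrame frame;
      frame.Push(1); frame.Push(2); frame.Push(3); frame.Push(4);
      frame.Nip(2);  // Drops 2 and 3; the frame is now [1, 4].
      assert(frame.element_count() == 2);
      assert(frame.Pop() == 4);
      assert(frame.Pop() == 1);
      return 0;
    }
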
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index dfe297b..29d0069 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -67,7 +67,7 @@
       }
     }
   }
-  
+
   if (IS_UNDEFINED(value)) {
     mirror = new UndefinedMirror();
   } else if (IS_NULL(value)) {
@@ -110,7 +110,7 @@
   return mirror_cache_[handle];
 }
 
-  
+
 /**
  * Returns the mirror for the undefined value.
  *
@@ -622,7 +622,7 @@
   var propertyNames;
   var elementNames;
   var total = 0;
-  
+
   // Find all the named properties.
   if (kind & PropertyKind.Named) {
     // Get the local property names.
@@ -1223,7 +1223,7 @@
 /**
  * Returns whether this property is natively implemented by the host or set
  * through JavaScript code.
- * @return {boolean} True if the property is 
+ * @return {boolean} True if the property is natively implemented rather
  *     than set through JavaScript code.
  */
 PropertyMirror.prototype.isNative = function() {
@@ -1390,7 +1390,7 @@
 FrameMirror.prototype.func = function() {
   // Get the function for this frame from the VM.
   var f = this.details_.func();
-  
+
   // Create a function mirror. NOTE: MakeMirror cannot be used here as the
   // value returned from the VM might be a string if the function for the
   // frame is unresolved.
@@ -1728,8 +1728,7 @@
 
 
 ScriptMirror.prototype.name = function() {
-  // If we have name, we trust it more than sourceURL from comments
-  return this.script_.name || this.sourceUrlFromComment_();
+  return this.script_.name || this.script_.nameOrSourceURL();
 };
 
 
@@ -1825,29 +1824,6 @@
 
 
 /**
- * Returns a suggested script URL from comments in script code (if found), 
- * undefined otherwise. Used primarily by debuggers for identifying eval()'ed
- * scripts. See 
- * http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
- * for details.
- * 
- * @return {?string} value for //@ sourceURL comment
- */
-ScriptMirror.prototype.sourceUrlFromComment_ = function() {
-  if (!('sourceUrl_' in this) && this.source()) {
-    // TODO(608): the spaces in a regexp below had to be escaped as \040 
-    // because this file is being processed by js2c whose handling of spaces
-    // in regexps is broken.
-    // We're not using \s here to prevent \n from matching.
-    var sourceUrlPattern = /\/\/@[\040\t]sourceURL=[\040\t]*(\S+)[\040\t]*$/m;
-    var match = sourceUrlPattern.exec(this.source());
-    this.sourceUrl_ = match ? match[1] : undefined;
-  }
-  return this.sourceUrl_;
-};
-
-
-/**
  * Mirror object for context.
  * @param {Object} data The context data
  * @constructor
@@ -1928,10 +1904,10 @@
 JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
   // Collect the protocol representation of the referenced objects in an array.
   var content = [];
-  
+
   // Get the number of referenced objects.
   var count = this.mirrors_.length;
-  
+
   for (var i = 0; i < count; i++) {
     content.push(this.serialize_(this.mirrors_[i], false, false));
   }
@@ -1966,7 +1942,7 @@
       return;
     }
   }
-  
+
   // Add the mirror to the list of mirrors to be serialized.
   this.mirrors_.push(mirror);
 }
@@ -1978,7 +1954,7 @@
  * @param {Mirror} mirror Mirror to serialize.
  * @return {Object} Protocol reference object.
  */
-JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ = 
+JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
     function(mirror) {
   var o = {};
   o.ref = mirror.handle();
@@ -2025,7 +2001,7 @@
       return {'ref' : mirror.handle()};
     }
   }
-  
+
   // Collect the JSON property/value pairs.
   var content = {};
 
@@ -2137,7 +2113,7 @@
 
   // Always add the text representation.
   content.text = mirror.toText();
-  
+
   // Create and return the JSON string.
   return content;
 }
@@ -2170,7 +2146,7 @@
   if (mirror.hasIndexedInterceptor()) {
     content.indexedInterceptor = true;
   }
-  
+
   // Add function specific properties.
   if (mirror.isFunction()) {
     // Add function specific properties.
@@ -2185,7 +2161,7 @@
     if (mirror.script()) {
       content.script = this.serializeReference(mirror.script());
       content.scriptId = mirror.script().id();
-      
+
       serializeLocationFields(mirror.sourceLocation(), content);
     }
   }
@@ -2224,13 +2200,13 @@
  *   "position":"<position>",
  *   "line":"<line>",
  *   "column":"<column>",
- * 
+ *
  * @param {SourceLocation} location The location to serialize, may be undefined.
  */
 function serializeLocationFields (location, content) {
   if (!location) {
     return;
-  }                                                                     
+  }
   content.position = location.position;
   var line = location.line;
   if (!IS_UNDEFINED(line)) {
@@ -2264,7 +2240,7 @@
  */
 JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
   var result = {};
-  
+
   result.name = propertyMirror.name();
   var propertyValue = propertyMirror.value();
   if (this.inlineRefs_() && propertyValue.isValue()) {
@@ -2316,7 +2292,7 @@
   if (!IS_UNDEFINED(source_line_text)) {
     content.sourceLineText = source_line_text;
   }
-  
+
   content.scopes = [];
   for (var i = 0; i < mirror.scopeCount(); i++) {
     var scope = mirror.scope(i);
@@ -2358,5 +2334,5 @@
       return '-Infinity';
     }
   }
-  return value; 
+  return value;
 }
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 8f26f74..b0a3fd6 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -641,6 +641,24 @@
 }
 
 
+void CodeCache::CodeCachePrint() {
+  HeapObject::PrintHeader("CodeCache");
+  PrintF("\n - default_cache: ");
+  default_cache()->ShortPrint();
+  PrintF("\n - normal_type_cache: ");
+  normal_type_cache()->ShortPrint();
+}
+
+
+void CodeCache::CodeCacheVerify() {
+  VerifyHeapPointer(default_cache());
+  VerifyHeapPointer(normal_type_cache());
+  ASSERT(default_cache()->IsFixedArray());
+  ASSERT(normal_type_cache()->IsUndefined()
+         || normal_type_cache()->IsCodeCacheHashTable());
+}
+
+
 void FixedArray::FixedArrayPrint() {
   HeapObject::PrintHeader("FixedArray");
   PrintF(" - length: %d", length());
@@ -707,7 +725,6 @@
 void JSFunction::JSFunctionPrint() {
   HeapObject::PrintHeader("Function");
   PrintF(" - map = 0x%p\n", map());
-  PrintF(" - is boilerplate: %s\n", IsBoilerplate() ? "yes" : "no");
   PrintF(" - initial_map = ");
   if (has_initial_map()) {
     initial_map()->ShortPrint();
@@ -768,7 +785,7 @@
   VerifyObjectField(kNameOffset);
   VerifyObjectField(kCodeOffset);
   VerifyObjectField(kInstanceClassNameOffset);
-  VerifyObjectField(kExternalReferenceDataOffset);
+  VerifyObjectField(kFunctionDataOffset);
   VerifyObjectField(kScriptOffset);
   VerifyObjectField(kDebugInfoOffset);
 }
@@ -1311,6 +1328,32 @@
 }
 
 
+void JSFunctionResultCache::JSFunctionResultCacheVerify() {
+  JSFunction::cast(get(kFactoryIndex))->Verify();
+
+  int size = Smi::cast(get(kCacheSizeIndex))->value();
+  ASSERT(kEntriesIndex <= size);
+  ASSERT(size <= length());
+  ASSERT_EQ(0, size % kEntrySize);
+
+  int finger = Smi::cast(get(kFingerIndex))->value();
+  ASSERT(kEntriesIndex <= finger);
+  ASSERT(finger < size || finger == kEntriesIndex);
+  ASSERT_EQ(0, finger % kEntrySize);
+
+  if (FLAG_enable_slow_asserts) {
+    for (int i = kEntriesIndex; i < size; i++) {
+      ASSERT(!get(i)->IsTheHole());
+      get(i)->Verify();
+    }
+    for (int i = size; i < length(); i++) {
+      ASSERT(get(i)->IsTheHole());
+      get(i)->Verify();
+    }
+  }
+}
+
+
 #endif  // DEBUG
 
 } }  // namespace v8::internal
diff --git a/src/objects-inl.h b/src/objects-inl.h
index cc971f3..ae7d2c2 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -255,6 +255,16 @@
 }
 
 
+bool String::IsExternalTwoByteStringWithAsciiChars() {
+  if (!IsExternalTwoByteString()) return false;
+  const uc16* data = ExternalTwoByteString::cast(this)->resource()->data();
+  for (int i = 0, len = length(); i < len; i++) {
+    if (data[i] > kMaxAsciiCharCode) return false;
+  }
+  return true;
+}
+
+
 bool StringShape::IsCons() {
   return (type_ & kStringRepresentationMask) == kConsStringTag;
 }
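
The new predicate above scans the raw two-byte payload for code units outside the ASCII range. The core check, as a standalone function (kMaxAsciiCharCode is 127 in V8; it is restated here so the sketch compiles on its own):

    #include <cstdint>

    static const uint16_t kMaxAsciiCharCode = 127;

    // True if every UTF-16 code unit in the buffer is plain ASCII, i.e.
    // the string could be stored one byte per character instead of two.
    bool HasOnlyAsciiChars(const uint16_t* data, int length) {
      for (int i = 0; i < length; i++) {
        if (data[i] > kMaxAsciiCharCode) return false;
      }
      return true;
    }
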
@@ -559,11 +569,32 @@
 }
 
 
+bool Object::IsJSFunctionResultCache() {
+  if (!IsFixedArray()) return false;
+  FixedArray* self = FixedArray::cast(this);
+  int length = self->length();
+  if (length < JSFunctionResultCache::kEntriesIndex) return false;
+  if ((length - JSFunctionResultCache::kEntriesIndex)
+      % JSFunctionResultCache::kEntrySize != 0) {
+    return false;
+  }
+#ifdef DEBUG
+  reinterpret_cast<JSFunctionResultCache*>(this)->JSFunctionResultCacheVerify();
+#endif
+  return true;
+}
+
+
 bool Object::IsCompilationCacheTable() {
   return IsHashTable();
 }
 
 
+bool Object::IsCodeCacheHashTable() {
+  return IsHashTable();
+}
+
+
 bool Object::IsMapCache() {
   return IsHashTable();
 }
@@ -727,7 +758,8 @@
   } else { \
     ASSERT(mode == SKIP_WRITE_BARRIER); \
     ASSERT(Heap::InNewSpace(object) || \
-           !Heap::InNewSpace(READ_FIELD(object, offset))); \
+           !Heap::InNewSpace(READ_FIELD(object, offset)) || \
+           Page::IsRSetSet(object->address(), offset)); \
   }
 
 #define READ_DOUBLE_FIELD(p, offset) \
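
The extra disjunct in the SKIP_WRITE_BARRIER assertion accepts a store of a new-space pointer into an old-space object when the slot's remembered-set bit is already set, since the GC will rescan that slot anyway. A toy one-page remembered set illustrating what such a bit test looks like — the names and bitmap layout here are illustrative only, not V8's actual Page class:

    #include <cstdint>

    static const int kPointerSize = sizeof(void*);
    static const int kSlotsPerPage = 1 << 12;

    struct ToyPage {
      // One bit per pointer-sized slot, indexed by offset from page start.
      uint8_t rset[kSlotsPerPage / 8] = {};

      bool IsRSetSet(int slot_offset) const {
        int bit = slot_offset / kPointerSize;
        return (rset[bit / 8] >> (bit % 8)) & 1;
      }
      void SetRSet(int slot_offset) {
        int bit = slot_offset / kPointerSize;
        rset[bit / 8] |= 1 << (bit % 8);
      }
    };
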
@@ -840,15 +872,17 @@
 
 
 intptr_t Failure::value() const {
-  return reinterpret_cast<intptr_t>(this) >> kFailureTagSize;
+  return static_cast<intptr_t>(
+      reinterpret_cast<uintptr_t>(this) >> kFailureTagSize);
 }
 
 
 Failure* Failure::RetryAfterGC(int requested_bytes) {
   // Assert that the space encoding fits in the three bits allotted for it.
   ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
-  intptr_t requested = requested_bytes >> kObjectAlignmentBits;
-  int tag_bits = kSpaceTagSize + kFailureTypeTagSize;
+  uintptr_t requested =
+      static_cast<uintptr_t>(requested_bytes >> kObjectAlignmentBits);
+  int tag_bits = kSpaceTagSize + kFailureTypeTagSize + kFailureTagSize;
   if (((requested << tag_bits) >> tag_bits) != requested) {
     // No room for entire requested size in the bits. Round down to
     // maximally representable size.
@@ -861,7 +895,8 @@
 
 
 Failure* Failure::Construct(Type type, intptr_t value) {
-  intptr_t info = (static_cast<intptr_t>(value) << kFailureTypeTagSize) | type;
+  uintptr_t info =
+      (static_cast<uintptr_t>(value) << kFailureTypeTagSize) | type;
   ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
   return reinterpret_cast<Failure*>((info << kFailureTagSize) | kFailureTag);
 }
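
The Failure changes above replace signed shifts with unsigned ones. Right-shifting a negative signed value is implementation-defined in C++, and a Failure* reinterpreted as intptr_t is negative whenever the pointer's top bit is set; round-tripping through uintptr_t makes both directions well defined. A condensed sketch of the tag/untag arithmetic, restating kFailureTag (3) and kFailureTagSize (2) so it stands alone:

    #include <cassert>
    #include <cstdint>

    static const int kFailureTagSize = 2;
    static const uintptr_t kFailureTag = 3;

    uintptr_t Encode(uintptr_t value) {
      return (value << kFailureTagSize) | kFailureTag;  // Tag in low bits.
    }

    intptr_t Decode(uintptr_t encoded) {
      // Zero-filling shift on the unsigned type, then convert: no sign bit
      // is smeared into the result even if the top bit was set.
      return static_cast<intptr_t>(encoded >> kFailureTagSize);
    }

    int main() {
      uintptr_t v = uintptr_t(1) << (sizeof(uintptr_t) * 8 - 4);
      assert(Decode(Encode(v)) == static_cast<intptr_t>(v));
      return 0;
    }
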
@@ -1113,6 +1148,17 @@
 }
 
 
+int HeapNumber::get_exponent() {
+  return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
+          kExponentShift) - kExponentBias;
+}
+
+
+int HeapNumber::get_sign() {
+  return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
+}
+
+
 ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
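
get_exponent() and get_sign() above read the IEEE-754 fields straight out of the heap number's high word. The same extraction on a plain double, as a standalone sketch using the mask/shift/bias constants that appear in the objects.h part of this patch (kExponentMask 0x7ff00000, kExponentShift 20, kExponentBias 1023, kSignMask 0x80000000):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static const uint32_t kSignMask = 0x80000000u;
    static const uint32_t kExponentMask = 0x7ff00000u;
    static const int kExponentShift = 20;
    static const int kExponentBias = 1023;

    // High 32 bits of the double's bit pattern: sign, exponent, and the
    // top 20 mantissa bits. Assumes IEEE-754 doubles.
    static uint32_t HighWord(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);
    }

    int GetExponent(double value) {
      return static_cast<int>(
          (HighWord(value) & kExponentMask) >> kExponentShift) - kExponentBias;
    }

    int GetSign(double value) { return HighWord(value) & kSignMask; }

    int main() {
      assert(GetExponent(8.0) == 3);  // 8.0 == 1.0 * 2^3.
      assert(GetSign(-1.0) != 0);
      assert(GetSign(1.0) == 0);
      return 0;
    }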
 
 
@@ -1394,6 +1440,11 @@
 }
 
 
+Object** FixedArray::data_start() {
+  return HeapObject::RawField(this, kHeaderSize);
+}
+
+
 bool DescriptorArray::IsEmpty() {
   ASSERT(this == Heap::empty_descriptor_array() ||
          this->length() > 2);
@@ -1559,7 +1610,9 @@
 CAST_ACCESSOR(FixedArray)
 CAST_ACCESSOR(DescriptorArray)
 CAST_ACCESSOR(SymbolTable)
+CAST_ACCESSOR(JSFunctionResultCache)
 CAST_ACCESSOR(CompilationCacheTable)
+CAST_ACCESSOR(CodeCacheHashTable)
 CAST_ACCESSOR(MapCache)
 CAST_ACCESSOR(String)
 CAST_ACCESSOR(SeqString)
@@ -1615,7 +1668,7 @@
 INT_ACCESSORS(Array, length, kLengthOffset)
 
 
-INT_ACCESSORS(String, length, kLengthOffset)
+SMI_ACCESSORS(String, length, kLengthOffset)
 
 
 uint32_t String::hash_field() {
@@ -1637,13 +1690,11 @@
 }
 
 
-Object* String::TryFlattenIfNotFlat() {
+Object* String::TryFlatten(PretenureFlag pretenure) {
   // We don't need to flatten strings that are already flat.  Since this code
   // is inlined, it can be helpful in the flat case to not call out to Flatten.
-  if (!IsFlat()) {
-    return TryFlatten();
-  }
-  return this;
+  if (IsFlat()) return this;
+  return SlowTryFlatten(pretenure);
 }
 
 
@@ -1739,14 +1790,12 @@
 
 
 int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
-  uint32_t length = READ_INT_FIELD(this, kLengthOffset);
-  return SizeFor(length);
+  return SizeFor(length());
 }
 
 
 int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) {
-  uint32_t length = READ_INT_FIELD(this, kLengthOffset);
-  return SizeFor(length);
+  return SizeFor(length());
 }
 
 
@@ -1804,6 +1853,20 @@
 }
 
 
+void JSFunctionResultCache::MakeZeroSize() {
+  set(kFingerIndex, Smi::FromInt(kEntriesIndex));
+  set(kCacheSizeIndex, Smi::FromInt(kEntriesIndex));
+}
+
+
+void JSFunctionResultCache::Clear() {
+  int cache_size = Smi::cast(get(kCacheSizeIndex))->value();
+  Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
+  MemsetPointer(entries_start, Heap::the_hole_value(), cache_size);
+  MakeZeroSize();
+}
+
+
 byte ByteArray::get(int index) {
   ASSERT(index >= 0 && index < this->length());
   return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
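
MakeZeroSize() and Clear() above manipulate a flat FixedArray whose first four slots are metadata (factory, finger, size, dummy) followed by key/value entries; both the finger and the size are stored as indices into the same array, so "empty" means both point at kEntriesIndex. A toy model of that layout, with ints standing in for heap objects and kHole for the-hole:

    #include <cassert>
    #include <vector>

    static const int kFactoryIndex = 0;
    static const int kFingerIndex = 1;
    static const int kCacheSizeIndex = 2;
    static const int kDummyIndex = 3;
    static const int kEntriesIndex = 4;
    static const int kEntrySize = 2;  // key + value
    static const int kHole = -1;

    struct ToyResultCache {
      std::vector<int> slots;

      explicit ToyResultCache(int entries)
          : slots(kEntriesIndex + entries * kEntrySize, kHole) {
        MakeZeroSize();
      }
      void MakeZeroSize() {
        slots[kFingerIndex] = kEntriesIndex;
        slots[kCacheSizeIndex] = kEntriesIndex;
      }
      void Clear() {
        // Hole out the entries that were in use, then reset finger/size,
        // mirroring the intent of JSFunctionResultCache::Clear().
        for (int i = kEntriesIndex; i < slots[kCacheSizeIndex]; i++) {
          slots[i] = kHole;
        }
        MakeZeroSize();
      }
    };

    int main() {
      ToyResultCache cache(8);
      assert(cache.slots[kFingerIndex] == kEntriesIndex);
      cache.Clear();
      assert(cache.slots[kCacheSizeIndex] == kEntriesIndex);
      return 0;
    }
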
@@ -2079,6 +2142,20 @@
 }
 
 
+void Map::set_function_with_prototype(bool value) {
+  if (value) {
+    set_bit_field2(bit_field2() | (1 << kFunctionWithPrototype));
+  } else {
+    set_bit_field2(bit_field2() & ~(1 << kFunctionWithPrototype));
+  }
+}
+
+
+bool Map::function_with_prototype() {
+  return ((1 << kFunctionWithPrototype) & bit_field2()) != 0;
+}
+
+
 void Map::set_is_access_check_needed(bool access_check_needed) {
   if (access_check_needed) {
     set_bit_field(bit_field() | (1 << kIsAccessCheckNeeded));
@@ -2143,14 +2220,14 @@
 
 
 CodeStub::Major Code::major_key() {
-  ASSERT(kind() == STUB);
+  ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
   return static_cast<CodeStub::Major>(READ_BYTE_FIELD(this,
                                                       kStubMajorKeyOffset));
 }
 
 
 void Code::set_major_key(CodeStub::Major major) {
-  ASSERT(kind() == STUB);
+  ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
   ASSERT(0 <= major && major < 256);
   WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
 }
@@ -2252,7 +2329,7 @@
 
 ACCESSORS(Map, instance_descriptors, DescriptorArray,
           kInstanceDescriptorsOffset)
-ACCESSORS(Map, code_cache, FixedArray, kCodeCacheOffset)
+ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
 ACCESSORS(Map, constructor, Object, kConstructorOffset)
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
@@ -2345,12 +2422,11 @@
 ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
 #endif
 
-ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
 ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
+ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
-ACCESSORS(SharedFunctionInfo, function_data, Object,
-          kExternalReferenceDataOffset)
+ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
 ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
 ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
 ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
@@ -2379,6 +2455,7 @@
               kFormalParameterCountOffset)
 INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
               kExpectedNofPropertiesOffset)
+INT_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
 INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
               kStartPositionAndTypeOffset)
 INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
@@ -2390,6 +2467,9 @@
               kThisPropertyAssignmentsCountOffset)
 
 
+ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
+ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
+
 bool Script::HasValidSource() {
   Object* src = this->source();
   if (!src->IsString()) return true;
@@ -2438,8 +2518,19 @@
 }
 
 
-bool JSFunction::IsBoilerplate() {
-  return map() == Heap::boilerplate_function_map();
+bool SharedFunctionInfo::IsApiFunction() {
+  return function_data()->IsFunctionTemplateInfo();
+}
+
+
+FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
+  ASSERT(IsApiFunction());
+  return FunctionTemplateInfo::cast(function_data());
+}
+
+
+bool SharedFunctionInfo::HasCustomCallGenerator() {
+  return function_data()->IsProxy();
 }
 
 
@@ -2520,6 +2611,10 @@
   return instance_prototype();
 }
 
+bool JSFunction::should_have_prototype() {
+  return map()->function_with_prototype();
+}
+
 
 bool JSFunction::is_compiled() {
   return shared()->is_compiled();
@@ -2533,15 +2628,29 @@
 
 Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
   ASSERT(0 <= id && id < kJSBuiltinsCount);
-  return READ_FIELD(this, kJSBuiltinsOffset + (id * kPointerSize));
+  return READ_FIELD(this, OffsetOfFunctionWithId(id));
 }
 
 
 void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
                                               Object* value) {
   ASSERT(0 <= id && id < kJSBuiltinsCount);
-  WRITE_FIELD(this, kJSBuiltinsOffset + (id * kPointerSize), value);
-  WRITE_BARRIER(this, kJSBuiltinsOffset + (id * kPointerSize));
+  WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
+  WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
+}
+
+
+Code* JSBuiltinsObject::javascript_builtin_code(Builtins::JavaScript id) {
+  ASSERT(0 <= id && id < kJSBuiltinsCount);
+  return Code::cast(READ_FIELD(this, OffsetOfCodeWithId(id)));
+}
+
+
+void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
+                                                   Code* value) {
+  ASSERT(0 <= id && id < kJSBuiltinsCount);
+  WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
+  ASSERT(!Heap::InNewSpace(value));
 }
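
The old code computed one slot per builtin; the new accessors address two parallel tables, one holding each builtin's JSFunction and one holding its Code object (asserted to be outside new space, so the store needs no write barrier). The offset helpers are not part of this hunk; a plausible layout, purely hypothetical and for illustration only, would be:

    static const int kPointerSize = sizeof(void*);
    static const int kJSBuiltinsOffset = 0;  // Placeholder base offset.
    static const int kJSBuiltinsCount = 6;   // Placeholder count.

    int OffsetOfFunctionWithId(int id) {
      return kJSBuiltinsOffset + id * kPointerSize;
    }

    int OffsetOfCodeWithId(int id) {
      // Code slots follow all of the function slots.
      return kJSBuiltinsOffset + (kJSBuiltinsCount + id) * kPointerSize;
    }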
 
 
@@ -2768,6 +2877,13 @@
 }
 
 
+bool JSObject::AllowsSetElementsLength() {
+  bool result = elements()->IsFixedArray();
+  ASSERT(result == (!HasPixelElements() && !HasExternalArrayElements()));
+  return result;
+}
+
+
 StringDictionary* JSObject::property_dictionary() {
   ASSERT(!HasFastProperties());
   return StringDictionary::cast(properties());
diff --git a/src/objects.cc b/src/objects.cc
index 99532ac..459c8aa 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -431,7 +431,7 @@
   if (!cons_obj->IsJSFunction())
     return true;
   JSFunction* fun = JSFunction::cast(cons_obj);
-  if (!fun->shared()->function_data()->IsFunctionTemplateInfo())
+  if (!fun->shared()->IsApiFunction())
     return true;
   // If the object is fully fast case and has the same map it was
   // created with then no changes can have been made to it.
@@ -618,7 +618,7 @@
 }
 
 
-Object* String::TryFlatten() {
+Object* String::SlowTryFlatten(PretenureFlag pretenure) {
 #ifdef DEBUG
   // Do not attempt to flatten in debug mode when allocation is not
   // allowed.  This is to avoid an assertion failure when allocating.
@@ -636,7 +636,7 @@
       // There's little point in putting the flat string in new space if the
       // cons string is in old space.  It can never get GCed until there is
       // an old space GC.
-      PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED : TENURED;
+      PretenureFlag tenure = Heap::InNewSpace(this) ? pretenure : TENURED;
       int len = length();
       Object* object;
       String* result;
@@ -1189,8 +1189,7 @@
 
 String* JSObject::constructor_name() {
   if (IsJSFunction()) {
-    return JSFunction::cast(this)->IsBoilerplate() ?
-      Heap::function_class_symbol() : Heap::closure_symbol();
+    return Heap::closure_symbol();
   }
   if (map()->constructor()->IsJSFunction()) {
     JSFunction* constructor = JSFunction::cast(map()->constructor());
@@ -1935,6 +1934,7 @@
     // Neither properties nor transitions found.
     return AddProperty(name, value, attributes);
   }
+
   PropertyDetails details = PropertyDetails(attributes, NORMAL);
 
   // Check of IsReadOnly removed from here in clone.
@@ -2118,7 +2118,7 @@
     property_count += 2;  // Make space for two more properties.
   }
   Object* obj =
-      StringDictionary::Allocate(property_count * 2);
+      StringDictionary::Allocate(property_count);
   if (obj->IsFailure()) return obj;
   StringDictionary* dictionary = StringDictionary::cast(obj);
 
@@ -2518,9 +2518,8 @@
       break;
   }
 
-  // For functions check the context. Boilerplate functions do
-  // not have to be traversed since they have no real context.
-  if (IsJSFunction() && !JSFunction::cast(this)->IsBoilerplate()) {
+  // For functions check the context.
+  if (IsJSFunction()) {
     // Get the constructor function for arguments array.
     JSObject* arguments_boilerplate =
         Top::context()->global_context()->arguments_boilerplate();
@@ -2701,7 +2700,7 @@
   }
 
   // Try to flatten before operating on the string.
-  name->TryFlattenIfNotFlat();
+  name->TryFlatten();
 
   // Check if there is an API defined callback object which prohibits
   // callback overwriting in this object or it's prototype chain.
@@ -2966,19 +2965,79 @@
 
 
 Object* Map::UpdateCodeCache(String* name, Code* code) {
-  ASSERT(code->ic_state() == MONOMORPHIC);
-  FixedArray* cache = code_cache();
+  // Allocate the code cache if not present.
+  if (code_cache()->IsFixedArray()) {
+    Object* result = Heap::AllocateCodeCache();
+    if (result->IsFailure()) return result;
+    set_code_cache(result);
+  }
 
-  // When updating the code cache we disregard the type encoded in the
+  // Update the code cache.
+  return CodeCache::cast(code_cache())->Update(name, code);
+}
+
+
+Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
+  // Do a lookup if a code cache exists.
+  if (!code_cache()->IsFixedArray()) {
+    return CodeCache::cast(code_cache())->Lookup(name, flags);
+  } else {
+    return Heap::undefined_value();
+  }
+}
+
+
+int Map::IndexInCodeCache(Object* name, Code* code) {
+  // Get the internal index if a code cache exists.
+  if (!code_cache()->IsFixedArray()) {
+    return CodeCache::cast(code_cache())->GetIndex(name, code);
+  }
+  return -1;
+}
+
+
+void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
+  // No GC is supposed to happen between a call to IndexInCodeCache and
+  // RemoveFromCodeCache, so the code cache must still be there.
+  ASSERT(!code_cache()->IsFixedArray());
+  CodeCache::cast(code_cache())->RemoveByIndex(name, code, index);
+}
+
+
+Object* CodeCache::Update(String* name, Code* code) {
+  ASSERT(code->ic_state() == MONOMORPHIC);
+
+  // The number of monomorphic stubs for normal load/store/call ICs can grow
+  // quite large, so they need to go into a hash table. They are used to load
+  // global properties from cells.
+  if (code->type() == NORMAL) {
+    // Make sure that a hash table is allocated for the normal load code cache.
+    if (normal_type_cache()->IsUndefined()) {
+      Object* result =
+          CodeCacheHashTable::Allocate(CodeCacheHashTable::kInitialSize);
+      if (result->IsFailure()) return result;
+      set_normal_type_cache(result);
+    }
+    return UpdateNormalTypeCache(name, code);
+  } else {
+    ASSERT(default_cache()->IsFixedArray());
+    return UpdateDefaultCache(name, code);
+  }
+}
+
+
+Object* CodeCache::UpdateDefaultCache(String* name, Code* code) {
+  // When updating the default code cache we disregard the type encoded in the
   // flags. This allows call constant stubs to overwrite call field
   // stubs, etc.
   Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
 
   // First check whether we can update existing code cache without
   // extending it.
+  FixedArray* cache = default_cache();
   int length = cache->length();
   int deleted_index = -1;
-  for (int i = 0; i < length; i += 2) {
+  for (int i = 0; i < length; i += kCodeCacheEntrySize) {
     Object* key = cache->get(i);
     if (key->IsNull()) {
       if (deleted_index < 0) deleted_index = i;
@@ -2986,14 +3045,15 @@
     }
     if (key->IsUndefined()) {
       if (deleted_index >= 0) i = deleted_index;
-      cache->set(i + 0, name);
-      cache->set(i + 1, code);
+      cache->set(i + kCodeCacheEntryNameOffset, name);
+      cache->set(i + kCodeCacheEntryCodeOffset, code);
       return this;
     }
     if (name->Equals(String::cast(key))) {
-      Code::Flags found = Code::cast(cache->get(i + 1))->flags();
+      Code::Flags found =
+          Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
       if (Code::RemoveTypeFromFlags(found) == flags) {
-        cache->set(i + 1, code);
+        cache->set(i + kCodeCacheEntryCodeOffset, code);
         return this;
       }
     }
@@ -3002,61 +3062,206 @@
   // Reached the end of the code cache.  If there were deleted
   // elements, reuse the space for the first of them.
   if (deleted_index >= 0) {
-    cache->set(deleted_index + 0, name);
-    cache->set(deleted_index + 1, code);
+    cache->set(deleted_index + kCodeCacheEntryNameOffset, name);
+    cache->set(deleted_index + kCodeCacheEntryCodeOffset, code);
     return this;
   }
 
-  // Extend the code cache with some new entries (at least one).
-  int new_length = length + ((length >> 1) & ~1) + 2;
-  ASSERT((new_length & 1) == 0);  // must be a multiple of two
+  // Extend the code cache with some new entries (at least one). Must be a
+  // multiple of the entry size.
+  int new_length = length + ((length >> 1)) + kCodeCacheEntrySize;
+  new_length = new_length - new_length % kCodeCacheEntrySize;
+  ASSERT((new_length % kCodeCacheEntrySize) == 0);
   Object* result = cache->CopySize(new_length);
   if (result->IsFailure()) return result;
 
   // Add the (name, code) pair to the new cache.
   cache = FixedArray::cast(result);
-  cache->set(length + 0, name);
-  cache->set(length + 1, code);
-  set_code_cache(cache);
+  cache->set(length + kCodeCacheEntryNameOffset, name);
+  cache->set(length + kCodeCacheEntryCodeOffset, code);
+  set_default_cache(cache);
   return this;
 }
 
 
-Object* Map::FindInCodeCache(String* name, Code::Flags flags) {
-  FixedArray* cache = code_cache();
+Object* CodeCache::UpdateNormalTypeCache(String* name, Code* code) {
+  // Adding a new entry can cause a new cache to be allocated.
+  CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
+  Object* new_cache = cache->Put(name, code);
+  if (new_cache->IsFailure()) return new_cache;
+  set_normal_type_cache(new_cache);
+  return this;
+}
+
+
+Object* CodeCache::Lookup(String* name, Code::Flags flags) {
+  if (Code::ExtractTypeFromFlags(flags) == NORMAL) {
+    return LookupNormalTypeCache(name, flags);
+  } else {
+    return LookupDefaultCache(name, flags);
+  }
+}
+
+
+Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
+  FixedArray* cache = default_cache();
   int length = cache->length();
-  for (int i = 0; i < length; i += 2) {
-    Object* key = cache->get(i);
+  for (int i = 0; i < length; i += kCodeCacheEntrySize) {
+    Object* key = cache->get(i + kCodeCacheEntryNameOffset);
     // Skip deleted elements.
     if (key->IsNull()) continue;
     if (key->IsUndefined()) return key;
     if (name->Equals(String::cast(key))) {
-      Code* code = Code::cast(cache->get(i + 1));
-      if (code->flags() == flags) return code;
+      Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
+      if (code->flags() == flags) {
+        return code;
+      }
     }
   }
   return Heap::undefined_value();
 }
 
 
-int Map::IndexInCodeCache(Code* code) {
-  FixedArray* array = code_cache();
+Object* CodeCache::LookupNormalTypeCache(String* name, Code::Flags flags) {
+  if (!normal_type_cache()->IsUndefined()) {
+    CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
+    return cache->Lookup(name, flags);
+  } else {
+    return Heap::undefined_value();
+  }
+}
+
+
+int CodeCache::GetIndex(Object* name, Code* code) {
+  if (code->type() == NORMAL) {
+    if (normal_type_cache()->IsUndefined()) return -1;
+    CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
+    return cache->GetIndex(String::cast(name), code->flags());
+  }
+
+  FixedArray* array = default_cache();
   int len = array->length();
-  for (int i = 0; i < len; i += 2) {
-    if (array->get(i + 1) == code) return i + 1;
+  for (int i = 0; i < len; i += kCodeCacheEntrySize) {
+    if (array->get(i + kCodeCacheEntryCodeOffset) == code) return i + 1;
   }
   return -1;
 }
 
 
-void Map::RemoveFromCodeCache(int index) {
-  FixedArray* array = code_cache();
-  ASSERT(array->length() >= index && array->get(index)->IsCode());
-  // Use null instead of undefined for deleted elements to distinguish
-  // deleted elements from unused elements.  This distinction is used
-  // when looking up in the cache and when updating the cache.
-  array->set_null(index - 1);  // key
-  array->set_null(index);  // code
+void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
+  if (code->type() == NORMAL) {
+    ASSERT(!normal_type_cache()->IsUndefined());
+    CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
+    ASSERT(cache->GetIndex(String::cast(name), code->flags()) == index);
+    cache->RemoveByIndex(index);
+  } else {
+    FixedArray* array = default_cache();
+    ASSERT(array->length() >= index && array->get(index)->IsCode());
+    // Use null instead of undefined for deleted elements to distinguish
+    // deleted elements from unused elements.  This distinction is used
+    // when looking up in the cache and when updating the cache.
+    ASSERT_EQ(1, kCodeCacheEntryCodeOffset - kCodeCacheEntryNameOffset);
+    array->set_null(index - 1);  // Name.
+    array->set_null(index);  // Code.
+  }
+}
+
+
+// The key in the code cache hash table consists of the property name and the
+// code object. The actual match is on the name and the code flags. If a key
+// is created using the flags and not a code object, it can only be used for
+// lookup, not to create a new entry.
+class CodeCacheHashTableKey : public HashTableKey {
+ public:
+  CodeCacheHashTableKey(String* name, Code::Flags flags)
+      : name_(name), flags_(flags), code_(NULL) { }
+
+  CodeCacheHashTableKey(String* name, Code* code)
+      : name_(name),
+        flags_(code->flags()),
+        code_(code) { }
+
+
+  bool IsMatch(Object* other) {
+    if (!other->IsFixedArray()) return false;
+    FixedArray* pair = FixedArray::cast(other);
+    String* name = String::cast(pair->get(0));
+    Code::Flags flags = Code::cast(pair->get(1))->flags();
+    if (flags != flags_) {
+      return false;
+    }
+    return name_->Equals(name);
+  }
+
+  static uint32_t NameFlagsHashHelper(String* name, Code::Flags flags) {
+    return name->Hash() ^ flags;
+  }
+
+  uint32_t Hash() { return NameFlagsHashHelper(name_, flags_); }
+
+  uint32_t HashForObject(Object* obj) {
+    FixedArray* pair = FixedArray::cast(obj);
+    String* name = String::cast(pair->get(0));
+    Code* code = Code::cast(pair->get(1));
+    return NameFlagsHashHelper(name, code->flags());
+  }
+
+  Object* AsObject() {
+    ASSERT(code_ != NULL);
+    Object* obj = Heap::AllocateFixedArray(2);
+    if (obj->IsFailure()) return obj;
+    FixedArray* pair = FixedArray::cast(obj);
+    pair->set(0, name_);
+    pair->set(1, code_);
+    return pair;
+  }
+
+ private:
+  String* name_;
+  Code::Flags flags_;
+  Code* code_;
+};
+
+
+Object* CodeCacheHashTable::Lookup(String* name, Code::Flags flags) {
+  CodeCacheHashTableKey key(name, flags);
+  int entry = FindEntry(&key);
+  if (entry == kNotFound) return Heap::undefined_value();
+  return get(EntryToIndex(entry) + 1);
+}
+
+
+Object* CodeCacheHashTable::Put(String* name, Code* code) {
+  CodeCacheHashTableKey key(name, code);
+  Object* obj = EnsureCapacity(1, &key);
+  if (obj->IsFailure()) return obj;
+
+  // Don't use this, as the table might have grown.
+  CodeCacheHashTable* cache = reinterpret_cast<CodeCacheHashTable*>(obj);
+
+  int entry = cache->FindInsertionEntry(key.Hash());
+  Object* k = key.AsObject();
+  if (k->IsFailure()) return k;
+
+  cache->set(EntryToIndex(entry), k);
+  cache->set(EntryToIndex(entry) + 1, code);
+  cache->ElementAdded();
+  return cache;
+}
+
+
+int CodeCacheHashTable::GetIndex(String* name, Code::Flags flags) {
+  CodeCacheHashTableKey key(name, flags);
+  int entry = FindEntry(&key);
+  return (entry == kNotFound) ? -1 : entry;
+}
+
+
+void CodeCacheHashTable::RemoveByIndex(int index) {
+  ASSERT(index >= 0);
+  set(EntryToIndex(index), Heap::null_value());
+  set(EntryToIndex(index) + 1, Heap::null_value());
+  ElementRemoved();
 }
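
CodeCacheHashTableKey above hashes on (name, flags) but stores (name, code) pairs, recomputing the flags half of the hash from the stored Code object when rehashing. The same two-part key expressed with standard containers, so the hashing scheme can be seen in isolation (std::unordered_map replaces V8's open-addressed HashTable, and the mapped value type is elided):

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    struct NameFlagsKey {
      std::string name;
      uint32_t flags;
      bool operator==(const NameFlagsKey& other) const {
        // Matches CodeCacheHashTableKey::IsMatch: flags first, then name.
        return flags == other.flags && name == other.name;
      }
    };

    struct NameFlagsHash {
      size_t operator()(const NameFlagsKey& key) const {
        // Same combination as NameFlagsHashHelper: name hash XOR flags.
        return std::hash<std::string>()(key.name) ^ key.flags;
      }
    };

    using ToyNormalTypeCache =
        std::unordered_map<NameFlagsKey, const void*, NameFlagsHash>;

    int main() {
      ToyNormalTypeCache cache;
      cache[{"length", 0x42}] = nullptr;  // Keyed by (name, flags).
      return cache.count({"length", 0x42}) == 1 ? 0 : 1;
    }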
 
 
@@ -3363,18 +3568,25 @@
   int len = number_of_descriptors();
 
   // Bottom-up max-heap construction.
-  for (int i = 1; i < len; ++i) {
-    int child_index = i;
-    while (child_index > 0) {
-      int parent_index = ((child_index + 1) >> 1) - 1;
-      uint32_t parent_hash = GetKey(parent_index)->Hash();
+  // Index of the last node with children.
+  const int max_parent_index = (len / 2) - 1;
+  for (int i = max_parent_index; i >= 0; --i) {
+    int parent_index = i;
+    const uint32_t parent_hash = GetKey(i)->Hash();
+    while (parent_index <= max_parent_index) {
+      int child_index = 2 * parent_index + 1;
       uint32_t child_hash = GetKey(child_index)->Hash();
-      if (parent_hash < child_hash) {
-        Swap(parent_index, child_index);
-      } else {
-        break;
+      if (child_index + 1 < len) {
+        uint32_t right_child_hash = GetKey(child_index + 1)->Hash();
+        if (right_child_hash > child_hash) {
+          child_index++;
+          child_hash = right_child_hash;
+        }
       }
-      child_index = parent_index;
+      if (child_hash <= parent_hash) break;
+      Swap(parent_index, child_index);
+      // Now the element at child_index could be smaller than its children.
+      parent_index = child_index;  // parent_hash remains correct.
     }
   }
 
@@ -3384,21 +3596,21 @@
     Swap(0, i);
     // Sift down the new top element.
     int parent_index = 0;
-    while (true) {
-      int child_index = ((parent_index + 1) << 1) - 1;
-      if (child_index >= i) break;
-      uint32_t child1_hash = GetKey(child_index)->Hash();
-      uint32_t child2_hash = GetKey(child_index + 1)->Hash();
-      uint32_t parent_hash = GetKey(parent_index)->Hash();
-      if (child_index + 1 >= i || child1_hash > child2_hash) {
-        if (parent_hash > child1_hash) break;
-        Swap(parent_index, child_index);
-        parent_index = child_index;
-      } else {
-        if (parent_hash > child2_hash) break;
-        Swap(parent_index, child_index + 1);
-        parent_index = child_index + 1;
+    const uint32_t parent_hash = GetKey(parent_index)->Hash();
+    const int max_parent_index = (i / 2) - 1;
+    while (parent_index <= max_parent_index) {
+      int child_index = parent_index * 2 + 1;
+      uint32_t child_hash = GetKey(child_index)->Hash();
+      if (child_index + 1 < i) {
+        uint32_t right_child_hash = GetKey(child_index + 1)->Hash();
+        if (right_child_hash > child_hash) {
+          child_index++;
+          child_hash = right_child_hash;
+        }
       }
+      if (child_hash <= parent_hash) break;
+      Swap(parent_index, child_index);
+      parent_index = child_index;
     }
   }
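
The rewritten Sort is textbook in-place heapsort: Floyd's bottom-up heap construction (start at the last parent and sift down) followed by repeated extract-max, with the sift-down always following the larger child. The same structure on a plain int array, swaps and all, as a standalone sketch:

    #include <cassert>

    static void Swap(int* a, int i, int j) {
      int t = a[i]; a[i] = a[j]; a[j] = t;
    }

    // Sift a[parent] down within a[0..end), following the larger child,
    // exactly as the loop bodies above do with descriptor hash values.
    static void SiftDown(int* a, int parent, int end) {
      int parent_value = a[parent];
      while (true) {
        int child = 2 * parent + 1;
        if (child >= end) break;
        if (child + 1 < end && a[child + 1] > a[child]) child++;
        if (a[child] <= parent_value) break;
        Swap(a, parent, child);
        parent = child;  // parent_value still names the element we moved.
      }
    }

    void HeapSort(int* a, int len) {
      // Bottom-up max-heap construction: last node with children first.
      for (int i = len / 2 - 1; i >= 0; --i) SiftDown(a, i, len);
      // Extraction: move the max to the end, shrink, restore the heap.
      for (int i = len - 1; i > 0; --i) {
        Swap(a, 0, i);
        SiftDown(a, 0, i);
      }
    }

    int main() {
      int a[] = {5, 1, 4, 2, 3};
      HeapSort(a, 5);
      for (int i = 0; i < 5; i++) assert(a[i] == i + 1);
      return 0;
    }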
 
@@ -3479,7 +3691,7 @@
   // doesn't make Utf8Length faster, but it is very likely that
   // the string will be accessed later (for example by WriteUtf8)
   // so it's still a good idea.
-  TryFlattenIfNotFlat();
+  TryFlatten();
   Access<StringInputBuffer> buffer(&string_input_buffer);
   buffer->Reset(0, this);
   int result = 0;
@@ -4446,13 +4658,38 @@
 }
 
 
+template <typename schar>
+static inline uint32_t HashSequentialString(const schar* chars, int length) {
+  StringHasher hasher(length);
+  if (!hasher.has_trivial_hash()) {
+    int i;
+    for (i = 0; hasher.is_array_index() && (i < length); i++) {
+      hasher.AddCharacter(chars[i]);
+    }
+    for (; i < length; i++) {
+      hasher.AddCharacterNoIndex(chars[i]);
+    }
+  }
+  return hasher.GetHashField();
+}
+
+
 uint32_t String::ComputeAndSetHash() {
   // Should only be called if hash code has not yet been computed.
   ASSERT(!(hash_field() & kHashComputedMask));
 
+  const int len = length();
+
   // Compute the hash code.
-  StringInputBuffer buffer(this);
-  uint32_t field = ComputeHashField(&buffer, length());
+  uint32_t field = 0;
+  if (StringShape(this).IsSequentialAscii()) {
+    field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(), len);
+  } else if (StringShape(this).IsSequentialTwoByte()) {
+    field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(), len);
+  } else {
+    StringInputBuffer buffer(this);
+    field = ComputeHashField(&buffer, len);
+  }
 
   // Store the hash code in the object.
   set_hash_field(field);
@@ -4570,9 +4807,9 @@
 }
 
 
-Object* String::SubString(int start, int end) {
+Object* String::SubString(int start, int end, PretenureFlag pretenure) {
   if (start == 0 && end == length()) return this;
-  Object* result = Heap::AllocateSubString(this, start, end);
+  Object* result = Heap::AllocateSubString(this, start, end, pretenure);
   return result;
 }
 
@@ -4669,6 +4906,7 @@
 
 
 Object* JSFunction::SetPrototype(Object* value) {
+  ASSERT(should_have_prototype());
   Object* construct_prototype = value;
 
   // If the value is not a JSObject, store the value in the map's
@@ -4694,6 +4932,14 @@
 }
 
 
+Object* JSFunction::RemovePrototype() {
+  ASSERT(map() == context()->global_context()->function_map());
+  set_map(context()->global_context()->function_without_prototype_map());
+  set_prototype_or_initial_map(Heap::the_hole_value());
+  return this;
+}
+
+
 Object* JSFunction::SetInstanceClassName(String* name) {
   shared()->set_instance_class_name(name);
   return this;
@@ -4884,11 +5130,9 @@
 
 
 void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
-  IteratePointers(v, kNameOffset, kConstructStubOffset + kPointerSize);
-  IteratePointers(v, kInstanceClassNameOffset, kScriptOffset + kPointerSize);
-  IteratePointers(v, kDebugInfoOffset, kInferredNameOffset + kPointerSize);
-  IteratePointers(v, kThisPropertyAssignmentsOffset,
-      kThisPropertyAssignmentsOffset + kPointerSize);
+  IteratePointers(v,
+                  kNameOffset,
+                  kThisPropertyAssignmentsOffset + kPointerSize);
 }
 
 
@@ -5059,6 +5303,7 @@
     case STORE_IC: return "STORE_IC";
     case KEYED_STORE_IC: return "KEYED_STORE_IC";
     case CALL_IC: return "CALL_IC";
+    case BINARY_OP_IC: return "BINARY_OP_IC";
   }
   UNREACHABLE();
   return NULL;
@@ -5180,7 +5425,7 @@
     case DICTIONARY_ELEMENTS: {
       if (IsJSArray()) {
         uint32_t old_length =
-        static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
+            static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
         element_dictionary()->RemoveNumberEntries(new_length, old_length),
         JSArray::cast(this)->set_length(len);
       }
@@ -5238,7 +5483,7 @@
 
 Object* JSObject::SetElementsLength(Object* len) {
   // We should never end in here with a pixel or external array.
-  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
+  ASSERT(AllowsSetElementsLength());
 
   Object* smi_length = len->ToSmi();
   if (smi_length->IsSmi()) {
@@ -6154,9 +6399,9 @@
 InterceptorInfo* JSObject::GetNamedInterceptor() {
   ASSERT(map()->has_named_interceptor());
   JSFunction* constructor = JSFunction::cast(map()->constructor());
-  Object* template_info = constructor->shared()->function_data();
+  ASSERT(constructor->shared()->IsApiFunction());
   Object* result =
-      FunctionTemplateInfo::cast(template_info)->named_property_handler();
+      constructor->shared()->get_api_func_data()->named_property_handler();
   return InterceptorInfo::cast(result);
 }
 
@@ -6164,9 +6409,9 @@
 InterceptorInfo* JSObject::GetIndexedInterceptor() {
   ASSERT(map()->has_indexed_interceptor());
   JSFunction* constructor = JSFunction::cast(map()->constructor());
-  Object* template_info = constructor->shared()->function_data();
+  ASSERT(constructor->shared()->IsApiFunction());
   Object* result =
-      FunctionTemplateInfo::cast(template_info)->indexed_property_handler();
+      constructor->shared()->get_api_func_data()->indexed_property_handler();
   return InterceptorInfo::cast(result);
 }
 
@@ -6836,15 +7081,17 @@
 
 
 template<typename Shape, typename Key>
-Object* HashTable<Shape, Key>::Allocate(int at_least_space_for) {
-  int capacity = RoundUpToPowerOf2(at_least_space_for);
-  if (capacity < 4) {
-    capacity = 4;  // Guarantee min capacity.
+Object* HashTable<Shape, Key>::Allocate(int at_least_space_for,
+                                        PretenureFlag pretenure) {
+  const int kMinCapacity = 32;
+  int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
+  if (capacity < kMinCapacity) {
+    capacity = kMinCapacity;  // Guarantee min capacity.
   } else if (capacity > HashTable::kMaxCapacity) {
     return Failure::OutOfMemoryException();
   }
 
-  Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity));
+  Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity), pretenure);
   if (!obj->IsFailure()) {
     HashTable::cast(obj)->SetNumberOfElements(0);
     HashTable::cast(obj)->SetNumberOfDeletedElements(0);
@@ -6879,10 +7126,15 @@
   // Return if:
   //   50% is still free after adding n elements and
   //   at most 50% of the free elements are deleted elements.
-  if ((nof + (nof >> 1) <= capacity) &&
-      (nod <= (capacity - nof) >> 1)) return this;
+  if (nod <= (capacity - nof) >> 1) {
+    int needed_free = nof >> 1;
+    if (nof + needed_free <= capacity) return this;
+  }
 
-  Object* obj = Allocate(nof * 2);
+  const int kMinCapacityForPretenure = 256;
+  bool pretenure =
+      (capacity > kMinCapacityForPretenure) && !Heap::InNewSpace(this);
+  Object* obj = Allocate(nof * 2, pretenure ? TENURED : NOT_TENURED);
   if (obj->IsFailure()) return obj;
 
   AssertNoAllocation no_gc;
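
Allocate now sizes the table at the next power of two of at least twice the requested element count, clamped below at 32, so a fresh table is at most half full; EnsureCapacity keeps the same headroom by reusing the table only when adding the elements still leaves 50% free. A standalone restatement of the sizing arithmetic (RoundUpToPowerOf2 is reimplemented naively here; V8 has its own):

    #include <cassert>

    static int RoundUpToPowerOf2(int x) {
      int result = 1;
      while (result < x) result <<= 1;
      return result;
    }

    int ComputeHashTableCapacity(int at_least_space_for) {
      const int kMinCapacity = 32;
      int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
      return capacity < kMinCapacity ? kMinCapacity : capacity;
    }

    int main() {
      assert(ComputeHashTableCapacity(4) == 32);     // Clamped to minimum.
      assert(ComputeHashTableCapacity(20) == 64);    // 40 rounds up to 64.
      assert(ComputeHashTableCapacity(100) == 256);  // 200 rounds up to 256.
      return 0;
    }
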
@@ -6914,7 +7166,6 @@
 }
 
 
-
 template<typename Shape, typename Key>
 uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
   uint32_t capacity = Capacity();
@@ -7024,8 +7275,7 @@
     result_double = HeapNumber::cast(new_double);
   }
 
-  int capacity = dict->Capacity();
-  Object* obj = NumberDictionary::Allocate(dict->Capacity());
+  Object* obj = NumberDictionary::Allocate(dict->NumberOfElements());
   if (obj->IsFailure()) return obj;
   NumberDictionary* new_dict = NumberDictionary::cast(obj);
 
@@ -7033,6 +7283,7 @@
 
   uint32_t pos = 0;
   uint32_t undefs = 0;
+  int capacity = dict->Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k = dict->KeyAt(i);
     if (dict->IsKey(k)) {
diff --git a/src/objects.h b/src/objects.h
index 0030496..dcfb2ee 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -72,9 +72,11 @@
 //             - Dictionary
 //             - SymbolTable
 //             - CompilationCacheTable
+//             - CodeCacheHashTable
 //             - MapCache
 //           - Context
 //           - GlobalContext
+//           - JSFunctionResultCache
 //       - String
 //         - SeqString
 //           - SeqAsciiString
@@ -102,6 +104,7 @@
 //         - TypeSwitchInfo
 //         - DebugInfo
 //         - BreakPointInfo
+//         - CodeCache
 //
 // Formats of Object*:
 //  Smi:        [31 bit signed int] 0
@@ -269,6 +272,7 @@
   V(SIGNATURE_INFO_TYPE)                                                       \
   V(TYPE_SWITCH_INFO_TYPE)                                                     \
   V(SCRIPT_TYPE)                                                               \
+  V(CODE_CACHE_TYPE)                                                           \
                                                                                \
   V(JS_VALUE_TYPE)                                                             \
   V(JS_OBJECT_TYPE)                                                            \
@@ -364,7 +368,8 @@
   V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info)            \
   V(SIGNATURE_INFO, SignatureInfo, signature_info)                             \
   V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info)                        \
-  V(SCRIPT, Script, script)
+  V(SCRIPT, Script, script)                                                    \
+  V(CODE_CACHE, CodeCache, code_cache)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 #define STRUCT_LIST_DEBUGGER(V)                                                \
@@ -468,6 +473,7 @@
   SIGNATURE_INFO_TYPE,
   TYPE_SWITCH_INFO_TYPE,
   SCRIPT_TYPE,
+  CODE_CACHE_TYPE,
 #ifdef ENABLE_DEBUGGER_SUPPORT
   DEBUG_INFO_TYPE,
   BREAK_POINT_INFO_TYPE,
@@ -600,7 +606,9 @@
   inline bool IsHashTable();
   inline bool IsDictionary();
   inline bool IsSymbolTable();
+  inline bool IsJSFunctionResultCache();
   inline bool IsCompilationCacheTable();
+  inline bool IsCodeCacheHashTable();
   inline bool IsMapCache();
   inline bool IsPrimitive();
   inline bool IsGlobalObject();
@@ -1088,6 +1096,9 @@
   void HeapNumberVerify();
 #endif
 
+  inline int get_exponent();
+  inline int get_sign();
+
   // Layout description.
   static const int kValueOffset = HeapObject::kHeaderSize;
   // IEEE doubles are two 32 bit words.  The first is just mantissa, the second
@@ -1106,6 +1117,8 @@
   static const uint32_t kSignMask = 0x80000000u;
   static const uint32_t kExponentMask = 0x7ff00000u;
   static const uint32_t kMantissaMask = 0xfffffu;
+  static const int kMantissaBits = 52;
+  static const int KExponentBits = 11;
   static const int kExponentBias = 1023;
   static const int kExponentShift = 20;
   static const int kMantissaBitsInTopWord = 20;
@@ -1161,6 +1174,7 @@
   inline bool HasExternalIntElements();
   inline bool HasExternalUnsignedIntElements();
   inline bool HasExternalFloatElements();
+  inline bool AllowsSetElementsLength();
   inline NumberDictionary* element_dictionary();  // Gets slow elements.
 
   // Collects elements starting at index 0.
@@ -1609,6 +1623,9 @@
   inline void set_null(int index);
   inline void set_the_hole(int index);
 
+  // Gives access to raw memory which stores the array's data.
+  inline Object** data_start();
+
   // Copy operations.
   inline Object* Copy();
   Object* CopySize(int new_length);
@@ -1916,7 +1933,8 @@
   }
 
   // Returns a new HashTable object. Might return Failure.
-  static Object* Allocate(int at_least_space_for);
+  static Object* Allocate(int at_least_space_for,
+                          PretenureFlag pretenure = NOT_TENURED);
 
   // Returns the key at entry.
   Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
@@ -1948,6 +1966,8 @@
   static const int kEntrySize = Shape::kEntrySize;
   static const int kElementsStartOffset =
       kHeaderSize + kElementsStartIndex * kPointerSize;
+  static const int kCapacityOffset =
+      kHeaderSize + kCapacityIndex * kPointerSize;
 
   // Constant used for denoting an absent entry.
   static const int kNotFound = -1;
@@ -2125,24 +2145,24 @@
 
   // Returns the value at entry.
   Object* ValueAt(int entry) {
-    return get(HashTable<Shape, Key>::EntryToIndex(entry)+1);
+    return this->get(HashTable<Shape, Key>::EntryToIndex(entry)+1);
   }
 
   // Set the value for entry.
   void ValueAtPut(int entry, Object* value) {
-    set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value);
+    this->set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value);
   }
 
   // Returns the property details for the property at entry.
   PropertyDetails DetailsAt(int entry) {
     ASSERT(entry >= 0);  // Not found is -1, which is not caught by get().
     return PropertyDetails(
-        Smi::cast(get(HashTable<Shape, Key>::EntryToIndex(entry) + 2)));
+        Smi::cast(this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 2)));
   }
 
   // Set the details for entry.
   void DetailsAtPut(int entry, PropertyDetails value) {
-    set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi());
+    this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi());
   }
 
   // Sorting support
@@ -2165,7 +2185,7 @@
 
   // Accessors for next enumeration index.
   void SetNextEnumerationIndex(int index) {
-    fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
+    this->fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
   }
 
   int NextEnumerationIndex() {
@@ -2291,6 +2311,35 @@
 };
 
 
+// A JSFunctionResultCache caches the results of invocations of a JSFunction.
+// It is a fixed array with a fixed structure:
+//   [0]: factory function
+//   [1]: finger index
+//   [2]: current cache size
+//   [3]: dummy field.
+// The rest of the array holds key/value pairs.
+class JSFunctionResultCache: public FixedArray {
+ public:
+  static const int kFactoryIndex = 0;
+  static const int kFingerIndex = kFactoryIndex + 1;
+  static const int kCacheSizeIndex = kFingerIndex + 1;
+  static const int kDummyIndex = kCacheSizeIndex + 1;
+  static const int kEntriesIndex = kDummyIndex + 1;
+
+  static const int kEntrySize = 2;  // key + value
+
+  inline void MakeZeroSize();
+  inline void Clear();
+
+  // Casting
+  static inline JSFunctionResultCache* cast(Object* obj);
+
+#ifdef DEBUG
+  void JSFunctionResultCacheVerify();
+#endif
+};
+
+
 // ByteArray represents fixed sized byte arrays.  Used by the outside world,
 // such as PCRE, and also by the memory allocator and garbage collector to
 // fill in free blocks in the heap.
@@ -2598,13 +2647,14 @@
     CALL_IC,
     STORE_IC,
     KEYED_STORE_IC,
-    // No more than eight kinds. The value currently encoded in three bits in
+    BINARY_OP_IC,
+    // No more than 16 kinds. The value is currently encoded in four bits in
     // Flags.
 
     // Pseudo-kinds.
     REGEXP = BUILTIN,
     FIRST_IC_KIND = LOAD_IC,
-    LAST_IC_KIND = KEYED_STORE_IC
+    LAST_IC_KIND = BINARY_OP_IC
   };
 
   enum {
@@ -2650,7 +2700,7 @@
   inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
   inline bool is_call_stub() { return kind() == CALL_IC; }
 
-  // [major_key]: For kind STUB, the major key.
+  // [major_key]: For kind STUB or BINARY_OP_IC, the major key.
   inline CodeStub::Major major_key();
   inline void set_major_key(CodeStub::Major major);
 
@@ -2757,14 +2807,14 @@
   static const int kFlagsICStateShift        = 0;
   static const int kFlagsICInLoopShift       = 3;
   static const int kFlagsKindShift           = 4;
-  static const int kFlagsTypeShift           = 7;
-  static const int kFlagsArgumentsCountShift = 10;
+  static const int kFlagsTypeShift           = 8;
+  static const int kFlagsArgumentsCountShift = 11;
 
-  static const int kFlagsICStateMask        = 0x00000007;  // 0000000111
-  static const int kFlagsICInLoopMask       = 0x00000008;  // 0000001000
-  static const int kFlagsKindMask           = 0x00000070;  // 0001110000
-  static const int kFlagsTypeMask           = 0x00000380;  // 1110000000
-  static const int kFlagsArgumentsCountMask = 0xFFFFFC00;
+  static const int kFlagsICStateMask        = 0x00000007;  // 00000000111
+  static const int kFlagsICInLoopMask       = 0x00000008;  // 00000001000
+  static const int kFlagsKindMask           = 0x000000F0;  // 00011110000
+  static const int kFlagsTypeMask           = 0x00000700;  // 11100000000
+  static const int kFlagsArgumentsCountMask = 0xFFFFF800;
 
   static const int kFlagsNotUsedInLookup =
       (kFlagsICInLoopMask | kFlagsTypeMask);
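
The shifts and masks above pack the IC state, in-loop bit, kind, type, and argument count into one 32-bit flags word; making room for BINARY_OP_IC widens the kind field from three bits to four and shifts the fields above it up by one bit. A minimal sketch of the packing arithmetic, using the constants above:

    // Sketch: extract and insert the kind field of a flags word.
    static inline int ExtractKindField(int flags) {
      return (flags & kFlagsKindMask) >> kFlagsKindShift;  // four bits: 0..15
    }
    static inline int InsertKindField(int flags, int kind) {
      return (flags & ~kFlagsKindMask) | (kind << kFlagsKindShift);
    }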
@@ -2817,6 +2867,12 @@
   inline void set_non_instance_prototype(bool value);
   inline bool has_non_instance_prototype();
 
+  // Tells whether the function has a special prototype property. If not, the
+  // prototype property will not be created when accessed (accesses return
+  // undefined), and construction from this function will not be allowed.
+  inline void set_function_with_prototype(bool value);
+  inline bool function_with_prototype();
+
   // Tells whether the instance with this map should be ignored by the
   // __proto__ accessor.
   inline void set_is_hidden_prototype() {
@@ -2891,7 +2947,7 @@
   DECL_ACCESSORS(instance_descriptors, DescriptorArray)
 
   // [stub cache]: contains stubs compiled for this map.
-  DECL_ACCESSORS(code_cache, FixedArray)
+  DECL_ACCESSORS(code_cache, Object)
 
   Object* CopyDropDescriptors();
 
@@ -2927,10 +2983,10 @@
 
   // Returns the non-negative index of the code object if it is in the
   // cache and -1 otherwise.
-  int IndexInCodeCache(Code* code);
+  int IndexInCodeCache(Object* name, Code* code);
 
   // Removes a code object from the code cache at the given index.
-  void RemoveFromCodeCache(int index);
+  void RemoveFromCodeCache(String* name, Code* code, int index);
 
   // For every transition in this map, makes the transition's
   // target's prototype pointer point back to this map.
@@ -2993,6 +3049,12 @@
 
   // Bit positions for bit field 2
   static const int kIsExtensible = 0;
+  static const int kFunctionWithPrototype = 1;
+
+  // Layout of the default cache. It holds alternating name and code objects.
+  static const int kCodeCacheEntrySize = 2;
+  static const int kCodeCacheEntryNameOffset = 0;
+  static const int kCodeCacheEntryCodeOffset = 1;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
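
Given the entry constants above, the default cache is a flat array of alternating (name, code) pairs. A sketch of a linear scan over it, assuming FixedArray's get() and length(); this is illustrative, not the actual lookup code:

    // Walk the default code cache one entry (two slots) at a time.
    for (int i = 0; i < cache->length(); i += Map::kCodeCacheEntrySize) {
      Object* name = cache->get(i + Map::kCodeCacheEntryNameOffset);
      Object* code = cache->get(i + Map::kCodeCacheEntryCodeOffset);
      // ... compare name and the Code object's flags against the probe ...
    }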
@@ -3136,15 +3198,25 @@
   // [instance class name]: class name for instances.
   DECL_ACCESSORS(instance_class_name, Object)
 
-  // [function data]: This field has been added for make benefit the API.
+  // [function data]: This field holds some additional data for the function.
+  // Currently it holds either a FunctionTemplateInfo (used by the API) or a
+  // Proxy wrapping a CustomCallGenerator.
   // In the long run we don't want all functions to have this field but
   // we can fix that when we have a better model for storing hidden data
   // on objects.
   DECL_ACCESSORS(function_data, Object)
 
+  inline bool IsApiFunction();
+  inline FunctionTemplateInfo* get_api_func_data();
+  inline bool HasCustomCallGenerator();
+
   // [script info]: Script from which the function originates.
   DECL_ACCESSORS(script, Object)
 
+  // [num_literals]: Number of literals used by this function.
+  inline int num_literals();
+  inline void set_num_literals(int value);
+
   // [start_position_and_type]: Field used to store both the source code
   // position, whether or not the function is a function expression,
   // and whether or not the function is a toplevel function. The two
@@ -3243,39 +3315,39 @@
   static const int kDontAdaptArgumentsSentinel = -1;
 
   // Layout description.
-  // (An even number of integers has a size that is a multiple of a pointer.)
+  // Pointer fields.
   static const int kNameOffset = HeapObject::kHeaderSize;
   static const int kCodeOffset = kNameOffset + kPointerSize;
   static const int kConstructStubOffset = kCodeOffset + kPointerSize;
-  static const int kLengthOffset = kConstructStubOffset + kPointerSize;
+  static const int kInstanceClassNameOffset =
+      kConstructStubOffset + kPointerSize;
+  static const int kFunctionDataOffset =
+      kInstanceClassNameOffset + kPointerSize;
+  static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
+  static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
+  static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
+  static const int kThisPropertyAssignmentsOffset =
+      kInferredNameOffset + kPointerSize;
+  // Integer fields.
+  static const int kLengthOffset =
+      kThisPropertyAssignmentsOffset + kPointerSize;
   static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
   static const int kExpectedNofPropertiesOffset =
       kFormalParameterCountOffset + kIntSize;
+  static const int kNumLiteralsOffset = kExpectedNofPropertiesOffset + kIntSize;
   static const int kStartPositionAndTypeOffset =
-      kExpectedNofPropertiesOffset + kIntSize;
+      kNumLiteralsOffset + kIntSize;
   static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
   static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
-  static const int kInstanceClassNameOffset =
+  static const int kCompilerHintsOffset =
       kFunctionTokenPositionOffset + kIntSize;
-  static const int kExternalReferenceDataOffset =
-      kInstanceClassNameOffset + kPointerSize;
-  static const int kScriptOffset = kExternalReferenceDataOffset + kPointerSize;
-  static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
-  static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
-  static const int kCompilerHintsOffset = kInferredNameOffset + kPointerSize;
-  static const int kThisPropertyAssignmentsOffset =
-      kCompilerHintsOffset + kPointerSize;
   static const int kThisPropertyAssignmentsCountOffset =
-      kThisPropertyAssignmentsOffset + kPointerSize;
-  static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
+      kCompilerHintsOffset + kIntSize;
+  // Total size.
+  static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
+  static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
 
  private:
-  // Bit positions in length_and_flg.
-  // The least significant bit is used as the flag.
-  static const int kFlagBit         = 0;
-  static const int kLengthShift     = 1;
-  static const int kLengthMask      = ~((1 << kLengthShift) - 1);
-
   // Bit positions in start_position_and_type.
   // The source code start position is in the 30 most significant bits of
   // the start_position_and_type field.
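
The reordering above groups all pointer fields ahead of all int fields, presumably so the garbage collector can visit the pointer region as one contiguous span; since the int tail need not end on a pointer boundary, kAlignedSize rounds kSize up. POINTER_SIZE_ALIGN is the usual round-up idiom, roughly as below (shown here only to make kAlignedSize concrete):

    // Round value up to the next multiple of kPointerSize, assuming
    // kPointerAlignmentMask == kPointerSize - 1.
    #define POINTER_SIZE_ALIGN(value) \
      (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)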
@@ -3314,10 +3386,6 @@
   inline Code* code();
   inline void set_code(Code* value);
 
-  // Tells whether this function is a context-independent boilerplate
-  // function.
-  inline bool IsBoilerplate();
-
   // Tells whether this function is builtin.
   inline bool IsBuiltin();
 
@@ -3348,6 +3416,11 @@
   Object* SetInstancePrototype(Object* value);
   Object* SetPrototype(Object* value);
 
+  // After prototype is removed, it will not be created when accessed, and
+  // [[Construct]] from this function will not be allowed.
+  Object* RemovePrototype();
+  inline bool should_have_prototype();
+
   // Accessor for this function's initial map's [[class]]
   // property. This is primarily used by ECMA native functions.  This
   // method sets the class_name field of this function's initial map
@@ -3493,6 +3566,10 @@
   inline Object* javascript_builtin(Builtins::JavaScript id);
   inline void set_javascript_builtin(Builtins::JavaScript id, Object* value);
 
+  // Accessors for code of the runtime routines written in JavaScript.
+  inline Code* javascript_builtin_code(Builtins::JavaScript id);
+  inline void set_javascript_builtin_code(Builtins::JavaScript id, Code* value);
+
   // Casting.
   static inline JSBuiltinsObject* cast(Object* obj);
 
@@ -3503,11 +3580,23 @@
 #endif
 
   // Layout description.  The size of the builtins object includes
-  // room for one pointer per runtime routine written in javascript.
+  // room for two pointers per runtime routine written in javascript
+  // (function and code object).
   static const int kJSBuiltinsCount = Builtins::id_count;
   static const int kJSBuiltinsOffset = GlobalObject::kHeaderSize;
+  static const int kJSBuiltinsCodeOffset =
+      GlobalObject::kHeaderSize + (kJSBuiltinsCount * kPointerSize);
   static const int kSize =
-      kJSBuiltinsOffset + (kJSBuiltinsCount * kPointerSize);
+      kJSBuiltinsCodeOffset + (kJSBuiltinsCount * kPointerSize);
+
+  static int OffsetOfFunctionWithId(Builtins::JavaScript id) {
+    return kJSBuiltinsOffset + id * kPointerSize;
+  }
+
+  static int OffsetOfCodeWithId(Builtins::JavaScript id) {
+    return kJSBuiltinsCodeOffset + id * kPointerSize;
+  }
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSBuiltinsObject);
 };
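
With the parallel code table, a builtin's code object is reachable with a single offset computation instead of a load through the JSFunction. A hypothetical use of the helpers above (illustrative only):

    // Raw field offsets for a given builtin id.
    int fn_offset   = JSBuiltinsObject::OffsetOfFunctionWithId(id);
    int code_offset = JSBuiltinsObject::OffsetOfCodeWithId(id);
    // Generated code can load the Code object at code_offset directly,
    // which is presumably the point of caching the code objects separately.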
@@ -3635,6 +3724,13 @@
       FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
   static const int kIrregexpCaptureCountOffset =
       FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
+
+  // In-object fields.
+  static const int kSourceFieldIndex = 0;
+  static const int kGlobalFieldIndex = 1;
+  static const int kIgnoreCaseFieldIndex = 2;
+  static const int kMultilineFieldIndex = 3;
+  static const int kLastIndexFieldIndex = 4;
 };
 
 
@@ -3679,6 +3775,97 @@
 };
 
 
+class CodeCache: public Struct {
+ public:
+  DECL_ACCESSORS(default_cache, FixedArray)
+  DECL_ACCESSORS(normal_type_cache, Object)
+
+  // Add the code object to the cache.
+  Object* Update(String* name, Code* code);
+
+  // Looks up a code object in the cache. Returns the code object if found
+  // and undefined if not.
+  Object* Lookup(String* name, Code::Flags flags);
+
+  // Get the internal index of a code object in the cache. Returns -1 if the
+  // code object is not in that cache. This index can be used to later call
+  // RemoveByIndex. The cache cannot be modified between a call to GetIndex and
+  // RemoveByIndex.
+  int GetIndex(Object* name, Code* code);
+
+  // Remove an object from the cache with the provided internal index.
+  void RemoveByIndex(Object* name, Code* code, int index);
+
+  static inline CodeCache* cast(Object* obj);
+
+#ifdef DEBUG
+  void CodeCachePrint();
+  void CodeCacheVerify();
+#endif
+
+  static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
+  static const int kNormalTypeCacheOffset =
+      kDefaultCacheOffset + kPointerSize;
+  static const int kSize = kNormalTypeCacheOffset + kPointerSize;
+
+ private:
+  Object* UpdateDefaultCache(String* name, Code* code);
+  Object* UpdateNormalTypeCache(String* name, Code* code);
+  Object* LookupDefaultCache(String* name, Code::Flags flags);
+  Object* LookupNormalTypeCache(String* name, Code::Flags flags);
+
+  // Code cache layout of the default cache. Elements are alternating name
+  // and code objects for non-normal load/store/call ICs.
+  static const int kCodeCacheEntrySize = 2;
+  static const int kCodeCacheEntryNameOffset = 0;
+  static const int kCodeCacheEntryCodeOffset = 1;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCache);
+};
+
+
+class CodeCacheHashTableShape {
+ public:
+  static inline bool IsMatch(HashTableKey* key, Object* value) {
+    return key->IsMatch(value);
+  }
+
+  static inline uint32_t Hash(HashTableKey* key) {
+    return key->Hash();
+  }
+
+  static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
+    return key->HashForObject(object);
+  }
+
+  static Object* AsObject(HashTableKey* key) {
+    return key->AsObject();
+  }
+
+  static const int kPrefixSize = 0;
+  static const int kEntrySize = 2;
+};
+
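
The shape class is a compile-time policy consumed by the HashTable template: kPrefixSize is the number of extra header slots before the entries, and kEntrySize the number of slots per entry, so entry n's key lives (roughly) at kElementsStartIndex + n * kEntrySize once the prefix is folded in. A hypothetical shape with Smi keys, to show the static interface the template expects (not part of this patch):

    // Illustrative shape: Smi keys, (key, value) entries.
    class SmiKeyShape {
     public:
      static inline bool IsMatch(Smi* key, Object* other) {
        return other->IsSmi() && Smi::cast(other) == key;
      }
      static inline uint32_t Hash(Smi* key) {
        return static_cast<uint32_t>(key->value());
      }
      static inline uint32_t HashForObject(Smi* key, Object* object) {
        return static_cast<uint32_t>(Smi::cast(object)->value());
      }
      static Object* AsObject(Smi* key) { return key; }
      static const int kPrefixSize = 0;  // no extra header slots
      static const int kEntrySize = 2;   // key + value
    };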
+
+class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape,
+                                           HashTableKey*> {
+ public:
+  Object* Lookup(String* name, Code::Flags flags);
+  Object* Put(String* name, Code* code);
+
+  int GetIndex(String* name, Code::Flags flags);
+  void RemoveByIndex(int index);
+
+  static inline CodeCacheHashTable* cast(Object* obj);
+
+  // Initial size of the fixed array backing the hash table.
+  static const int kInitialSize = 64;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCacheHashTable);
+};
+
+
 enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
 enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
 
@@ -3796,6 +3983,13 @@
   inline bool IsAsciiRepresentation();
   inline bool IsTwoByteRepresentation();
 
+  // Check whether this string is an external two-byte string that in
+  // fact contains only ascii characters.
+  //
+  // Such strings may appear when the embedder prefers two-byte
+  // representations even for ascii data.
+  inline bool IsExternalTwoByteStringWithAsciiChars();
+
   // Get and set individual two byte chars in the string.
   inline void Set(int index, uint16_t value);
   // Get individual two byte char in the string.  Repeated calls
@@ -3805,13 +3999,13 @@
   // Try to flatten the top level ConsString that is hiding behind this
   // string.  This is a no-op unless the string is a ConsString.  Flatten
   // mutates the ConsString and might return a failure.
-  Object* TryFlatten();
+  Object* SlowTryFlatten(PretenureFlag pretenure);
 
   // Try to flatten the string.  Checks first inline to see if it is necessary.
-  // Do not handle allocation failures.  After calling TryFlattenIfNotFlat, the
+  // Do not handle allocation failures.  After calling TryFlatten, the
   // string could still be a ConsString, in which case a failure is returned.
   // Use FlattenString from Handles.cc to be sure to flatten.
-  inline Object* TryFlattenIfNotFlat();
+  inline Object* TryFlatten(PretenureFlag pretenure = NOT_TENURED);
 
   Vector<const char> ToAsciiVector();
   Vector<const uc16> ToUC16Vector();
@@ -3821,7 +4015,7 @@
   bool MarkAsUndetectable();
 
   // Return a substring.
-  Object* SubString(int from, int to);
+  Object* SubString(int from, int to, PretenureFlag pretenure = NOT_TENURED);
 
   // String equality operations.
   inline bool Equals(String* other);
@@ -3894,7 +4088,7 @@
 
   // Layout description.
   static const int kLengthOffset = HeapObject::kHeaderSize;
-  static const int kHashFieldOffset = kLengthOffset + kIntSize;
+  static const int kHashFieldOffset = kLengthOffset + kPointerSize;
   static const int kSize = kHashFieldOffset + kIntSize;
   // Notice: kSize is not pointer-size aligned if pointers are 64-bit.
 
@@ -4019,10 +4213,6 @@
   // Casting.
   static inline SeqString* cast(Object* obj);
 
-  // Dispatched behaviour.
-  // For regexp code.
-  uint16_t* SeqStringGetTwoByteAddress();
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
 };
@@ -4499,6 +4689,26 @@
 };
 
 
+// JSRegExpResult is just a JSArray with a specific initial map.
+// This initial map adds in-object properties for "index" and "input"
+// properties, as assigned by RegExp.prototype.exec, which allows
+// faster creation of RegExp exec results.
+// This class just holds constants used when creating the result.
+// After creation the result must be treated as a JSArray in all regards.
+class JSRegExpResult: public JSArray {
+ public:
+  // Offsets of object fields.
+  static const int kIndexOffset = JSArray::kSize;
+  static const int kInputOffset = kIndexOffset + kPointerSize;
+  static const int kSize = kInputOffset + kPointerSize;
+  // Indices of in-object properties.
+  static const int kIndexIndex = 0;
+  static const int kInputIndex = 1;
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpResult);
+};
+
+
 // An accessor must have a getter, but can have no setter.
 //
 // When setting a property, V8 searches accessors in prototypes.
diff --git a/src/oprofile-agent.cc b/src/oprofile-agent.cc
index 8aa3937..6df8f50 100644
--- a/src/oprofile-agent.cc
+++ b/src/oprofile-agent.cc
@@ -32,10 +32,6 @@
 namespace v8 {
 namespace internal {
 
-#ifdef ENABLE_OPROFILE_AGENT
-op_agent_t OProfileAgent::handle_ = NULL;
-#endif
-
 
 bool OProfileAgent::Initialize() {
 #ifdef ENABLE_OPROFILE_AGENT
@@ -70,47 +66,43 @@
 }
 
 
+#ifdef ENABLE_OPROFILE_AGENT
+op_agent_t OProfileAgent::handle_ = NULL;
+
+
 void OProfileAgent::CreateNativeCodeRegion(const char* name,
     const void* ptr, unsigned int size) {
-#ifdef ENABLE_OPROFILE_AGENT
-  if (handle_ == NULL) return;
   op_write_native_code(handle_, name, (uint64_t)ptr, ptr, size);
-#endif
 }
 
 
 void OProfileAgent::CreateNativeCodeRegion(String* name,
     const void* ptr, unsigned int size) {
-#ifdef ENABLE_OPROFILE_AGENT
-  if (handle_ != NULL) {
-    const char* func_name;
-    SmartPointer<char> str =
-        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    func_name = name->length() > 0 ? *str : "<anonymous>";
-    CreateNativeCodeRegion(func_name, ptr, size);
-  }
-#endif
+  const char* func_name;
+  SmartPointer<char> str =
+      name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  func_name = name->length() > 0 ? *str : "<anonymous>";
+  CreateNativeCodeRegion(func_name, ptr, size);
 }
 
 
 void OProfileAgent::CreateNativeCodeRegion(String* name, String* source,
     int line_num, const void* ptr, unsigned int size) {
-#ifdef ENABLE_OPROFILE_AGENT
-  if (handle_ != NULL) {
-    Vector<char> buf = Vector<char>::New(OProfileAgent::kFormattingBufSize);
-    const char* func_name;
-    SmartPointer<char> str =
-        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    func_name = name->length() > 0 ? *str : "<anonymous>";
-    SmartPointer<char> source_str =
-        source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    if (v8::internal::OS::SNPrintF(buf, "%s %s:%d",
-                                   func_name, *source_str, line_num) != -1) {
-      CreateNativeCodeRegion(buf.start(), ptr, size);
-    } else {
-      CreateNativeCodeRegion("<script/func name too long>", ptr, size);
-    }
+  Vector<char> buf = Vector<char>::New(OProfileAgent::kFormattingBufSize);
+  const char* func_name;
+  SmartPointer<char> str =
+      name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  func_name = name->length() > 0 ? *str : "<anonymous>";
+  SmartPointer<char> source_str =
+      source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+  if (v8::internal::OS::SNPrintF(buf, "%s %s:%d",
+                                 func_name, *source_str, line_num) != -1) {
+    CreateNativeCodeRegion(buf.start(), ptr, size);
+  } else {
+    CreateNativeCodeRegion("<script/func name too long>", ptr, size);
   }
-#endif
 }
-} }
+
+#endif  // ENABLE_OPROFILE_AGENT
+
+} }  // namespace v8::internal
diff --git a/src/oprofile-agent.h b/src/oprofile-agent.h
index 4c299bf..4c50f0f 100644
--- a/src/oprofile-agent.h
+++ b/src/oprofile-agent.h
@@ -37,6 +37,14 @@
 // system headers (they have __uint64_t), but is defined
 // in V8's headers.
 #include <opagent.h>  // NOLINT
+
+#define OPROFILE(Call)                             \
+  do {                                             \
+    if (v8::internal::OProfileAgent::is_enabled()) \
+      v8::internal::OProfileAgent::Call;           \
+  } while (false)
+#else
+#define OPROFILE(Call) ((void) 0)
 #endif
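
With this macro, call sites can invoke the agent unconditionally: when ENABLE_OPROFILE_AGENT is unset the call compiles away entirely, and when it is set the is_enabled() check short-circuits cheaply. A representative (hypothetical) call site:

    // The do { } while (false) wrapper makes the expansion a single
    // statement, so it nests safely under an unbraced if/else.
    OPROFILE(CreateNativeCodeRegion("RegExp", code->instruction_start(),
                                    code->instruction_size()));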
 
 namespace v8 {
@@ -46,13 +54,13 @@
  public:
   static bool Initialize();
   static void TearDown();
+#ifdef ENABLE_OPROFILE_AGENT
   static void CreateNativeCodeRegion(const char* name,
                                      const void* ptr, unsigned int size);
   static void CreateNativeCodeRegion(String* name,
                                      const void* ptr, unsigned int size);
   static void CreateNativeCodeRegion(String* name, String* source, int line_num,
                                      const void* ptr, unsigned int size);
-#ifdef ENABLE_OPROFILE_AGENT
   static bool is_enabled() { return handle_ != NULL; }
 
  private:
diff --git a/src/parser.cc b/src/parser.cc
index 5058296..089eeee 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,9 @@
 #include "api.h"
 #include "ast.h"
 #include "bootstrapper.h"
+#include "codegen.h"
 #include "compiler.h"
+#include "messages.h"
 #include "platform.h"
 #include "runtime.h"
 #include "parser.h"
@@ -107,13 +109,13 @@
 
   // Returns NULL if parsing failed.
   FunctionLiteral* ParseProgram(Handle<String> source,
-                                unibrow::CharacterStream* stream,
                                 bool in_global_context);
   FunctionLiteral* ParseLazy(Handle<String> source,
                              Handle<String> name,
-                             int start_position, bool is_expression);
-  FunctionLiteral* ParseJson(Handle<String> source,
-                             unibrow::CharacterStream* stream);
+                             int start_position,
+                             int end_position,
+                             bool is_expression);
+  FunctionLiteral* ParseJson(Handle<String> source);
 
   // The minimum number of contiguous assignments that will
   // be treated as an initialization block. Benchmarks show that
@@ -146,6 +148,7 @@
   ParserLog* log_;
   bool is_pre_parsing_;
   ScriptDataImpl* pre_data_;
+  bool seen_loop_stmt_;  // Used for inner loop detection.
 
   bool inside_with() const  { return with_nesting_level_ > 0; }
   ParserFactory* factory() const  { return factory_; }
@@ -210,6 +213,7 @@
       ZoneList<ObjectLiteral::Property*>* properties,
       Handle<FixedArray> constants,
       bool* is_simple,
+      bool* fast_elements,
       int* depth);
 
   // Populate the literals fixed array for a materialized array literal.
@@ -1202,7 +1206,8 @@
       factory_(factory),
       log_(log),
       is_pre_parsing_(is_pre_parsing == PREPARSE),
-      pre_data_(pre_data) {
+      pre_data_(pre_data),
+      seen_loop_stmt_(false) {
 }
 
 
@@ -1212,7 +1217,7 @@
   AssertNoZoneAllocation assert_no_zone_allocation;
   AssertNoAllocation assert_no_allocation;
   NoHandleAllocation no_handle_allocation;
-  scanner_.Init(source, stream, 0, JAVASCRIPT);
+  scanner_.Initialize(source, stream, JAVASCRIPT);
   ASSERT(target_stack_ == NULL);
   mode_ = PARSE_EAGERLY;
   DummyScope top_scope;
@@ -1226,7 +1231,6 @@
 
 
 FunctionLiteral* Parser::ParseProgram(Handle<String> source,
-                                      unibrow::CharacterStream* stream,
                                       bool in_global_context) {
   CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
 
@@ -1234,8 +1238,8 @@
   Counters::total_parse_size.Increment(source->length());
 
   // Initialize parser state.
-  source->TryFlattenIfNotFlat();
-  scanner_.Init(source, stream, 0, JAVASCRIPT);
+  source->TryFlatten();
+  scanner_.Initialize(source, JAVASCRIPT);
   ASSERT(target_stack_ == NULL);
 
   // Compute the parsing mode.
@@ -1286,15 +1290,15 @@
 FunctionLiteral* Parser::ParseLazy(Handle<String> source,
                                    Handle<String> name,
                                    int start_position,
+                                   int end_position,
                                    bool is_expression) {
   CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(&Counters::parse_lazy);
-  source->TryFlattenIfNotFlat();
   Counters::total_parse_size.Increment(source->length());
-  SafeStringInputBuffer buffer(source.location());
 
   // Initialize parser state.
-  scanner_.Init(source, &buffer, start_position, JAVASCRIPT);
+  source->TryFlatten();
+  scanner_.Initialize(source, start_position, end_position, JAVASCRIPT);
   ASSERT(target_stack_ == NULL);
   mode_ = PARSE_EAGERLY;
 
@@ -1330,16 +1334,15 @@
   return result;
 }
 
-FunctionLiteral* Parser::ParseJson(Handle<String> source,
-                                   unibrow::CharacterStream* stream) {
+FunctionLiteral* Parser::ParseJson(Handle<String> source) {
   CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
 
   HistogramTimerScope timer(&Counters::parse);
   Counters::total_parse_size.Increment(source->length());
 
   // Initialize parser state.
-  source->TryFlattenIfNotFlat();
-  scanner_.Init(source, stream, 0, JSON);
+  source->TryFlatten(TENURED);
+  scanner_.Initialize(source, JSON);
   ASSERT(target_stack_ == NULL);
 
   FunctionLiteral* result = NULL;
@@ -1584,13 +1587,15 @@
   }
 
   void HandleThisPropertyAssignment(Scope* scope, Assignment* assignment) {
-    // Check that the property assigned to is a named property.
+    // Check that the property assigned to is a named property, which is not
+    // __proto__.
     Property* property = assignment->target()->AsProperty();
     ASSERT(property != NULL);
     Literal* literal = property->key()->AsLiteral();
     uint32_t dummy;
     if (literal != NULL &&
         literal->handle()->IsString() &&
+        !String::cast(*(literal->handle()))->Equals(Heap::Proto_symbol()) &&
         !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
       Handle<String> key = Handle<String>::cast(literal->handle());
 
@@ -1954,27 +1959,24 @@
       extension_->GetNativeFunction(v8::Utils::ToLocal(name));
   ASSERT(!fun_template.IsEmpty());
 
-  // Instantiate the function and create a boilerplate function from it.
+  // Instantiate the function and create a shared function info from it.
   Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
   const int literals = fun->NumberOfLiterals();
   Handle<Code> code = Handle<Code>(fun->shared()->code());
   Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
-  Handle<JSFunction> boilerplate =
-      Factory::NewFunctionBoilerplate(name, literals, code);
-  boilerplate->shared()->set_construct_stub(*construct_stub);
+  Handle<SharedFunctionInfo> shared =
+      Factory::NewSharedFunctionInfo(name, literals, code);
+  shared->set_construct_stub(*construct_stub);
 
-  // Copy the function data to the boilerplate. Used by
-  // builtins.cc:HandleApiCall to perform argument type checks and to
-  // find the right native code to call.
-  boilerplate->shared()->set_function_data(fun->shared()->function_data());
+  // Copy the function data to the shared function info.
+  shared->set_function_data(fun->shared()->function_data());
   int parameters = fun->shared()->formal_parameter_count();
-  boilerplate->shared()->set_formal_parameter_count(parameters);
+  shared->set_formal_parameter_count(parameters);
 
   // TODO(1240846): It's weird that native function declarations are
   // introduced dynamically when we meet their declarations, whereas
   // other functions are setup when entering the surrounding scope.
-  FunctionBoilerplateLiteral* lit =
-      NEW(FunctionBoilerplateLiteral(boilerplate));
+  SharedFunctionInfoLiteral* lit = NEW(SharedFunctionInfoLiteral(shared));
   VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
   return NEW(ExpressionStatement(
       new Assignment(Token::INIT_VAR, var, lit, RelocInfo::kNoPosition)));
@@ -2643,6 +2645,7 @@
   }
 
   Expression* cond = ParseExpression(true, CHECK_OK);
+  if (cond != NULL) cond->set_is_loop_condition(true);
   Expect(Token::RPAREN, CHECK_OK);
 
   // Allow do-statements to be terminated with and without
@@ -2652,6 +2655,9 @@
   if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
 
   if (loop != NULL) loop->Initialize(cond, body);
+
+  seen_loop_stmt_ = true;
+
   return loop;
 }
 
@@ -2666,10 +2672,14 @@
   Expect(Token::WHILE, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
   Expression* cond = ParseExpression(true, CHECK_OK);
+  if (cond != NULL) cond->set_is_loop_condition(true);
   Expect(Token::RPAREN, CHECK_OK);
   Statement* body = ParseStatement(NULL, CHECK_OK);
 
   if (loop != NULL) loop->Initialize(cond, body);
+
+  seen_loop_stmt_ = true;
+
   return loop;
 }
 
@@ -2703,6 +2713,9 @@
           Block* result = NEW(Block(NULL, 2, false));
           result->AddStatement(variable_statement);
           result->AddStatement(loop);
+
+          seen_loop_stmt_ = true;
+
           // Parsed for-in loop w/ variable/const declaration.
           return result;
         }
@@ -2732,6 +2745,8 @@
         Statement* body = ParseStatement(NULL, CHECK_OK);
         if (loop) loop->Initialize(expression, enumerable, body);
 
+        seen_loop_stmt_ = true;
+
         // Parsed for-in loop.
         return loop;
 
@@ -2751,9 +2766,7 @@
   Expression* cond = NULL;
   if (peek() != Token::SEMICOLON) {
     cond = ParseExpression(true, CHECK_OK);
-    if (cond && cond->AsCompareOperation()) {
-      cond->AsCompareOperation()->set_is_for_loop_condition();
-    }
+    if (cond != NULL) cond->set_is_loop_condition(true);
   }
   Expect(Token::SEMICOLON, CHECK_OK);
 
@@ -2764,9 +2777,17 @@
   }
   Expect(Token::RPAREN, CHECK_OK);
 
+  seen_loop_stmt_ = false;
+
   Statement* body = ParseStatement(NULL, CHECK_OK);
 
+  // Mark this loop for peeling if it is an inner loop (contains no loop itself).
+  if (loop && !seen_loop_stmt_) loop->set_peel_this_loop(true);
+
   if (loop) loop->Initialize(init, cond, next, body);
+
+  seen_loop_stmt_ = true;
+
   return loop;
 }
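
The flag threading is easiest to follow on a nested loop; a conceptual trace of the protocol above (comments only, names from this patch):

    // outer ParseForStatement:
    //   seen_loop_stmt_ = false;         // reset before parsing outer body
    //   inner ParseForStatement (inside outer body):
    //     seen_loop_stmt_ = false;       // reset before parsing inner body
    //     inner body contains no loop => inner->set_peel_this_loop(true)
    //     seen_loop_stmt_ = true;        // report "I contain a loop" upward
    //   back in outer: seen_loop_stmt_ is now true => outer is not peeled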
 
@@ -3258,7 +3279,6 @@
         result = VariableProxySentinel::this_proxy();
       } else {
         VariableProxy* recv = top_scope_->receiver();
-        recv->var_uses()->RecordRead(1);
         result = recv;
       }
       break;
@@ -3447,7 +3467,11 @@
   ObjectLiteral* object_literal = expression->AsObjectLiteral();
   if (object_literal != NULL) {
     ASSERT(object_literal->is_simple());
-    result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL));
+    if (object_literal->fast_elements()) {
+      result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
+    } else {
+      result->set(kTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
+    }
     result->set(kElementsSlot, *object_literal->constant_properties());
   } else {
     ArrayLiteral* array_literal = expression->AsArrayLiteral();
@@ -3485,11 +3509,14 @@
     ZoneList<ObjectLiteral::Property*>* properties,
     Handle<FixedArray> constant_properties,
     bool* is_simple,
+    bool* fast_elements,
     int* depth) {
   int position = 0;
   // Accumulate the value in local variables and store it at the end.
   bool is_simple_acc = true;
   int depth_acc = 1;
+  uint32_t max_element_index = 0;
+  uint32_t elements = 0;
   for (int i = 0; i < properties->length(); i++) {
     ObjectLiteral::Property* property = properties->at(i);
     if (!IsBoilerplateProperty(property)) {
@@ -3508,11 +3535,31 @@
     Handle<Object> value = GetBoilerplateValue(property->value());
     is_simple_acc = is_simple_acc && !value->IsUndefined();
 
+    // Keep track of the number of elements in the object literal and
+    // the largest element index.  If the largest element index is
+    // much larger than the number of elements, creating an object
+    // literal with fast elements will be a waste of space.
+    uint32_t element_index = 0;
+    if (key->IsString()
+        && Handle<String>::cast(key)->AsArrayIndex(&element_index)
+        && element_index > max_element_index) {
+      max_element_index = element_index;
+      elements++;
+    } else if (key->IsSmi()) {
+      int key_value = Smi::cast(*key)->value();
+      if (key_value > 0
+          && static_cast<uint32_t>(key_value) > max_element_index) {
+        max_element_index = key_value;
+      }
+      elements++;
+    }
+
     // Add name, value pair to the fixed array.
     constant_properties->set(position++, *key);
     constant_properties->set(position++, *value);
   }
-
+  *fast_elements =
+      (max_element_index <= 32) || ((2 * elements) >= max_element_index);
   *is_simple = is_simple_acc;
   *depth = depth_acc;
 }
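
Concretely, the cutoff above keeps fast elements whenever the numeric index space is small or at least half populated. A few worked cases:

    // {0: a, 1: b, 2: c}  -> elements = 3, max_element_index = 2   -> fast
    // {100: a}            -> elements = 1, max_element_index = 100;
    //                        100 > 32 and 2 * 1 < 100              -> slow
    // Any literal whose largest numeric index is <= 32 stays fast,
    // however sparse it is.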
@@ -3610,15 +3657,18 @@
       Factory::NewFixedArray(number_of_boilerplate_properties * 2, TENURED);
 
   bool is_simple = true;
+  bool fast_elements = true;
   int depth = 1;
   BuildObjectLiteralConstantProperties(properties.elements(),
                                        constant_properties,
                                        &is_simple,
+                                       &fast_elements,
                                        &depth);
   return new ObjectLiteral(constant_properties,
                            properties.elements(),
                            literal_index,
                            is_simple,
+                           fast_elements,
                            depth);
 }
 
@@ -3682,6 +3732,9 @@
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
 
+  // Reset flag used for inner loop detection.
+  seen_loop_stmt_ = false;
+
   bool is_named = !var_name.is_null();
 
   // The name associated with this function. If it's a function expression,
@@ -3792,6 +3845,12 @@
     if (!is_pre_parsing_) {
       function_literal->set_function_token_position(function_token_position);
     }
+
+    // Set flag for inner loop detection. We do not treat loops that contain a
+    // function literal as inner loops, because we avoid duplicating function
+    // literals when peeling or unrolling such a loop.
+    seen_loop_stmt_ = true;
+
     return function_literal;
   }
 }
@@ -3834,7 +3893,27 @@
     }
   }
 
-  // Otherwise we have a runtime call.
+  // Check that the expected number of arguments is passed to runtime functions.
+  if (!is_pre_parsing_) {
+    if (function != NULL
+        && function->nargs != -1
+        && function->nargs != args->length()) {
+      ReportMessage("illegal_access", Vector<const char*>::empty());
+      *ok = false;
+      return NULL;
+    } else if (function == NULL && !name.is_null()) {
+      // If this is not a runtime function implemented in C++ it might be an
+      // inlined runtime function.
+      int argc = CodeGenerator::InlineRuntimeCallArgumentsCount(name);
+      if (argc != -1 && argc != args->length()) {
+        ReportMessage("illegal_access", Vector<const char*>::empty());
+        *ok = false;
+        return NULL;
+      }
+    }
+  }
+
+  // Otherwise we have a valid runtime call.
   return NEW(CallRuntime(name, function, args));
 }
 
@@ -4125,15 +4204,18 @@
   Handle<FixedArray> constant_properties =
         Factory::NewFixedArray(boilerplate_properties * 2, TENURED);
   bool is_simple = true;
+  bool fast_elements = true;
   int depth = 1;
   BuildObjectLiteralConstantProperties(properties.elements(),
                                        constant_properties,
                                        &is_simple,
+                                       &fast_elements,
                                        &depth);
   return new ObjectLiteral(constant_properties,
                            properties.elements(),
                            literal_index,
                            is_simple,
+                           fast_elements,
                            depth);
 }
 
@@ -5065,13 +5147,12 @@
     return NULL;
   }
   Handle<String> source = Handle<String>(String::cast(script->source()));
-  SafeStringInputBuffer input(source.location());
   FunctionLiteral* result;
   if (is_json) {
     ASSERT(compile_in_global_context);
-    result = parser.ParseJson(source, &input);
+    result = parser.ParseJson(source);
   } else {
-    result = parser.ParseProgram(source, &input, compile_in_global_context);
+    result = parser.ParseProgram(source, compile_in_global_context);
   }
   return result;
 }
@@ -5086,13 +5167,11 @@
   always_allow_natives_syntax = true;
   AstBuildingParser parser(script, true, NULL, NULL);  // always allow
   always_allow_natives_syntax = allow_natives_syntax_before;
-  // Parse the function by pulling the function source from the script source.
+  // Parse the function by pointing to the function source in the script source.
   Handle<String> script_source(String::cast(script->source()));
   FunctionLiteral* result =
-      parser.ParseLazy(SubString(script_source, start_position, end_position),
-                       name,
-                       start_position,
-                       is_expression);
+      parser.ParseLazy(script_source, name,
+                       start_position, end_position, is_expression);
   return result;
 }
 
diff --git a/src/parser.h b/src/parser.h
index 0f808d7..2e5daf9 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -168,7 +168,8 @@
 class CompileTimeValue: public AllStatic {
  public:
   enum Type {
-    OBJECT_LITERAL,
+    OBJECT_LITERAL_FAST_ELEMENTS,
+    OBJECT_LITERAL_SLOW_ELEMENTS,
     ARRAY_LITERAL
   };
 
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index a978777..67e52ce 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -568,6 +568,9 @@
 
   TickSample sample;
 
+  // We always sample the VM state.
+  sample.state = VMState::current_state();
+
   // If profiling, we extract the current pc and sp.
   if (active_sampler_->IsProfiling()) {
     // Extracting the sample from the context is extremely machine dependent.
@@ -589,9 +592,6 @@
     active_sampler_->SampleStack(&sample);
   }
 
-  // We always sample the VM state.
-  sample.state = Logger::state();
-
   active_sampler_->Tick(&sample);
 }
 
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 8cc513d..b28597d 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -159,7 +159,7 @@
 #elif V8_TARGET_ARCH_MIPS
   return 8;
 #endif
-  // With gcc 4.4 the tree vectorization optimiser can generate code
+  // With gcc 4.4 the tree vectorization optimizer can generate code
   // that requires 16 byte alignment such as movdqa on x86.
   return 16;
 }
@@ -332,8 +332,8 @@
     if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
 
     int c;
-    if (attr_r == 'r' && attr_x == 'x') {
-      // Found a readable and executable entry. Skip characters until we reach
+    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+      // Found a read-only executable entry. Skip characters until we reach
       // the beginning of the filename or the end of the line.
       do {
         c = getc(fp);
@@ -727,44 +727,46 @@
   if (signal != SIGPROF) return;
   if (active_sampler_ == NULL) return;
 
-  TickSample sample;
+  TickSample sample_obj;
+  TickSample* sample = CpuProfiler::TickSampleEvent();
+  if (sample == NULL) sample = &sample_obj;
 
+  // We always sample the VM state.
+  sample->state = VMState::current_state();
   // If profiling, we extract the current pc and sp.
   if (active_sampler_->IsProfiling()) {
     // Extracting the sample from the context is extremely machine dependent.
     ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
     mcontext_t& mcontext = ucontext->uc_mcontext;
 #if V8_HOST_ARCH_IA32
-    sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
-    sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
-    sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+    sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+    sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+    sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
 #elif V8_HOST_ARCH_X64
-    sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
-    sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
-    sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
+    sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+    sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+    sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
 #elif V8_HOST_ARCH_ARM
 // An undefined macro evaluates to 0, so this applies to Android's Bionic also.
 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
-    sample.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
-    sample.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
-    sample.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
+    sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+    sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+    sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
 #else
-    sample.pc = reinterpret_cast<Address>(mcontext.arm_pc);
-    sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
-    sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
+    sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
+    sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
+    sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
 #endif
 #elif V8_HOST_ARCH_MIPS
     // Implement this on MIPS.
     UNIMPLEMENTED();
 #endif
-    if (IsVmThread())
-      active_sampler_->SampleStack(&sample);
+    if (IsVmThread()) {
+      active_sampler_->SampleStack(sample);
+    }
   }
 
-  // We always sample the VM state.
-  sample.state = Logger::state();
-
-  active_sampler_->Tick(&sample);
+  active_sampler_->Tick(sample);
 #endif
 }
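
The same pattern recurs in the platform files below: the handler first asks the profiler for a sample slot to fill in place, presumably to avoid an extra copy inside a signal handler, and falls back to a stack-allocated TickSample only when none is available. Sketched in isolation:

    TickSample sample_obj;                                // fallback storage
    TickSample* sample = CpuProfiler::TickSampleEvent();  // may return NULL
    if (sample == NULL) sample = &sample_obj;
    sample->state = VMState::current_state();             // always recorded
    // ... fill pc/sp/fp from the interrupted context, then Tick(sample) ...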
 
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 27f0141..5516909 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -278,12 +278,6 @@
 
 
 int OS::StackWalk(Vector<StackFrame> frames) {
-#ifdef ANDROID
-  // For some reason the weak linkage doesn't work when building mksnapshot
-  // for android on macos. Just bail out as if we're on 10.4. We don't need
-  // stack walking for mksnapshot.
-  return 0;
-#else
   // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
   if (backtrace == NULL)
     return 0;
@@ -315,7 +309,6 @@
   free(symbols);
 
   return frames_count;
-#endif // ANDROID
 }
 
 
@@ -551,10 +544,14 @@
 
   // Sampler thread handler.
   void Runner() {
-    // Loop until the sampler is disengaged.
-    while (sampler_->IsActive()) {
-      TickSample sample;
+    // Loop until the sampler is disengaged, keeping the specified sampling freq.
+    for ( ; sampler_->IsActive(); OS::Sleep(sampler_->interval_)) {
+      TickSample sample_obj;
+      TickSample* sample = CpuProfiler::TickSampleEvent();
+      if (sample == NULL) sample = &sample_obj;
 
+      // We always sample the VM state.
+      sample->state = VMState::current_state();
       // If profiling, we record the pc and sp of the profiled thread.
       if (sampler_->IsProfiling()
           && KERN_SUCCESS == thread_suspend(profiled_thread_)) {
@@ -584,21 +581,16 @@
                              flavor,
                              reinterpret_cast<natural_t*>(&state),
                              &count) == KERN_SUCCESS) {
-          sample.pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
-          sample.sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
-          sample.fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
-          sampler_->SampleStack(&sample);
+          sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
+          sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
+          sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
+          sampler_->SampleStack(sample);
         }
         thread_resume(profiled_thread_);
       }
 
-      // We always sample the VM state.
-      sample.state = Logger::state();
       // Invoke tick handler with program counter and stack pointer.
-      sampler_->Tick(&sample);
-
-      // Wait until next sampling.
-      usleep(sampler_->interval_ * 1000);
+      sampler_->Tick(sample);
     }
   }
 };
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index f96e769..e3ae867 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -542,7 +542,7 @@
   TickSample sample;
 
   // We always sample the VM state.
-  sample.state = Logger::state();
+  sample.state = VMState::current_state();
 
   active_sampler_->Tick(&sample);
 }
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 85c2c54..1fa652d 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -533,7 +533,7 @@
   sample.fp = 0;
 
   // We always sample the VM state.
-  sample.state = Logger::state();
+  sample.state = VMState::current_state();
 
   active_sampler_->Tick(&sample);
 }
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 7ea3ce9..d03a0a9 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1803,36 +1803,35 @@
     // Context used for sampling the register state of the profiled thread.
     CONTEXT context;
     memset(&context, 0, sizeof(context));
-    // Loop until the sampler is disengaged.
-    while (sampler_->IsActive()) {
-      TickSample sample;
+    // Loop until the sampler is disengaged, keeping the specified sampling freq.
+    for ( ; sampler_->IsActive(); Sleep(sampler_->interval_)) {
+      TickSample sample_obj;
+      TickSample* sample = CpuProfiler::TickSampleEvent();
+      if (sample == NULL) sample = &sample_obj;
 
+      // We always sample the VM state.
+      sample->state = VMState::current_state();
       // If profiling, we record the pc and sp of the profiled thread.
       if (sampler_->IsProfiling()
           && SuspendThread(profiled_thread_) != (DWORD)-1) {
         context.ContextFlags = CONTEXT_FULL;
         if (GetThreadContext(profiled_thread_, &context) != 0) {
 #if V8_HOST_ARCH_X64
-          sample.pc = reinterpret_cast<Address>(context.Rip);
-          sample.sp = reinterpret_cast<Address>(context.Rsp);
-          sample.fp = reinterpret_cast<Address>(context.Rbp);
+          sample->pc = reinterpret_cast<Address>(context.Rip);
+          sample->sp = reinterpret_cast<Address>(context.Rsp);
+          sample->fp = reinterpret_cast<Address>(context.Rbp);
 #else
-          sample.pc = reinterpret_cast<Address>(context.Eip);
-          sample.sp = reinterpret_cast<Address>(context.Esp);
-          sample.fp = reinterpret_cast<Address>(context.Ebp);
+          sample->pc = reinterpret_cast<Address>(context.Eip);
+          sample->sp = reinterpret_cast<Address>(context.Esp);
+          sample->fp = reinterpret_cast<Address>(context.Ebp);
 #endif
-          sampler_->SampleStack(&sample);
+          sampler_->SampleStack(sample);
         }
         ResumeThread(profiled_thread_);
       }
 
-      // We always sample the VM state.
-      sample.state = Logger::state();
       // Invoke tick handler with program counter and stack pointer.
-      sampler_->Tick(&sample);
-
-      // Wait until next sampling.
-      Sleep(sampler_->interval_);
+      sampler_->Tick(sample);
     }
   }
 };
diff --git a/src/platform.h b/src/platform.h
index bc2e9d6..82e2e3c 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -114,6 +114,10 @@
 namespace v8 {
 namespace internal {
 
+// Use AtomicWord for a machine-sized pointer. It is assumed that
+// reads and writes of naturally aligned values of this type are atomic.
+typedef intptr_t AtomicWord;
+
 class Semaphore;
 
 double ceiling(double x);
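
A minimal sketch of the stated AtomicWord contract, assuming only what the comment above guarantees: aligned, word-sized loads and stores are not torn (nothing is implied about memory ordering):

    static AtomicWord shared_word = 0;

    static void Writer(intptr_t new_value) {
      shared_word = new_value;  // plain store; not torn under the assumption
    }

    static intptr_t Reader() {
      return shared_word;  // observes old or new value, never a byte mix
    }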
@@ -501,7 +505,6 @@
 };
 
 
-#ifdef ENABLE_LOGGING_AND_PROFILING
 // ----------------------------------------------------------------------------
 // Sampler
 //
@@ -513,22 +516,23 @@
 class TickSample {
  public:
   TickSample()
-      : pc(NULL),
+      : state(OTHER),
+        pc(NULL),
         sp(NULL),
         fp(NULL),
         function(NULL),
-        state(OTHER),
         frames_count(0) {}
+  StateTag state;  // The state of the VM.
   Address pc;  // Instruction pointer.
   Address sp;  // Stack pointer.
   Address fp;  // Frame pointer.
   Address function;  // The last called JS function.
-  StateTag state;  // The state of the VM.
-  static const int kMaxFramesCount = 100;
-  EmbeddedVector<Address, kMaxFramesCount> stack;  // Call stack.
+  static const int kMaxFramesCount = 64;
+  Address stack[kMaxFramesCount];  // Call stack.
   int frames_count;  // Number of captured frames.
 };
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
 class Sampler {
  public:
   // Initialize sampler.
diff --git a/src/powers-ten.h b/src/powers-ten.h
new file mode 100644
index 0000000..93d92d9
--- /dev/null
+++ b/src/powers-ten.h
@@ -0,0 +1,2461 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// ------------ GENERATED FILE ----------------
+// command used:
+//  tools/generate-ten-powers --from -308 --to 342 --mantissa-size 64 --round round -o src/powers-ten.h  // NOLINT
+
+// This file is intended to be included inside another .h or .cc files
+// with the following defines set:
+//  GRISU_CACHE_STRUCT: should expand to the name of a struct that will
+//   hold the cached powers of ten. Each entry will hold a 64-bit
+//   significand, a 16-bit signed binary exponent, and a 16-bit
+//   signed decimal exponent. Each entry will be constructed as follows:
+//      { significand, binary_exponent, decimal_exponent }.
+//  GRISU_CACHE_NAME(i): generates the name for the different caches.
+//   The parameter i will be a number in the range 1-20. A cache will
+//   hold every i'th element of a full cache. GRISU_CACHE_NAME(1) will
+//   thus hold all elements. The higher i, the fewer elements it has.
+//   Ideally the user should only reference one cache and let the
+//   compiler remove the unused ones.
+//  GRISU_CACHE_MAX_DISTANCE(i): generates the name for the maximum
+//   binary exponent distance between all elements of a given cache.
+//  GRISU_CACHE_OFFSET: is used as variable name for the decimal
+//   exponent offset. It is equal to -cache[0].decimal_exponent.
+//  GRISU_UINT64_C: used to construct 64-bit values in a platform
+//   independent way. In order to encode 0x123456789ABCDEF0 the macro
+//   will be invoked as follows: GRISU_UINT64_C(0x12345678,9ABCDEF0).
+
+
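
One plausible set of definitions a consumer could provide before including this header (hypothetical; the actual consumer lives elsewhere in the dtoa code):

    struct CachedPower {
      uint64_t significand;
      int16_t binary_exponent;
      int16_t decimal_exponent;
    };
    #define GRISU_CACHE_STRUCT CachedPower
    #define GRISU_CACHE_NAME(i) kCachedPowers##i
    #define GRISU_CACHE_MAX_DISTANCE(i) kCachedPowersMaxDistance##i
    #define GRISU_CACHE_OFFSET kCachedPowerOffset
    // Token pasting re-attaches the 0x prefix to the low half.
    #define GRISU_UINT64_C(high, low) \
      ((static_cast<uint64_t>(high) << 32) + 0x##low##u)
    #include "powers-ten.h"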
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(1)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0x8fd0c162, 06306bac), -1083, -307},
+  {GRISU_UINT64_C(0xb3c4f1ba, 87bc8697), -1080, -306},
+  {GRISU_UINT64_C(0xe0b62e29, 29aba83c), -1077, -305},
+  {GRISU_UINT64_C(0x8c71dcd9, ba0b4926), -1073, -304},
+  {GRISU_UINT64_C(0xaf8e5410, 288e1b6f), -1070, -303},
+  {GRISU_UINT64_C(0xdb71e914, 32b1a24b), -1067, -302},
+  {GRISU_UINT64_C(0x892731ac, 9faf056f), -1063, -301},
+  {GRISU_UINT64_C(0xab70fe17, c79ac6ca), -1060, -300},
+  {GRISU_UINT64_C(0xd64d3d9d, b981787d), -1057, -299},
+  {GRISU_UINT64_C(0x85f04682, 93f0eb4e), -1053, -298},
+  {GRISU_UINT64_C(0xa76c5823, 38ed2622), -1050, -297},
+  {GRISU_UINT64_C(0xd1476e2c, 07286faa), -1047, -296},
+  {GRISU_UINT64_C(0x82cca4db, 847945ca), -1043, -295},
+  {GRISU_UINT64_C(0xa37fce12, 6597973d), -1040, -294},
+  {GRISU_UINT64_C(0xcc5fc196, fefd7d0c), -1037, -293},
+  {GRISU_UINT64_C(0xff77b1fc, bebcdc4f), -1034, -292},
+  {GRISU_UINT64_C(0x9faacf3d, f73609b1), -1030, -291},
+  {GRISU_UINT64_C(0xc795830d, 75038c1e), -1027, -290},
+  {GRISU_UINT64_C(0xf97ae3d0, d2446f25), -1024, -289},
+  {GRISU_UINT64_C(0x9becce62, 836ac577), -1020, -288},
+  {GRISU_UINT64_C(0xc2e801fb, 244576d5), -1017, -287},
+  {GRISU_UINT64_C(0xf3a20279, ed56d48a), -1014, -286},
+  {GRISU_UINT64_C(0x9845418c, 345644d7), -1010, -285},
+  {GRISU_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
+  {GRISU_UINT64_C(0xedec366b, 11c6cb8f), -1004, -283},
+  {GRISU_UINT64_C(0x94b3a202, eb1c3f39), -1000, -282},
+  {GRISU_UINT64_C(0xb9e08a83, a5e34f08), -997, -281},
+  {GRISU_UINT64_C(0xe858ad24, 8f5c22ca), -994, -280},
+  {GRISU_UINT64_C(0x91376c36, d99995be), -990, -279},
+  {GRISU_UINT64_C(0xb5854744, 8ffffb2e), -987, -278},
+  {GRISU_UINT64_C(0xe2e69915, b3fff9f9), -984, -277},
+  {GRISU_UINT64_C(0x8dd01fad, 907ffc3c), -980, -276},
+  {GRISU_UINT64_C(0xb1442798, f49ffb4b), -977, -275},
+  {GRISU_UINT64_C(0xdd95317f, 31c7fa1d), -974, -274},
+  {GRISU_UINT64_C(0x8a7d3eef, 7f1cfc52), -970, -273},
+  {GRISU_UINT64_C(0xad1c8eab, 5ee43b67), -967, -272},
+  {GRISU_UINT64_C(0xd863b256, 369d4a41), -964, -271},
+  {GRISU_UINT64_C(0x873e4f75, e2224e68), -960, -270},
+  {GRISU_UINT64_C(0xa90de353, 5aaae202), -957, -269},
+  {GRISU_UINT64_C(0xd3515c28, 31559a83), -954, -268},
+  {GRISU_UINT64_C(0x8412d999, 1ed58092), -950, -267},
+  {GRISU_UINT64_C(0xa5178fff, 668ae0b6), -947, -266},
+  {GRISU_UINT64_C(0xce5d73ff, 402d98e4), -944, -265},
+  {GRISU_UINT64_C(0x80fa687f, 881c7f8e), -940, -264},
+  {GRISU_UINT64_C(0xa139029f, 6a239f72), -937, -263},
+  {GRISU_UINT64_C(0xc9874347, 44ac874f), -934, -262},
+  {GRISU_UINT64_C(0xfbe91419, 15d7a922), -931, -261},
+  {GRISU_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
+  {GRISU_UINT64_C(0xc4ce17b3, 99107c23), -924, -259},
+  {GRISU_UINT64_C(0xf6019da0, 7f549b2b), -921, -258},
+  {GRISU_UINT64_C(0x99c10284, 4f94e0fb), -917, -257},
+  {GRISU_UINT64_C(0xc0314325, 637a193a), -914, -256},
+  {GRISU_UINT64_C(0xf03d93ee, bc589f88), -911, -255},
+  {GRISU_UINT64_C(0x96267c75, 35b763b5), -907, -254},
+  {GRISU_UINT64_C(0xbbb01b92, 83253ca3), -904, -253},
+  {GRISU_UINT64_C(0xea9c2277, 23ee8bcb), -901, -252},
+  {GRISU_UINT64_C(0x92a1958a, 7675175f), -897, -251},
+  {GRISU_UINT64_C(0xb749faed, 14125d37), -894, -250},
+  {GRISU_UINT64_C(0xe51c79a8, 5916f485), -891, -249},
+  {GRISU_UINT64_C(0x8f31cc09, 37ae58d3), -887, -248},
+  {GRISU_UINT64_C(0xb2fe3f0b, 8599ef08), -884, -247},
+  {GRISU_UINT64_C(0xdfbdcece, 67006ac9), -881, -246},
+  {GRISU_UINT64_C(0x8bd6a141, 006042be), -877, -245},
+  {GRISU_UINT64_C(0xaecc4991, 4078536d), -874, -244},
+  {GRISU_UINT64_C(0xda7f5bf5, 90966849), -871, -243},
+  {GRISU_UINT64_C(0x888f9979, 7a5e012d), -867, -242},
+  {GRISU_UINT64_C(0xaab37fd7, d8f58179), -864, -241},
+  {GRISU_UINT64_C(0xd5605fcd, cf32e1d7), -861, -240},
+  {GRISU_UINT64_C(0x855c3be0, a17fcd26), -857, -239},
+  {GRISU_UINT64_C(0xa6b34ad8, c9dfc070), -854, -238},
+  {GRISU_UINT64_C(0xd0601d8e, fc57b08c), -851, -237},
+  {GRISU_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
+  {GRISU_UINT64_C(0xa2cb1717, b52481ed), -844, -235},
+  {GRISU_UINT64_C(0xcb7ddcdd, a26da269), -841, -234},
+  {GRISU_UINT64_C(0xfe5d5415, 0b090b03), -838, -233},
+  {GRISU_UINT64_C(0x9efa548d, 26e5a6e2), -834, -232},
+  {GRISU_UINT64_C(0xc6b8e9b0, 709f109a), -831, -231},
+  {GRISU_UINT64_C(0xf867241c, 8cc6d4c1), -828, -230},
+  {GRISU_UINT64_C(0x9b407691, d7fc44f8), -824, -229},
+  {GRISU_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
+  {GRISU_UINT64_C(0xf294b943, e17a2bc4), -818, -227},
+  {GRISU_UINT64_C(0x979cf3ca, 6cec5b5b), -814, -226},
+  {GRISU_UINT64_C(0xbd8430bd, 08277231), -811, -225},
+  {GRISU_UINT64_C(0xece53cec, 4a314ebe), -808, -224},
+  {GRISU_UINT64_C(0x940f4613, ae5ed137), -804, -223},
+  {GRISU_UINT64_C(0xb9131798, 99f68584), -801, -222},
+  {GRISU_UINT64_C(0xe757dd7e, c07426e5), -798, -221},
+  {GRISU_UINT64_C(0x9096ea6f, 3848984f), -794, -220},
+  {GRISU_UINT64_C(0xb4bca50b, 065abe63), -791, -219},
+  {GRISU_UINT64_C(0xe1ebce4d, c7f16dfc), -788, -218},
+  {GRISU_UINT64_C(0x8d3360f0, 9cf6e4bd), -784, -217},
+  {GRISU_UINT64_C(0xb080392c, c4349ded), -781, -216},
+  {GRISU_UINT64_C(0xdca04777, f541c568), -778, -215},
+  {GRISU_UINT64_C(0x89e42caa, f9491b61), -774, -214},
+  {GRISU_UINT64_C(0xac5d37d5, b79b6239), -771, -213},
+  {GRISU_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
+  {GRISU_UINT64_C(0x86a8d39e, f77164bd), -764, -211},
+  {GRISU_UINT64_C(0xa8530886, b54dbdec), -761, -210},
+  {GRISU_UINT64_C(0xd267caa8, 62a12d67), -758, -209},
+  {GRISU_UINT64_C(0x8380dea9, 3da4bc60), -754, -208},
+  {GRISU_UINT64_C(0xa4611653, 8d0deb78), -751, -207},
+  {GRISU_UINT64_C(0xcd795be8, 70516656), -748, -206},
+  {GRISU_UINT64_C(0x806bd971, 4632dff6), -744, -205},
+  {GRISU_UINT64_C(0xa086cfcd, 97bf97f4), -741, -204},
+  {GRISU_UINT64_C(0xc8a883c0, fdaf7df0), -738, -203},
+  {GRISU_UINT64_C(0xfad2a4b1, 3d1b5d6c), -735, -202},
+  {GRISU_UINT64_C(0x9cc3a6ee, c6311a64), -731, -201},
+  {GRISU_UINT64_C(0xc3f490aa, 77bd60fd), -728, -200},
+  {GRISU_UINT64_C(0xf4f1b4d5, 15acb93c), -725, -199},
+  {GRISU_UINT64_C(0x99171105, 2d8bf3c5), -721, -198},
+  {GRISU_UINT64_C(0xbf5cd546, 78eef0b7), -718, -197},
+  {GRISU_UINT64_C(0xef340a98, 172aace5), -715, -196},
+  {GRISU_UINT64_C(0x9580869f, 0e7aac0f), -711, -195},
+  {GRISU_UINT64_C(0xbae0a846, d2195713), -708, -194},
+  {GRISU_UINT64_C(0xe998d258, 869facd7), -705, -193},
+  {GRISU_UINT64_C(0x91ff8377, 5423cc06), -701, -192},
+  {GRISU_UINT64_C(0xb67f6455, 292cbf08), -698, -191},
+  {GRISU_UINT64_C(0xe41f3d6a, 7377eeca), -695, -190},
+  {GRISU_UINT64_C(0x8e938662, 882af53e), -691, -189},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0xdec681f9, f4c31f31), -685, -187},
+  {GRISU_UINT64_C(0x8b3c113c, 38f9f37f), -681, -186},
+  {GRISU_UINT64_C(0xae0b158b, 4738705f), -678, -185},
+  {GRISU_UINT64_C(0xd98ddaee, 19068c76), -675, -184},
+  {GRISU_UINT64_C(0x87f8a8d4, cfa417ca), -671, -183},
+  {GRISU_UINT64_C(0xa9f6d30a, 038d1dbc), -668, -182},
+  {GRISU_UINT64_C(0xd47487cc, 8470652b), -665, -181},
+  {GRISU_UINT64_C(0x84c8d4df, d2c63f3b), -661, -180},
+  {GRISU_UINT64_C(0xa5fb0a17, c777cf0a), -658, -179},
+  {GRISU_UINT64_C(0xcf79cc9d, b955c2cc), -655, -178},
+  {GRISU_UINT64_C(0x81ac1fe2, 93d599c0), -651, -177},
+  {GRISU_UINT64_C(0xa21727db, 38cb0030), -648, -176},
+  {GRISU_UINT64_C(0xca9cf1d2, 06fdc03c), -645, -175},
+  {GRISU_UINT64_C(0xfd442e46, 88bd304b), -642, -174},
+  {GRISU_UINT64_C(0x9e4a9cec, 15763e2f), -638, -173},
+  {GRISU_UINT64_C(0xc5dd4427, 1ad3cdba), -635, -172},
+  {GRISU_UINT64_C(0xf7549530, e188c129), -632, -171},
+  {GRISU_UINT64_C(0x9a94dd3e, 8cf578ba), -628, -170},
+  {GRISU_UINT64_C(0xc13a148e, 3032d6e8), -625, -169},
+  {GRISU_UINT64_C(0xf18899b1, bc3f8ca2), -622, -168},
+  {GRISU_UINT64_C(0x96f5600f, 15a7b7e5), -618, -167},
+  {GRISU_UINT64_C(0xbcb2b812, db11a5de), -615, -166},
+  {GRISU_UINT64_C(0xebdf6617, 91d60f56), -612, -165},
+  {GRISU_UINT64_C(0x936b9fce, bb25c996), -608, -164},
+  {GRISU_UINT64_C(0xb84687c2, 69ef3bfb), -605, -163},
+  {GRISU_UINT64_C(0xe65829b3, 046b0afa), -602, -162},
+  {GRISU_UINT64_C(0x8ff71a0f, e2c2e6dc), -598, -161},
+  {GRISU_UINT64_C(0xb3f4e093, db73a093), -595, -160},
+  {GRISU_UINT64_C(0xe0f218b8, d25088b8), -592, -159},
+  {GRISU_UINT64_C(0x8c974f73, 83725573), -588, -158},
+  {GRISU_UINT64_C(0xafbd2350, 644eead0), -585, -157},
+  {GRISU_UINT64_C(0xdbac6c24, 7d62a584), -582, -156},
+  {GRISU_UINT64_C(0x894bc396, ce5da772), -578, -155},
+  {GRISU_UINT64_C(0xab9eb47c, 81f5114f), -575, -154},
+  {GRISU_UINT64_C(0xd686619b, a27255a3), -572, -153},
+  {GRISU_UINT64_C(0x8613fd01, 45877586), -568, -152},
+  {GRISU_UINT64_C(0xa798fc41, 96e952e7), -565, -151},
+  {GRISU_UINT64_C(0xd17f3b51, fca3a7a1), -562, -150},
+  {GRISU_UINT64_C(0x82ef8513, 3de648c5), -558, -149},
+  {GRISU_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+  {GRISU_UINT64_C(0xcc963fee, 10b7d1b3), -552, -147},
+  {GRISU_UINT64_C(0xffbbcfe9, 94e5c620), -549, -146},
+  {GRISU_UINT64_C(0x9fd561f1, fd0f9bd4), -545, -145},
+  {GRISU_UINT64_C(0xc7caba6e, 7c5382c9), -542, -144},
+  {GRISU_UINT64_C(0xf9bd690a, 1b68637b), -539, -143},
+  {GRISU_UINT64_C(0x9c1661a6, 51213e2d), -535, -142},
+  {GRISU_UINT64_C(0xc31bfa0f, e5698db8), -532, -141},
+  {GRISU_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
+  {GRISU_UINT64_C(0x986ddb5c, 6b3a76b8), -525, -139},
+  {GRISU_UINT64_C(0xbe895233, 86091466), -522, -138},
+  {GRISU_UINT64_C(0xee2ba6c0, 678b597f), -519, -137},
+  {GRISU_UINT64_C(0x94db4838, 40b717f0), -515, -136},
+  {GRISU_UINT64_C(0xba121a46, 50e4ddec), -512, -135},
+  {GRISU_UINT64_C(0xe896a0d7, e51e1566), -509, -134},
+  {GRISU_UINT64_C(0x915e2486, ef32cd60), -505, -133},
+  {GRISU_UINT64_C(0xb5b5ada8, aaff80b8), -502, -132},
+  {GRISU_UINT64_C(0xe3231912, d5bf60e6), -499, -131},
+  {GRISU_UINT64_C(0x8df5efab, c5979c90), -495, -130},
+  {GRISU_UINT64_C(0xb1736b96, b6fd83b4), -492, -129},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0x8aa22c0d, bef60ee4), -485, -127},
+  {GRISU_UINT64_C(0xad4ab711, 2eb3929e), -482, -126},
+  {GRISU_UINT64_C(0xd89d64d5, 7a607745), -479, -125},
+  {GRISU_UINT64_C(0x87625f05, 6c7c4a8b), -475, -124},
+  {GRISU_UINT64_C(0xa93af6c6, c79b5d2e), -472, -123},
+  {GRISU_UINT64_C(0xd389b478, 79823479), -469, -122},
+  {GRISU_UINT64_C(0x843610cb, 4bf160cc), -465, -121},
+  {GRISU_UINT64_C(0xa54394fe, 1eedb8ff), -462, -120},
+  {GRISU_UINT64_C(0xce947a3d, a6a9273e), -459, -119},
+  {GRISU_UINT64_C(0x811ccc66, 8829b887), -455, -118},
+  {GRISU_UINT64_C(0xa163ff80, 2a3426a9), -452, -117},
+  {GRISU_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
+  {GRISU_UINT64_C(0xfc2c3f38, 41f17c68), -446, -115},
+  {GRISU_UINT64_C(0x9d9ba783, 2936edc1), -442, -114},
+  {GRISU_UINT64_C(0xc5029163, f384a931), -439, -113},
+  {GRISU_UINT64_C(0xf64335bc, f065d37d), -436, -112},
+  {GRISU_UINT64_C(0x99ea0196, 163fa42e), -432, -111},
+  {GRISU_UINT64_C(0xc06481fb, 9bcf8d3a), -429, -110},
+  {GRISU_UINT64_C(0xf07da27a, 82c37088), -426, -109},
+  {GRISU_UINT64_C(0x964e858c, 91ba2655), -422, -108},
+  {GRISU_UINT64_C(0xbbe226ef, b628afeb), -419, -107},
+  {GRISU_UINT64_C(0xeadab0ab, a3b2dbe5), -416, -106},
+  {GRISU_UINT64_C(0x92c8ae6b, 464fc96f), -412, -105},
+  {GRISU_UINT64_C(0xb77ada06, 17e3bbcb), -409, -104},
+  {GRISU_UINT64_C(0xe5599087, 9ddcaabe), -406, -103},
+  {GRISU_UINT64_C(0x8f57fa54, c2a9eab7), -402, -102},
+  {GRISU_UINT64_C(0xb32df8e9, f3546564), -399, -101},
+  {GRISU_UINT64_C(0xdff97724, 70297ebd), -396, -100},
+  {GRISU_UINT64_C(0x8bfbea76, c619ef36), -392, -99},
+  {GRISU_UINT64_C(0xaefae514, 77a06b04), -389, -98},
+  {GRISU_UINT64_C(0xdab99e59, 958885c5), -386, -97},
+  {GRISU_UINT64_C(0x88b402f7, fd75539b), -382, -96},
+  {GRISU_UINT64_C(0xaae103b5, fcd2a882), -379, -95},
+  {GRISU_UINT64_C(0xd59944a3, 7c0752a2), -376, -94},
+  {GRISU_UINT64_C(0x857fcae6, 2d8493a5), -372, -93},
+  {GRISU_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+  {GRISU_UINT64_C(0xd097ad07, a71f26b2), -366, -91},
+  {GRISU_UINT64_C(0x825ecc24, c8737830), -362, -90},
+  {GRISU_UINT64_C(0xa2f67f2d, fa90563b), -359, -89},
+  {GRISU_UINT64_C(0xcbb41ef9, 79346bca), -356, -88},
+  {GRISU_UINT64_C(0xfea126b7, d78186bd), -353, -87},
+  {GRISU_UINT64_C(0x9f24b832, e6b0f436), -349, -86},
+  {GRISU_UINT64_C(0xc6ede63f, a05d3144), -346, -85},
+  {GRISU_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
+  {GRISU_UINT64_C(0x9b69dbe1, b548ce7d), -339, -83},
+  {GRISU_UINT64_C(0xc24452da, 229b021c), -336, -82},
+  {GRISU_UINT64_C(0xf2d56790, ab41c2a3), -333, -81},
+  {GRISU_UINT64_C(0x97c560ba, 6b0919a6), -329, -80},
+  {GRISU_UINT64_C(0xbdb6b8e9, 05cb600f), -326, -79},
+  {GRISU_UINT64_C(0xed246723, 473e3813), -323, -78},
+  {GRISU_UINT64_C(0x9436c076, 0c86e30c), -319, -77},
+  {GRISU_UINT64_C(0xb9447093, 8fa89bcf), -316, -76},
+  {GRISU_UINT64_C(0xe7958cb8, 7392c2c3), -313, -75},
+  {GRISU_UINT64_C(0x90bd77f3, 483bb9ba), -309, -74},
+  {GRISU_UINT64_C(0xb4ecd5f0, 1a4aa828), -306, -73},
+  {GRISU_UINT64_C(0xe2280b6c, 20dd5232), -303, -72},
+  {GRISU_UINT64_C(0x8d590723, 948a535f), -299, -71},
+  {GRISU_UINT64_C(0xb0af48ec, 79ace837), -296, -70},
+  {GRISU_UINT64_C(0xdcdb1b27, 98182245), -293, -69},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0xac8b2d36, eed2dac6), -286, -67},
+  {GRISU_UINT64_C(0xd7adf884, aa879177), -283, -66},
+  {GRISU_UINT64_C(0x86ccbb52, ea94baeb), -279, -65},
+  {GRISU_UINT64_C(0xa87fea27, a539e9a5), -276, -64},
+  {GRISU_UINT64_C(0xd29fe4b1, 8e88640f), -273, -63},
+  {GRISU_UINT64_C(0x83a3eeee, f9153e89), -269, -62},
+  {GRISU_UINT64_C(0xa48ceaaa, b75a8e2b), -266, -61},
+  {GRISU_UINT64_C(0xcdb02555, 653131b6), -263, -60},
+  {GRISU_UINT64_C(0x808e1755, 5f3ebf12), -259, -59},
+  {GRISU_UINT64_C(0xa0b19d2a, b70e6ed6), -256, -58},
+  {GRISU_UINT64_C(0xc8de0475, 64d20a8c), -253, -57},
+  {GRISU_UINT64_C(0xfb158592, be068d2f), -250, -56},
+  {GRISU_UINT64_C(0x9ced737b, b6c4183d), -246, -55},
+  {GRISU_UINT64_C(0xc428d05a, a4751e4d), -243, -54},
+  {GRISU_UINT64_C(0xf5330471, 4d9265e0), -240, -53},
+  {GRISU_UINT64_C(0x993fe2c6, d07b7fac), -236, -52},
+  {GRISU_UINT64_C(0xbf8fdb78, 849a5f97), -233, -51},
+  {GRISU_UINT64_C(0xef73d256, a5c0f77d), -230, -50},
+  {GRISU_UINT64_C(0x95a86376, 27989aae), -226, -49},
+  {GRISU_UINT64_C(0xbb127c53, b17ec159), -223, -48},
+  {GRISU_UINT64_C(0xe9d71b68, 9dde71b0), -220, -47},
+  {GRISU_UINT64_C(0x92267121, 62ab070e), -216, -46},
+  {GRISU_UINT64_C(0xb6b00d69, bb55c8d1), -213, -45},
+  {GRISU_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
+  {GRISU_UINT64_C(0x8eb98a7a, 9a5b04e3), -206, -43},
+  {GRISU_UINT64_C(0xb267ed19, 40f1c61c), -203, -42},
+  {GRISU_UINT64_C(0xdf01e85f, 912e37a3), -200, -41},
+  {GRISU_UINT64_C(0x8b61313b, babce2c6), -196, -40},
+  {GRISU_UINT64_C(0xae397d8a, a96c1b78), -193, -39},
+  {GRISU_UINT64_C(0xd9c7dced, 53c72256), -190, -38},
+  {GRISU_UINT64_C(0x881cea14, 545c7575), -186, -37},
+  {GRISU_UINT64_C(0xaa242499, 697392d3), -183, -36},
+  {GRISU_UINT64_C(0xd4ad2dbf, c3d07788), -180, -35},
+  {GRISU_UINT64_C(0x84ec3c97, da624ab5), -176, -34},
+  {GRISU_UINT64_C(0xa6274bbd, d0fadd62), -173, -33},
+  {GRISU_UINT64_C(0xcfb11ead, 453994ba), -170, -32},
+  {GRISU_UINT64_C(0x81ceb32c, 4b43fcf5), -166, -31},
+  {GRISU_UINT64_C(0xa2425ff7, 5e14fc32), -163, -30},
+  {GRISU_UINT64_C(0xcad2f7f5, 359a3b3e), -160, -29},
+  {GRISU_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
+  {GRISU_UINT64_C(0x9e74d1b7, 91e07e48), -153, -27},
+  {GRISU_UINT64_C(0xc6120625, 76589ddb), -150, -26},
+  {GRISU_UINT64_C(0xf79687ae, d3eec551), -147, -25},
+  {GRISU_UINT64_C(0x9abe14cd, 44753b53), -143, -24},
+  {GRISU_UINT64_C(0xc16d9a00, 95928a27), -140, -23},
+  {GRISU_UINT64_C(0xf1c90080, baf72cb1), -137, -22},
+  {GRISU_UINT64_C(0x971da050, 74da7bef), -133, -21},
+  {GRISU_UINT64_C(0xbce50864, 92111aeb), -130, -20},
+  {GRISU_UINT64_C(0xec1e4a7d, b69561a5), -127, -19},
+  {GRISU_UINT64_C(0x9392ee8e, 921d5d07), -123, -18},
+  {GRISU_UINT64_C(0xb877aa32, 36a4b449), -120, -17},
+  {GRISU_UINT64_C(0xe69594be, c44de15b), -117, -16},
+  {GRISU_UINT64_C(0x901d7cf7, 3ab0acd9), -113, -15},
+  {GRISU_UINT64_C(0xb424dc35, 095cd80f), -110, -14},
+  {GRISU_UINT64_C(0xe12e1342, 4bb40e13), -107, -13},
+  {GRISU_UINT64_C(0x8cbccc09, 6f5088cc), -103, -12},
+  {GRISU_UINT64_C(0xafebff0b, cb24aaff), -100, -11},
+  {GRISU_UINT64_C(0xdbe6fece, bdedd5bf), -97, -10},
+  {GRISU_UINT64_C(0x89705f41, 36b4a597), -93, -9},
+  {GRISU_UINT64_C(0xabcc7711, 8461cefd), -90, -8},
+  {GRISU_UINT64_C(0xd6bf94d5, e57a42bc), -87, -7},
+  {GRISU_UINT64_C(0x8637bd05, af6c69b6), -83, -6},
+  {GRISU_UINT64_C(0xa7c5ac47, 1b478423), -80, -5},
+  {GRISU_UINT64_C(0xd1b71758, e219652c), -77, -4},
+  {GRISU_UINT64_C(0x83126e97, 8d4fdf3b), -73, -3},
+  {GRISU_UINT64_C(0xa3d70a3d, 70a3d70a), -70, -2},
+  {GRISU_UINT64_C(0xcccccccc, cccccccd), -67, -1},
+  {GRISU_UINT64_C(0x80000000, 00000000), -63, 0},
+  {GRISU_UINT64_C(0xa0000000, 00000000), -60, 1},
+  {GRISU_UINT64_C(0xc8000000, 00000000), -57, 2},
+  {GRISU_UINT64_C(0xfa000000, 00000000), -54, 3},
+  {GRISU_UINT64_C(0x9c400000, 00000000), -50, 4},
+  {GRISU_UINT64_C(0xc3500000, 00000000), -47, 5},
+  {GRISU_UINT64_C(0xf4240000, 00000000), -44, 6},
+  {GRISU_UINT64_C(0x98968000, 00000000), -40, 7},
+  {GRISU_UINT64_C(0xbebc2000, 00000000), -37, 8},
+  {GRISU_UINT64_C(0xee6b2800, 00000000), -34, 9},
+  {GRISU_UINT64_C(0x9502f900, 00000000), -30, 10},
+  {GRISU_UINT64_C(0xba43b740, 00000000), -27, 11},
+  {GRISU_UINT64_C(0xe8d4a510, 00000000), -24, 12},
+  {GRISU_UINT64_C(0x9184e72a, 00000000), -20, 13},
+  {GRISU_UINT64_C(0xb5e620f4, 80000000), -17, 14},
+  {GRISU_UINT64_C(0xe35fa931, a0000000), -14, 15},
+  {GRISU_UINT64_C(0x8e1bc9bf, 04000000), -10, 16},
+  {GRISU_UINT64_C(0xb1a2bc2e, c5000000), -7, 17},
+  {GRISU_UINT64_C(0xde0b6b3a, 76400000), -4, 18},
+  {GRISU_UINT64_C(0x8ac72304, 89e80000), 0, 19},
+  {GRISU_UINT64_C(0xad78ebc5, ac620000), 3, 20},
+  {GRISU_UINT64_C(0xd8d726b7, 177a8000), 6, 21},
+  {GRISU_UINT64_C(0x87867832, 6eac9000), 10, 22},
+  {GRISU_UINT64_C(0xa968163f, 0a57b400), 13, 23},
+  {GRISU_UINT64_C(0xd3c21bce, cceda100), 16, 24},
+  {GRISU_UINT64_C(0x84595161, 401484a0), 20, 25},
+  {GRISU_UINT64_C(0xa56fa5b9, 9019a5c8), 23, 26},
+  {GRISU_UINT64_C(0xcecb8f27, f4200f3a), 26, 27},
+  {GRISU_UINT64_C(0x813f3978, f8940984), 30, 28},
+  {GRISU_UINT64_C(0xa18f07d7, 36b90be5), 33, 29},
+  {GRISU_UINT64_C(0xc9f2c9cd, 04674edf), 36, 30},
+  {GRISU_UINT64_C(0xfc6f7c40, 45812296), 39, 31},
+  {GRISU_UINT64_C(0x9dc5ada8, 2b70b59e), 43, 32},
+  {GRISU_UINT64_C(0xc5371912, 364ce305), 46, 33},
+  {GRISU_UINT64_C(0xf684df56, c3e01bc7), 49, 34},
+  {GRISU_UINT64_C(0x9a130b96, 3a6c115c), 53, 35},
+  {GRISU_UINT64_C(0xc097ce7b, c90715b3), 56, 36},
+  {GRISU_UINT64_C(0xf0bdc21a, bb48db20), 59, 37},
+  {GRISU_UINT64_C(0x96769950, b50d88f4), 63, 38},
+  {GRISU_UINT64_C(0xbc143fa4, e250eb31), 66, 39},
+  {GRISU_UINT64_C(0xeb194f8e, 1ae525fd), 69, 40},
+  {GRISU_UINT64_C(0x92efd1b8, d0cf37be), 73, 41},
+  {GRISU_UINT64_C(0xb7abc627, 050305ae), 76, 42},
+  {GRISU_UINT64_C(0xe596b7b0, c643c719), 79, 43},
+  {GRISU_UINT64_C(0x8f7e32ce, 7bea5c70), 83, 44},
+  {GRISU_UINT64_C(0xb35dbf82, 1ae4f38c), 86, 45},
+  {GRISU_UINT64_C(0xe0352f62, a19e306f), 89, 46},
+  {GRISU_UINT64_C(0x8c213d9d, a502de45), 93, 47},
+  {GRISU_UINT64_C(0xaf298d05, 0e4395d7), 96, 48},
+  {GRISU_UINT64_C(0xdaf3f046, 51d47b4c), 99, 49},
+  {GRISU_UINT64_C(0x88d8762b, f324cd10), 103, 50},
+  {GRISU_UINT64_C(0xab0e93b6, efee0054), 106, 51},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0x85a36366, eb71f041), 113, 53},
+  {GRISU_UINT64_C(0xa70c3c40, a64e6c52), 116, 54},
+  {GRISU_UINT64_C(0xd0cf4b50, cfe20766), 119, 55},
+  {GRISU_UINT64_C(0x82818f12, 81ed44a0), 123, 56},
+  {GRISU_UINT64_C(0xa321f2d7, 226895c8), 126, 57},
+  {GRISU_UINT64_C(0xcbea6f8c, eb02bb3a), 129, 58},
+  {GRISU_UINT64_C(0xfee50b70, 25c36a08), 132, 59},
+  {GRISU_UINT64_C(0x9f4f2726, 179a2245), 136, 60},
+  {GRISU_UINT64_C(0xc722f0ef, 9d80aad6), 139, 61},
+  {GRISU_UINT64_C(0xf8ebad2b, 84e0d58c), 142, 62},
+  {GRISU_UINT64_C(0x9b934c3b, 330c8577), 146, 63},
+  {GRISU_UINT64_C(0xc2781f49, ffcfa6d5), 149, 64},
+  {GRISU_UINT64_C(0xf316271c, 7fc3908b), 152, 65},
+  {GRISU_UINT64_C(0x97edd871, cfda3a57), 156, 66},
+  {GRISU_UINT64_C(0xbde94e8e, 43d0c8ec), 159, 67},
+  {GRISU_UINT64_C(0xed63a231, d4c4fb27), 162, 68},
+  {GRISU_UINT64_C(0x945e455f, 24fb1cf9), 166, 69},
+  {GRISU_UINT64_C(0xb975d6b6, ee39e437), 169, 70},
+  {GRISU_UINT64_C(0xe7d34c64, a9c85d44), 172, 71},
+  {GRISU_UINT64_C(0x90e40fbe, ea1d3a4b), 176, 72},
+  {GRISU_UINT64_C(0xb51d13ae, a4a488dd), 179, 73},
+  {GRISU_UINT64_C(0xe264589a, 4dcdab15), 182, 74},
+  {GRISU_UINT64_C(0x8d7eb760, 70a08aed), 186, 75},
+  {GRISU_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
+  {GRISU_UINT64_C(0xdd15fe86, affad912), 192, 77},
+  {GRISU_UINT64_C(0x8a2dbf14, 2dfcc7ab), 196, 78},
+  {GRISU_UINT64_C(0xacb92ed9, 397bf996), 199, 79},
+  {GRISU_UINT64_C(0xd7e77a8f, 87daf7fc), 202, 80},
+  {GRISU_UINT64_C(0x86f0ac99, b4e8dafd), 206, 81},
+  {GRISU_UINT64_C(0xa8acd7c0, 222311bd), 209, 82},
+  {GRISU_UINT64_C(0xd2d80db0, 2aabd62c), 212, 83},
+  {GRISU_UINT64_C(0x83c7088e, 1aab65db), 216, 84},
+  {GRISU_UINT64_C(0xa4b8cab1, a1563f52), 219, 85},
+  {GRISU_UINT64_C(0xcde6fd5e, 09abcf27), 222, 86},
+  {GRISU_UINT64_C(0x80b05e5a, c60b6178), 226, 87},
+  {GRISU_UINT64_C(0xa0dc75f1, 778e39d6), 229, 88},
+  {GRISU_UINT64_C(0xc913936d, d571c84c), 232, 89},
+  {GRISU_UINT64_C(0xfb587849, 4ace3a5f), 235, 90},
+  {GRISU_UINT64_C(0x9d174b2d, cec0e47b), 239, 91},
+  {GRISU_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
+  {GRISU_UINT64_C(0xf5746577, 930d6501), 245, 93},
+  {GRISU_UINT64_C(0x9968bf6a, bbe85f20), 249, 94},
+  {GRISU_UINT64_C(0xbfc2ef45, 6ae276e9), 252, 95},
+  {GRISU_UINT64_C(0xefb3ab16, c59b14a3), 255, 96},
+  {GRISU_UINT64_C(0x95d04aee, 3b80ece6), 259, 97},
+  {GRISU_UINT64_C(0xbb445da9, ca61281f), 262, 98},
+  {GRISU_UINT64_C(0xea157514, 3cf97227), 265, 99},
+  {GRISU_UINT64_C(0x924d692c, a61be758), 269, 100},
+  {GRISU_UINT64_C(0xb6e0c377, cfa2e12e), 272, 101},
+  {GRISU_UINT64_C(0xe498f455, c38b997a), 275, 102},
+  {GRISU_UINT64_C(0x8edf98b5, 9a373fec), 279, 103},
+  {GRISU_UINT64_C(0xb2977ee3, 00c50fe7), 282, 104},
+  {GRISU_UINT64_C(0xdf3d5e9b, c0f653e1), 285, 105},
+  {GRISU_UINT64_C(0x8b865b21, 5899f46d), 289, 106},
+  {GRISU_UINT64_C(0xae67f1e9, aec07188), 292, 107},
+  {GRISU_UINT64_C(0xda01ee64, 1a708dea), 295, 108},
+  {GRISU_UINT64_C(0x884134fe, 908658b2), 299, 109},
+  {GRISU_UINT64_C(0xaa51823e, 34a7eedf), 302, 110},
+  {GRISU_UINT64_C(0xd4e5e2cd, c1d1ea96), 305, 111},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0xa6539930, bf6bff46), 312, 113},
+  {GRISU_UINT64_C(0xcfe87f7c, ef46ff17), 315, 114},
+  {GRISU_UINT64_C(0x81f14fae, 158c5f6e), 319, 115},
+  {GRISU_UINT64_C(0xa26da399, 9aef774a), 322, 116},
+  {GRISU_UINT64_C(0xcb090c80, 01ab551c), 325, 117},
+  {GRISU_UINT64_C(0xfdcb4fa0, 02162a63), 328, 118},
+  {GRISU_UINT64_C(0x9e9f11c4, 014dda7e), 332, 119},
+  {GRISU_UINT64_C(0xc646d635, 01a1511e), 335, 120},
+  {GRISU_UINT64_C(0xf7d88bc2, 4209a565), 338, 121},
+  {GRISU_UINT64_C(0x9ae75759, 6946075f), 342, 122},
+  {GRISU_UINT64_C(0xc1a12d2f, c3978937), 345, 123},
+  {GRISU_UINT64_C(0xf209787b, b47d6b85), 348, 124},
+  {GRISU_UINT64_C(0x9745eb4d, 50ce6333), 352, 125},
+  {GRISU_UINT64_C(0xbd176620, a501fc00), 355, 126},
+  {GRISU_UINT64_C(0xec5d3fa8, ce427b00), 358, 127},
+  {GRISU_UINT64_C(0x93ba47c9, 80e98ce0), 362, 128},
+  {GRISU_UINT64_C(0xb8a8d9bb, e123f018), 365, 129},
+  {GRISU_UINT64_C(0xe6d3102a, d96cec1e), 368, 130},
+  {GRISU_UINT64_C(0x9043ea1a, c7e41393), 372, 131},
+  {GRISU_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
+  {GRISU_UINT64_C(0xe16a1dc9, d8545e95), 378, 133},
+  {GRISU_UINT64_C(0x8ce2529e, 2734bb1d), 382, 134},
+  {GRISU_UINT64_C(0xb01ae745, b101e9e4), 385, 135},
+  {GRISU_UINT64_C(0xdc21a117, 1d42645d), 388, 136},
+  {GRISU_UINT64_C(0x899504ae, 72497eba), 392, 137},
+  {GRISU_UINT64_C(0xabfa45da, 0edbde69), 395, 138},
+  {GRISU_UINT64_C(0xd6f8d750, 9292d603), 398, 139},
+  {GRISU_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
+  {GRISU_UINT64_C(0xa7f26836, f282b733), 405, 141},
+  {GRISU_UINT64_C(0xd1ef0244, af2364ff), 408, 142},
+  {GRISU_UINT64_C(0x8335616a, ed761f1f), 412, 143},
+  {GRISU_UINT64_C(0xa402b9c5, a8d3a6e7), 415, 144},
+  {GRISU_UINT64_C(0xcd036837, 130890a1), 418, 145},
+  {GRISU_UINT64_C(0x80222122, 6be55a65), 422, 146},
+  {GRISU_UINT64_C(0xa02aa96b, 06deb0fe), 425, 147},
+  {GRISU_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
+  {GRISU_UINT64_C(0xfa42a8b7, 3abbf48d), 431, 149},
+  {GRISU_UINT64_C(0x9c69a972, 84b578d8), 435, 150},
+  {GRISU_UINT64_C(0xc38413cf, 25e2d70e), 438, 151},
+  {GRISU_UINT64_C(0xf46518c2, ef5b8cd1), 441, 152},
+  {GRISU_UINT64_C(0x98bf2f79, d5993803), 445, 153},
+  {GRISU_UINT64_C(0xbeeefb58, 4aff8604), 448, 154},
+  {GRISU_UINT64_C(0xeeaaba2e, 5dbf6785), 451, 155},
+  {GRISU_UINT64_C(0x952ab45c, fa97a0b3), 455, 156},
+  {GRISU_UINT64_C(0xba756174, 393d88e0), 458, 157},
+  {GRISU_UINT64_C(0xe912b9d1, 478ceb17), 461, 158},
+  {GRISU_UINT64_C(0x91abb422, ccb812ef), 465, 159},
+  {GRISU_UINT64_C(0xb616a12b, 7fe617aa), 468, 160},
+  {GRISU_UINT64_C(0xe39c4976, 5fdf9d95), 471, 161},
+  {GRISU_UINT64_C(0x8e41ade9, fbebc27d), 475, 162},
+  {GRISU_UINT64_C(0xb1d21964, 7ae6b31c), 478, 163},
+  {GRISU_UINT64_C(0xde469fbd, 99a05fe3), 481, 164},
+  {GRISU_UINT64_C(0x8aec23d6, 80043bee), 485, 165},
+  {GRISU_UINT64_C(0xada72ccc, 20054aea), 488, 166},
+  {GRISU_UINT64_C(0xd910f7ff, 28069da4), 491, 167},
+  {GRISU_UINT64_C(0x87aa9aff, 79042287), 495, 168},
+  {GRISU_UINT64_C(0xa99541bf, 57452b28), 498, 169},
+  {GRISU_UINT64_C(0xd3fa922f, 2d1675f2), 501, 170},
+  {GRISU_UINT64_C(0x847c9b5d, 7c2e09b7), 505, 171},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0xcf02b2c2, 1207ef2f), 511, 173},
+  {GRISU_UINT64_C(0x8161afb9, 4b44f57d), 515, 174},
+  {GRISU_UINT64_C(0xa1ba1ba7, 9e1632dc), 518, 175},
+  {GRISU_UINT64_C(0xca28a291, 859bbf93), 521, 176},
+  {GRISU_UINT64_C(0xfcb2cb35, e702af78), 524, 177},
+  {GRISU_UINT64_C(0x9defbf01, b061adab), 528, 178},
+  {GRISU_UINT64_C(0xc56baec2, 1c7a1916), 531, 179},
+  {GRISU_UINT64_C(0xf6c69a72, a3989f5c), 534, 180},
+  {GRISU_UINT64_C(0x9a3c2087, a63f6399), 538, 181},
+  {GRISU_UINT64_C(0xc0cb28a9, 8fcf3c80), 541, 182},
+  {GRISU_UINT64_C(0xf0fdf2d3, f3c30b9f), 544, 183},
+  {GRISU_UINT64_C(0x969eb7c4, 7859e744), 548, 184},
+  {GRISU_UINT64_C(0xbc4665b5, 96706115), 551, 185},
+  {GRISU_UINT64_C(0xeb57ff22, fc0c795a), 554, 186},
+  {GRISU_UINT64_C(0x9316ff75, dd87cbd8), 558, 187},
+  {GRISU_UINT64_C(0xb7dcbf53, 54e9bece), 561, 188},
+  {GRISU_UINT64_C(0xe5d3ef28, 2a242e82), 564, 189},
+  {GRISU_UINT64_C(0x8fa47579, 1a569d11), 568, 190},
+  {GRISU_UINT64_C(0xb38d92d7, 60ec4455), 571, 191},
+  {GRISU_UINT64_C(0xe070f78d, 3927556b), 574, 192},
+  {GRISU_UINT64_C(0x8c469ab8, 43b89563), 578, 193},
+  {GRISU_UINT64_C(0xaf584166, 54a6babb), 581, 194},
+  {GRISU_UINT64_C(0xdb2e51bf, e9d0696a), 584, 195},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0xab3c2fdd, eeaad25b), 591, 197},
+  {GRISU_UINT64_C(0xd60b3bd5, 6a5586f2), 594, 198},
+  {GRISU_UINT64_C(0x85c70565, 62757457), 598, 199},
+  {GRISU_UINT64_C(0xa738c6be, bb12d16d), 601, 200},
+  {GRISU_UINT64_C(0xd106f86e, 69d785c8), 604, 201},
+  {GRISU_UINT64_C(0x82a45b45, 0226b39d), 608, 202},
+  {GRISU_UINT64_C(0xa34d7216, 42b06084), 611, 203},
+  {GRISU_UINT64_C(0xcc20ce9b, d35c78a5), 614, 204},
+  {GRISU_UINT64_C(0xff290242, c83396ce), 617, 205},
+  {GRISU_UINT64_C(0x9f79a169, bd203e41), 621, 206},
+  {GRISU_UINT64_C(0xc75809c4, 2c684dd1), 624, 207},
+  {GRISU_UINT64_C(0xf92e0c35, 37826146), 627, 208},
+  {GRISU_UINT64_C(0x9bbcc7a1, 42b17ccc), 631, 209},
+  {GRISU_UINT64_C(0xc2abf989, 935ddbfe), 634, 210},
+  {GRISU_UINT64_C(0xf356f7eb, f83552fe), 637, 211},
+  {GRISU_UINT64_C(0x98165af3, 7b2153df), 641, 212},
+  {GRISU_UINT64_C(0xbe1bf1b0, 59e9a8d6), 644, 213},
+  {GRISU_UINT64_C(0xeda2ee1c, 7064130c), 647, 214},
+  {GRISU_UINT64_C(0x9485d4d1, c63e8be8), 651, 215},
+  {GRISU_UINT64_C(0xb9a74a06, 37ce2ee1), 654, 216},
+  {GRISU_UINT64_C(0xe8111c87, c5c1ba9a), 657, 217},
+  {GRISU_UINT64_C(0x910ab1d4, db9914a0), 661, 218},
+  {GRISU_UINT64_C(0xb54d5e4a, 127f59c8), 664, 219},
+  {GRISU_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
+  {GRISU_UINT64_C(0x8da471a9, de737e24), 671, 221},
+  {GRISU_UINT64_C(0xb10d8e14, 56105dad), 674, 222},
+  {GRISU_UINT64_C(0xdd50f199, 6b947519), 677, 223},
+  {GRISU_UINT64_C(0x8a5296ff, e33cc930), 681, 224},
+  {GRISU_UINT64_C(0xace73cbf, dc0bfb7b), 684, 225},
+  {GRISU_UINT64_C(0xd8210bef, d30efa5a), 687, 226},
+  {GRISU_UINT64_C(0x8714a775, e3e95c78), 691, 227},
+  {GRISU_UINT64_C(0xa8d9d153, 5ce3b396), 694, 228},
+  {GRISU_UINT64_C(0xd31045a8, 341ca07c), 697, 229},
+  {GRISU_UINT64_C(0x83ea2b89, 2091e44e), 701, 230},
+  {GRISU_UINT64_C(0xa4e4b66b, 68b65d61), 704, 231},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0x80d2ae83, e9ce78f4), 711, 233},
+  {GRISU_UINT64_C(0xa1075a24, e4421731), 714, 234},
+  {GRISU_UINT64_C(0xc94930ae, 1d529cfd), 717, 235},
+  {GRISU_UINT64_C(0xfb9b7cd9, a4a7443c), 720, 236},
+  {GRISU_UINT64_C(0x9d412e08, 06e88aa6), 724, 237},
+  {GRISU_UINT64_C(0xc491798a, 08a2ad4f), 727, 238},
+  {GRISU_UINT64_C(0xf5b5d7ec, 8acb58a3), 730, 239},
+  {GRISU_UINT64_C(0x9991a6f3, d6bf1766), 734, 240},
+  {GRISU_UINT64_C(0xbff610b0, cc6edd3f), 737, 241},
+  {GRISU_UINT64_C(0xeff394dc, ff8a948f), 740, 242},
+  {GRISU_UINT64_C(0x95f83d0a, 1fb69cd9), 744, 243},
+  {GRISU_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
+  {GRISU_UINT64_C(0xea53df5f, d18d5514), 750, 245},
+  {GRISU_UINT64_C(0x92746b9b, e2f8552c), 754, 246},
+  {GRISU_UINT64_C(0xb7118682, dbb66a77), 757, 247},
+  {GRISU_UINT64_C(0xe4d5e823, 92a40515), 760, 248},
+  {GRISU_UINT64_C(0x8f05b116, 3ba6832d), 764, 249},
+  {GRISU_UINT64_C(0xb2c71d5b, ca9023f8), 767, 250},
+  {GRISU_UINT64_C(0xdf78e4b2, bd342cf7), 770, 251},
+  {GRISU_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
+  {GRISU_UINT64_C(0xae9672ab, a3d0c321), 777, 253},
+  {GRISU_UINT64_C(0xda3c0f56, 8cc4f3e9), 780, 254},
+  {GRISU_UINT64_C(0x88658996, 17fb1871), 784, 255},
+  {GRISU_UINT64_C(0xaa7eebfb, 9df9de8e), 787, 256},
+  {GRISU_UINT64_C(0xd51ea6fa, 85785631), 790, 257},
+  {GRISU_UINT64_C(0x8533285c, 936b35df), 794, 258},
+  {GRISU_UINT64_C(0xa67ff273, b8460357), 797, 259},
+  {GRISU_UINT64_C(0xd01fef10, a657842c), 800, 260},
+  {GRISU_UINT64_C(0x8213f56a, 67f6b29c), 804, 261},
+  {GRISU_UINT64_C(0xa298f2c5, 01f45f43), 807, 262},
+  {GRISU_UINT64_C(0xcb3f2f76, 42717713), 810, 263},
+  {GRISU_UINT64_C(0xfe0efb53, d30dd4d8), 813, 264},
+  {GRISU_UINT64_C(0x9ec95d14, 63e8a507), 817, 265},
+  {GRISU_UINT64_C(0xc67bb459, 7ce2ce49), 820, 266},
+  {GRISU_UINT64_C(0xf81aa16f, dc1b81db), 823, 267},
+  {GRISU_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
+  {GRISU_UINT64_C(0xc1d4ce1f, 63f57d73), 830, 269},
+  {GRISU_UINT64_C(0xf24a01a7, 3cf2dcd0), 833, 270},
+  {GRISU_UINT64_C(0x976e4108, 8617ca02), 837, 271},
+  {GRISU_UINT64_C(0xbd49d14a, a79dbc82), 840, 272},
+  {GRISU_UINT64_C(0xec9c459d, 51852ba3), 843, 273},
+  {GRISU_UINT64_C(0x93e1ab82, 52f33b46), 847, 274},
+  {GRISU_UINT64_C(0xb8da1662, e7b00a17), 850, 275},
+  {GRISU_UINT64_C(0xe7109bfb, a19c0c9d), 853, 276},
+  {GRISU_UINT64_C(0x906a617d, 450187e2), 857, 277},
+  {GRISU_UINT64_C(0xb484f9dc, 9641e9db), 860, 278},
+  {GRISU_UINT64_C(0xe1a63853, bbd26451), 863, 279},
+  {GRISU_UINT64_C(0x8d07e334, 55637eb3), 867, 280},
+  {GRISU_UINT64_C(0xb049dc01, 6abc5e60), 870, 281},
+  {GRISU_UINT64_C(0xdc5c5301, c56b75f7), 873, 282},
+  {GRISU_UINT64_C(0x89b9b3e1, 1b6329bb), 877, 283},
+  {GRISU_UINT64_C(0xac2820d9, 623bf429), 880, 284},
+  {GRISU_UINT64_C(0xd732290f, bacaf134), 883, 285},
+  {GRISU_UINT64_C(0x867f59a9, d4bed6c0), 887, 286},
+  {GRISU_UINT64_C(0xa81f3014, 49ee8c70), 890, 287},
+  {GRISU_UINT64_C(0xd226fc19, 5c6a2f8c), 893, 288},
+  {GRISU_UINT64_C(0x83585d8f, d9c25db8), 897, 289},
+  {GRISU_UINT64_C(0xa42e74f3, d032f526), 900, 290},
+  {GRISU_UINT64_C(0xcd3a1230, c43fb26f), 903, 291},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0xa0555e36, 1951c367), 910, 293},
+  {GRISU_UINT64_C(0xc86ab5c3, 9fa63441), 913, 294},
+  {GRISU_UINT64_C(0xfa856334, 878fc151), 916, 295},
+  {GRISU_UINT64_C(0x9c935e00, d4b9d8d2), 920, 296},
+  {GRISU_UINT64_C(0xc3b83581, 09e84f07), 923, 297},
+  {GRISU_UINT64_C(0xf4a642e1, 4c6262c9), 926, 298},
+  {GRISU_UINT64_C(0x98e7e9cc, cfbd7dbe), 930, 299},
+  {GRISU_UINT64_C(0xbf21e440, 03acdd2d), 933, 300},
+  {GRISU_UINT64_C(0xeeea5d50, 04981478), 936, 301},
+  {GRISU_UINT64_C(0x95527a52, 02df0ccb), 940, 302},
+  {GRISU_UINT64_C(0xbaa718e6, 8396cffe), 943, 303},
+  {GRISU_UINT64_C(0xe950df20, 247c83fd), 946, 304},
+  {GRISU_UINT64_C(0x91d28b74, 16cdd27e), 950, 305},
+  {GRISU_UINT64_C(0xb6472e51, 1c81471e), 953, 306},
+  {GRISU_UINT64_C(0xe3d8f9e5, 63a198e5), 956, 307},
+  {GRISU_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
+  {GRISU_UINT64_C(0xb201833b, 35d63f73), 963, 309},
+  {GRISU_UINT64_C(0xde81e40a, 034bcf50), 966, 310},
+  {GRISU_UINT64_C(0x8b112e86, 420f6192), 970, 311},
+  {GRISU_UINT64_C(0xadd57a27, d29339f6), 973, 312},
+  {GRISU_UINT64_C(0xd94ad8b1, c7380874), 976, 313},
+  {GRISU_UINT64_C(0x87cec76f, 1c830549), 980, 314},
+  {GRISU_UINT64_C(0xa9c2794a, e3a3c69b), 983, 315},
+  {GRISU_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
+  {GRISU_UINT64_C(0x849feec2, 81d7f329), 990, 317},
+  {GRISU_UINT64_C(0xa5c7ea73, 224deff3), 993, 318},
+  {GRISU_UINT64_C(0xcf39e50f, eae16bf0), 996, 319},
+  {GRISU_UINT64_C(0x81842f29, f2cce376), 1000, 320},
+  {GRISU_UINT64_C(0xa1e53af4, 6f801c53), 1003, 321},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0xfcf62c1d, ee382c42), 1009, 323},
+  {GRISU_UINT64_C(0x9e19db92, b4e31ba9), 1013, 324},
+  {GRISU_UINT64_C(0xc5a05277, 621be294), 1016, 325},
+  {GRISU_UINT64_C(0xf7086715, 3aa2db39), 1019, 326},
+  {GRISU_UINT64_C(0x9a65406d, 44a5c903), 1023, 327},
+  {GRISU_UINT64_C(0xc0fe9088, 95cf3b44), 1026, 328},
+  {GRISU_UINT64_C(0xf13e34aa, bb430a15), 1029, 329},
+  {GRISU_UINT64_C(0x96c6e0ea, b509e64d), 1033, 330},
+  {GRISU_UINT64_C(0xbc789925, 624c5fe1), 1036, 331},
+  {GRISU_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
+  {GRISU_UINT64_C(0x933e37a5, 34cbaae8), 1043, 333},
+  {GRISU_UINT64_C(0xb80dc58e, 81fe95a1), 1046, 334},
+  {GRISU_UINT64_C(0xe61136f2, 227e3b0a), 1049, 335},
+  {GRISU_UINT64_C(0x8fcac257, 558ee4e6), 1053, 336},
+  {GRISU_UINT64_C(0xb3bd72ed, 2af29e20), 1056, 337},
+  {GRISU_UINT64_C(0xe0accfa8, 75af45a8), 1059, 338},
+  {GRISU_UINT64_C(0x8c6c01c9, 498d8b89), 1063, 339},
+  {GRISU_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+  {GRISU_UINT64_C(0xdb68c2ca, 82ed2a06), 1069, 341},
+  {GRISU_UINT64_C(0x892179be, 91d43a44), 1073, 342},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(1) = 4;
+// number of elements (1): 651
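+//
+// Reading aid (inferred from the data above, not emitted by the table
+// generator): each entry caches one power of ten as
+//   10^(decimal_exponent) ~= significand * 2^(binary_exponent),
+// with the 64-bit significand normalized so its top bit is set
+// (2^63 <= significand < 2^64); e.g. {0x80000000 00000000, -63, 0} is
+// 2^63 * 2^-63 = 10^0.  GRISU_CACHE_MAX_DISTANCE(i) is the largest gap
+// between the binary exponents of adjacent entries in table i (they
+// differ by 3 or 4 above, hence 4).
+//
+// A minimal spot check, kept inside a comment so it cannot affect the
+// build (uses std::ldexp from <cmath>; the entry is 10^-2 from table 1):
+/*
+  #include <cmath>
+  double approx = static_cast<double>(GRISU_UINT64_C(0xa3d70a3d, 70a3d70a))
+                  * std::ldexp(1.0, -70);  // significand * 2^-70
+  // approx ~= 0.01, matching the {..., -70, -2} entry above.
+*/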
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(2)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xb3c4f1ba, 87bc8697), -1080, -306},
+  {GRISU_UINT64_C(0x8c71dcd9, ba0b4926), -1073, -304},
+  {GRISU_UINT64_C(0xdb71e914, 32b1a24b), -1067, -302},
+  {GRISU_UINT64_C(0xab70fe17, c79ac6ca), -1060, -300},
+  {GRISU_UINT64_C(0x85f04682, 93f0eb4e), -1053, -298},
+  {GRISU_UINT64_C(0xd1476e2c, 07286faa), -1047, -296},
+  {GRISU_UINT64_C(0xa37fce12, 6597973d), -1040, -294},
+  {GRISU_UINT64_C(0xff77b1fc, bebcdc4f), -1034, -292},
+  {GRISU_UINT64_C(0xc795830d, 75038c1e), -1027, -290},
+  {GRISU_UINT64_C(0x9becce62, 836ac577), -1020, -288},
+  {GRISU_UINT64_C(0xf3a20279, ed56d48a), -1014, -286},
+  {GRISU_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
+  {GRISU_UINT64_C(0x94b3a202, eb1c3f39), -1000, -282},
+  {GRISU_UINT64_C(0xe858ad24, 8f5c22ca), -994, -280},
+  {GRISU_UINT64_C(0xb5854744, 8ffffb2e), -987, -278},
+  {GRISU_UINT64_C(0x8dd01fad, 907ffc3c), -980, -276},
+  {GRISU_UINT64_C(0xdd95317f, 31c7fa1d), -974, -274},
+  {GRISU_UINT64_C(0xad1c8eab, 5ee43b67), -967, -272},
+  {GRISU_UINT64_C(0x873e4f75, e2224e68), -960, -270},
+  {GRISU_UINT64_C(0xd3515c28, 31559a83), -954, -268},
+  {GRISU_UINT64_C(0xa5178fff, 668ae0b6), -947, -266},
+  {GRISU_UINT64_C(0x80fa687f, 881c7f8e), -940, -264},
+  {GRISU_UINT64_C(0xc9874347, 44ac874f), -934, -262},
+  {GRISU_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
+  {GRISU_UINT64_C(0xf6019da0, 7f549b2b), -921, -258},
+  {GRISU_UINT64_C(0xc0314325, 637a193a), -914, -256},
+  {GRISU_UINT64_C(0x96267c75, 35b763b5), -907, -254},
+  {GRISU_UINT64_C(0xea9c2277, 23ee8bcb), -901, -252},
+  {GRISU_UINT64_C(0xb749faed, 14125d37), -894, -250},
+  {GRISU_UINT64_C(0x8f31cc09, 37ae58d3), -887, -248},
+  {GRISU_UINT64_C(0xdfbdcece, 67006ac9), -881, -246},
+  {GRISU_UINT64_C(0xaecc4991, 4078536d), -874, -244},
+  {GRISU_UINT64_C(0x888f9979, 7a5e012d), -867, -242},
+  {GRISU_UINT64_C(0xd5605fcd, cf32e1d7), -861, -240},
+  {GRISU_UINT64_C(0xa6b34ad8, c9dfc070), -854, -238},
+  {GRISU_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
+  {GRISU_UINT64_C(0xcb7ddcdd, a26da269), -841, -234},
+  {GRISU_UINT64_C(0x9efa548d, 26e5a6e2), -834, -232},
+  {GRISU_UINT64_C(0xf867241c, 8cc6d4c1), -828, -230},
+  {GRISU_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
+  {GRISU_UINT64_C(0x979cf3ca, 6cec5b5b), -814, -226},
+  {GRISU_UINT64_C(0xece53cec, 4a314ebe), -808, -224},
+  {GRISU_UINT64_C(0xb9131798, 99f68584), -801, -222},
+  {GRISU_UINT64_C(0x9096ea6f, 3848984f), -794, -220},
+  {GRISU_UINT64_C(0xe1ebce4d, c7f16dfc), -788, -218},
+  {GRISU_UINT64_C(0xb080392c, c4349ded), -781, -216},
+  {GRISU_UINT64_C(0x89e42caa, f9491b61), -774, -214},
+  {GRISU_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
+  {GRISU_UINT64_C(0xa8530886, b54dbdec), -761, -210},
+  {GRISU_UINT64_C(0x8380dea9, 3da4bc60), -754, -208},
+  {GRISU_UINT64_C(0xcd795be8, 70516656), -748, -206},
+  {GRISU_UINT64_C(0xa086cfcd, 97bf97f4), -741, -204},
+  {GRISU_UINT64_C(0xfad2a4b1, 3d1b5d6c), -735, -202},
+  {GRISU_UINT64_C(0xc3f490aa, 77bd60fd), -728, -200},
+  {GRISU_UINT64_C(0x99171105, 2d8bf3c5), -721, -198},
+  {GRISU_UINT64_C(0xef340a98, 172aace5), -715, -196},
+  {GRISU_UINT64_C(0xbae0a846, d2195713), -708, -194},
+  {GRISU_UINT64_C(0x91ff8377, 5423cc06), -701, -192},
+  {GRISU_UINT64_C(0xe41f3d6a, 7377eeca), -695, -190},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0x8b3c113c, 38f9f37f), -681, -186},
+  {GRISU_UINT64_C(0xd98ddaee, 19068c76), -675, -184},
+  {GRISU_UINT64_C(0xa9f6d30a, 038d1dbc), -668, -182},
+  {GRISU_UINT64_C(0x84c8d4df, d2c63f3b), -661, -180},
+  {GRISU_UINT64_C(0xcf79cc9d, b955c2cc), -655, -178},
+  {GRISU_UINT64_C(0xa21727db, 38cb0030), -648, -176},
+  {GRISU_UINT64_C(0xfd442e46, 88bd304b), -642, -174},
+  {GRISU_UINT64_C(0xc5dd4427, 1ad3cdba), -635, -172},
+  {GRISU_UINT64_C(0x9a94dd3e, 8cf578ba), -628, -170},
+  {GRISU_UINT64_C(0xf18899b1, bc3f8ca2), -622, -168},
+  {GRISU_UINT64_C(0xbcb2b812, db11a5de), -615, -166},
+  {GRISU_UINT64_C(0x936b9fce, bb25c996), -608, -164},
+  {GRISU_UINT64_C(0xe65829b3, 046b0afa), -602, -162},
+  {GRISU_UINT64_C(0xb3f4e093, db73a093), -595, -160},
+  {GRISU_UINT64_C(0x8c974f73, 83725573), -588, -158},
+  {GRISU_UINT64_C(0xdbac6c24, 7d62a584), -582, -156},
+  {GRISU_UINT64_C(0xab9eb47c, 81f5114f), -575, -154},
+  {GRISU_UINT64_C(0x8613fd01, 45877586), -568, -152},
+  {GRISU_UINT64_C(0xd17f3b51, fca3a7a1), -562, -150},
+  {GRISU_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+  {GRISU_UINT64_C(0xffbbcfe9, 94e5c620), -549, -146},
+  {GRISU_UINT64_C(0xc7caba6e, 7c5382c9), -542, -144},
+  {GRISU_UINT64_C(0x9c1661a6, 51213e2d), -535, -142},
+  {GRISU_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
+  {GRISU_UINT64_C(0xbe895233, 86091466), -522, -138},
+  {GRISU_UINT64_C(0x94db4838, 40b717f0), -515, -136},
+  {GRISU_UINT64_C(0xe896a0d7, e51e1566), -509, -134},
+  {GRISU_UINT64_C(0xb5b5ada8, aaff80b8), -502, -132},
+  {GRISU_UINT64_C(0x8df5efab, c5979c90), -495, -130},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0xad4ab711, 2eb3929e), -482, -126},
+  {GRISU_UINT64_C(0x87625f05, 6c7c4a8b), -475, -124},
+  {GRISU_UINT64_C(0xd389b478, 79823479), -469, -122},
+  {GRISU_UINT64_C(0xa54394fe, 1eedb8ff), -462, -120},
+  {GRISU_UINT64_C(0x811ccc66, 8829b887), -455, -118},
+  {GRISU_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
+  {GRISU_UINT64_C(0x9d9ba783, 2936edc1), -442, -114},
+  {GRISU_UINT64_C(0xf64335bc, f065d37d), -436, -112},
+  {GRISU_UINT64_C(0xc06481fb, 9bcf8d3a), -429, -110},
+  {GRISU_UINT64_C(0x964e858c, 91ba2655), -422, -108},
+  {GRISU_UINT64_C(0xeadab0ab, a3b2dbe5), -416, -106},
+  {GRISU_UINT64_C(0xb77ada06, 17e3bbcb), -409, -104},
+  {GRISU_UINT64_C(0x8f57fa54, c2a9eab7), -402, -102},
+  {GRISU_UINT64_C(0xdff97724, 70297ebd), -396, -100},
+  {GRISU_UINT64_C(0xaefae514, 77a06b04), -389, -98},
+  {GRISU_UINT64_C(0x88b402f7, fd75539b), -382, -96},
+  {GRISU_UINT64_C(0xd59944a3, 7c0752a2), -376, -94},
+  {GRISU_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+  {GRISU_UINT64_C(0x825ecc24, c8737830), -362, -90},
+  {GRISU_UINT64_C(0xcbb41ef9, 79346bca), -356, -88},
+  {GRISU_UINT64_C(0x9f24b832, e6b0f436), -349, -86},
+  {GRISU_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
+  {GRISU_UINT64_C(0xc24452da, 229b021c), -336, -82},
+  {GRISU_UINT64_C(0x97c560ba, 6b0919a6), -329, -80},
+  {GRISU_UINT64_C(0xed246723, 473e3813), -323, -78},
+  {GRISU_UINT64_C(0xb9447093, 8fa89bcf), -316, -76},
+  {GRISU_UINT64_C(0x90bd77f3, 483bb9ba), -309, -74},
+  {GRISU_UINT64_C(0xe2280b6c, 20dd5232), -303, -72},
+  {GRISU_UINT64_C(0xb0af48ec, 79ace837), -296, -70},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0xd7adf884, aa879177), -283, -66},
+  {GRISU_UINT64_C(0xa87fea27, a539e9a5), -276, -64},
+  {GRISU_UINT64_C(0x83a3eeee, f9153e89), -269, -62},
+  {GRISU_UINT64_C(0xcdb02555, 653131b6), -263, -60},
+  {GRISU_UINT64_C(0xa0b19d2a, b70e6ed6), -256, -58},
+  {GRISU_UINT64_C(0xfb158592, be068d2f), -250, -56},
+  {GRISU_UINT64_C(0xc428d05a, a4751e4d), -243, -54},
+  {GRISU_UINT64_C(0x993fe2c6, d07b7fac), -236, -52},
+  {GRISU_UINT64_C(0xef73d256, a5c0f77d), -230, -50},
+  {GRISU_UINT64_C(0xbb127c53, b17ec159), -223, -48},
+  {GRISU_UINT64_C(0x92267121, 62ab070e), -216, -46},
+  {GRISU_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
+  {GRISU_UINT64_C(0xb267ed19, 40f1c61c), -203, -42},
+  {GRISU_UINT64_C(0x8b61313b, babce2c6), -196, -40},
+  {GRISU_UINT64_C(0xd9c7dced, 53c72256), -190, -38},
+  {GRISU_UINT64_C(0xaa242499, 697392d3), -183, -36},
+  {GRISU_UINT64_C(0x84ec3c97, da624ab5), -176, -34},
+  {GRISU_UINT64_C(0xcfb11ead, 453994ba), -170, -32},
+  {GRISU_UINT64_C(0xa2425ff7, 5e14fc32), -163, -30},
+  {GRISU_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
+  {GRISU_UINT64_C(0xc6120625, 76589ddb), -150, -26},
+  {GRISU_UINT64_C(0x9abe14cd, 44753b53), -143, -24},
+  {GRISU_UINT64_C(0xf1c90080, baf72cb1), -137, -22},
+  {GRISU_UINT64_C(0xbce50864, 92111aeb), -130, -20},
+  {GRISU_UINT64_C(0x9392ee8e, 921d5d07), -123, -18},
+  {GRISU_UINT64_C(0xe69594be, c44de15b), -117, -16},
+  {GRISU_UINT64_C(0xb424dc35, 095cd80f), -110, -14},
+  {GRISU_UINT64_C(0x8cbccc09, 6f5088cc), -103, -12},
+  {GRISU_UINT64_C(0xdbe6fece, bdedd5bf), -97, -10},
+  {GRISU_UINT64_C(0xabcc7711, 8461cefd), -90, -8},
+  {GRISU_UINT64_C(0x8637bd05, af6c69b6), -83, -6},
+  {GRISU_UINT64_C(0xd1b71758, e219652c), -77, -4},
+  {GRISU_UINT64_C(0xa3d70a3d, 70a3d70a), -70, -2},
+  {GRISU_UINT64_C(0x80000000, 00000000), -63, 0},
+  {GRISU_UINT64_C(0xc8000000, 00000000), -57, 2},
+  {GRISU_UINT64_C(0x9c400000, 00000000), -50, 4},
+  {GRISU_UINT64_C(0xf4240000, 00000000), -44, 6},
+  {GRISU_UINT64_C(0xbebc2000, 00000000), -37, 8},
+  {GRISU_UINT64_C(0x9502f900, 00000000), -30, 10},
+  {GRISU_UINT64_C(0xe8d4a510, 00000000), -24, 12},
+  {GRISU_UINT64_C(0xb5e620f4, 80000000), -17, 14},
+  {GRISU_UINT64_C(0x8e1bc9bf, 04000000), -10, 16},
+  {GRISU_UINT64_C(0xde0b6b3a, 76400000), -4, 18},
+  {GRISU_UINT64_C(0xad78ebc5, ac620000), 3, 20},
+  {GRISU_UINT64_C(0x87867832, 6eac9000), 10, 22},
+  {GRISU_UINT64_C(0xd3c21bce, cceda100), 16, 24},
+  {GRISU_UINT64_C(0xa56fa5b9, 9019a5c8), 23, 26},
+  {GRISU_UINT64_C(0x813f3978, f8940984), 30, 28},
+  {GRISU_UINT64_C(0xc9f2c9cd, 04674edf), 36, 30},
+  {GRISU_UINT64_C(0x9dc5ada8, 2b70b59e), 43, 32},
+  {GRISU_UINT64_C(0xf684df56, c3e01bc7), 49, 34},
+  {GRISU_UINT64_C(0xc097ce7b, c90715b3), 56, 36},
+  {GRISU_UINT64_C(0x96769950, b50d88f4), 63, 38},
+  {GRISU_UINT64_C(0xeb194f8e, 1ae525fd), 69, 40},
+  {GRISU_UINT64_C(0xb7abc627, 050305ae), 76, 42},
+  {GRISU_UINT64_C(0x8f7e32ce, 7bea5c70), 83, 44},
+  {GRISU_UINT64_C(0xe0352f62, a19e306f), 89, 46},
+  {GRISU_UINT64_C(0xaf298d05, 0e4395d7), 96, 48},
+  {GRISU_UINT64_C(0x88d8762b, f324cd10), 103, 50},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0xa70c3c40, a64e6c52), 116, 54},
+  {GRISU_UINT64_C(0x82818f12, 81ed44a0), 123, 56},
+  {GRISU_UINT64_C(0xcbea6f8c, eb02bb3a), 129, 58},
+  {GRISU_UINT64_C(0x9f4f2726, 179a2245), 136, 60},
+  {GRISU_UINT64_C(0xf8ebad2b, 84e0d58c), 142, 62},
+  {GRISU_UINT64_C(0xc2781f49, ffcfa6d5), 149, 64},
+  {GRISU_UINT64_C(0x97edd871, cfda3a57), 156, 66},
+  {GRISU_UINT64_C(0xed63a231, d4c4fb27), 162, 68},
+  {GRISU_UINT64_C(0xb975d6b6, ee39e437), 169, 70},
+  {GRISU_UINT64_C(0x90e40fbe, ea1d3a4b), 176, 72},
+  {GRISU_UINT64_C(0xe264589a, 4dcdab15), 182, 74},
+  {GRISU_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
+  {GRISU_UINT64_C(0x8a2dbf14, 2dfcc7ab), 196, 78},
+  {GRISU_UINT64_C(0xd7e77a8f, 87daf7fc), 202, 80},
+  {GRISU_UINT64_C(0xa8acd7c0, 222311bd), 209, 82},
+  {GRISU_UINT64_C(0x83c7088e, 1aab65db), 216, 84},
+  {GRISU_UINT64_C(0xcde6fd5e, 09abcf27), 222, 86},
+  {GRISU_UINT64_C(0xa0dc75f1, 778e39d6), 229, 88},
+  {GRISU_UINT64_C(0xfb587849, 4ace3a5f), 235, 90},
+  {GRISU_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
+  {GRISU_UINT64_C(0x9968bf6a, bbe85f20), 249, 94},
+  {GRISU_UINT64_C(0xefb3ab16, c59b14a3), 255, 96},
+  {GRISU_UINT64_C(0xbb445da9, ca61281f), 262, 98},
+  {GRISU_UINT64_C(0x924d692c, a61be758), 269, 100},
+  {GRISU_UINT64_C(0xe498f455, c38b997a), 275, 102},
+  {GRISU_UINT64_C(0xb2977ee3, 00c50fe7), 282, 104},
+  {GRISU_UINT64_C(0x8b865b21, 5899f46d), 289, 106},
+  {GRISU_UINT64_C(0xda01ee64, 1a708dea), 295, 108},
+  {GRISU_UINT64_C(0xaa51823e, 34a7eedf), 302, 110},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0xcfe87f7c, ef46ff17), 315, 114},
+  {GRISU_UINT64_C(0xa26da399, 9aef774a), 322, 116},
+  {GRISU_UINT64_C(0xfdcb4fa0, 02162a63), 328, 118},
+  {GRISU_UINT64_C(0xc646d635, 01a1511e), 335, 120},
+  {GRISU_UINT64_C(0x9ae75759, 6946075f), 342, 122},
+  {GRISU_UINT64_C(0xf209787b, b47d6b85), 348, 124},
+  {GRISU_UINT64_C(0xbd176620, a501fc00), 355, 126},
+  {GRISU_UINT64_C(0x93ba47c9, 80e98ce0), 362, 128},
+  {GRISU_UINT64_C(0xe6d3102a, d96cec1e), 368, 130},
+  {GRISU_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
+  {GRISU_UINT64_C(0x8ce2529e, 2734bb1d), 382, 134},
+  {GRISU_UINT64_C(0xdc21a117, 1d42645d), 388, 136},
+  {GRISU_UINT64_C(0xabfa45da, 0edbde69), 395, 138},
+  {GRISU_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
+  {GRISU_UINT64_C(0xd1ef0244, af2364ff), 408, 142},
+  {GRISU_UINT64_C(0xa402b9c5, a8d3a6e7), 415, 144},
+  {GRISU_UINT64_C(0x80222122, 6be55a65), 422, 146},
+  {GRISU_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
+  {GRISU_UINT64_C(0x9c69a972, 84b578d8), 435, 150},
+  {GRISU_UINT64_C(0xf46518c2, ef5b8cd1), 441, 152},
+  {GRISU_UINT64_C(0xbeeefb58, 4aff8604), 448, 154},
+  {GRISU_UINT64_C(0x952ab45c, fa97a0b3), 455, 156},
+  {GRISU_UINT64_C(0xe912b9d1, 478ceb17), 461, 158},
+  {GRISU_UINT64_C(0xb616a12b, 7fe617aa), 468, 160},
+  {GRISU_UINT64_C(0x8e41ade9, fbebc27d), 475, 162},
+  {GRISU_UINT64_C(0xde469fbd, 99a05fe3), 481, 164},
+  {GRISU_UINT64_C(0xada72ccc, 20054aea), 488, 166},
+  {GRISU_UINT64_C(0x87aa9aff, 79042287), 495, 168},
+  {GRISU_UINT64_C(0xd3fa922f, 2d1675f2), 501, 170},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0x8161afb9, 4b44f57d), 515, 174},
+  {GRISU_UINT64_C(0xca28a291, 859bbf93), 521, 176},
+  {GRISU_UINT64_C(0x9defbf01, b061adab), 528, 178},
+  {GRISU_UINT64_C(0xf6c69a72, a3989f5c), 534, 180},
+  {GRISU_UINT64_C(0xc0cb28a9, 8fcf3c80), 541, 182},
+  {GRISU_UINT64_C(0x969eb7c4, 7859e744), 548, 184},
+  {GRISU_UINT64_C(0xeb57ff22, fc0c795a), 554, 186},
+  {GRISU_UINT64_C(0xb7dcbf53, 54e9bece), 561, 188},
+  {GRISU_UINT64_C(0x8fa47579, 1a569d11), 568, 190},
+  {GRISU_UINT64_C(0xe070f78d, 3927556b), 574, 192},
+  {GRISU_UINT64_C(0xaf584166, 54a6babb), 581, 194},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0xd60b3bd5, 6a5586f2), 594, 198},
+  {GRISU_UINT64_C(0xa738c6be, bb12d16d), 601, 200},
+  {GRISU_UINT64_C(0x82a45b45, 0226b39d), 608, 202},
+  {GRISU_UINT64_C(0xcc20ce9b, d35c78a5), 614, 204},
+  {GRISU_UINT64_C(0x9f79a169, bd203e41), 621, 206},
+  {GRISU_UINT64_C(0xf92e0c35, 37826146), 627, 208},
+  {GRISU_UINT64_C(0xc2abf989, 935ddbfe), 634, 210},
+  {GRISU_UINT64_C(0x98165af3, 7b2153df), 641, 212},
+  {GRISU_UINT64_C(0xeda2ee1c, 7064130c), 647, 214},
+  {GRISU_UINT64_C(0xb9a74a06, 37ce2ee1), 654, 216},
+  {GRISU_UINT64_C(0x910ab1d4, db9914a0), 661, 218},
+  {GRISU_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
+  {GRISU_UINT64_C(0xb10d8e14, 56105dad), 674, 222},
+  {GRISU_UINT64_C(0x8a5296ff, e33cc930), 681, 224},
+  {GRISU_UINT64_C(0xd8210bef, d30efa5a), 687, 226},
+  {GRISU_UINT64_C(0xa8d9d153, 5ce3b396), 694, 228},
+  {GRISU_UINT64_C(0x83ea2b89, 2091e44e), 701, 230},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0xa1075a24, e4421731), 714, 234},
+  {GRISU_UINT64_C(0xfb9b7cd9, a4a7443c), 720, 236},
+  {GRISU_UINT64_C(0xc491798a, 08a2ad4f), 727, 238},
+  {GRISU_UINT64_C(0x9991a6f3, d6bf1766), 734, 240},
+  {GRISU_UINT64_C(0xeff394dc, ff8a948f), 740, 242},
+  {GRISU_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
+  {GRISU_UINT64_C(0x92746b9b, e2f8552c), 754, 246},
+  {GRISU_UINT64_C(0xe4d5e823, 92a40515), 760, 248},
+  {GRISU_UINT64_C(0xb2c71d5b, ca9023f8), 767, 250},
+  {GRISU_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
+  {GRISU_UINT64_C(0xda3c0f56, 8cc4f3e9), 780, 254},
+  {GRISU_UINT64_C(0xaa7eebfb, 9df9de8e), 787, 256},
+  {GRISU_UINT64_C(0x8533285c, 936b35df), 794, 258},
+  {GRISU_UINT64_C(0xd01fef10, a657842c), 800, 260},
+  {GRISU_UINT64_C(0xa298f2c5, 01f45f43), 807, 262},
+  {GRISU_UINT64_C(0xfe0efb53, d30dd4d8), 813, 264},
+  {GRISU_UINT64_C(0xc67bb459, 7ce2ce49), 820, 266},
+  {GRISU_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
+  {GRISU_UINT64_C(0xf24a01a7, 3cf2dcd0), 833, 270},
+  {GRISU_UINT64_C(0xbd49d14a, a79dbc82), 840, 272},
+  {GRISU_UINT64_C(0x93e1ab82, 52f33b46), 847, 274},
+  {GRISU_UINT64_C(0xe7109bfb, a19c0c9d), 853, 276},
+  {GRISU_UINT64_C(0xb484f9dc, 9641e9db), 860, 278},
+  {GRISU_UINT64_C(0x8d07e334, 55637eb3), 867, 280},
+  {GRISU_UINT64_C(0xdc5c5301, c56b75f7), 873, 282},
+  {GRISU_UINT64_C(0xac2820d9, 623bf429), 880, 284},
+  {GRISU_UINT64_C(0x867f59a9, d4bed6c0), 887, 286},
+  {GRISU_UINT64_C(0xd226fc19, 5c6a2f8c), 893, 288},
+  {GRISU_UINT64_C(0xa42e74f3, d032f526), 900, 290},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0xc86ab5c3, 9fa63441), 913, 294},
+  {GRISU_UINT64_C(0x9c935e00, d4b9d8d2), 920, 296},
+  {GRISU_UINT64_C(0xf4a642e1, 4c6262c9), 926, 298},
+  {GRISU_UINT64_C(0xbf21e440, 03acdd2d), 933, 300},
+  {GRISU_UINT64_C(0x95527a52, 02df0ccb), 940, 302},
+  {GRISU_UINT64_C(0xe950df20, 247c83fd), 946, 304},
+  {GRISU_UINT64_C(0xb6472e51, 1c81471e), 953, 306},
+  {GRISU_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
+  {GRISU_UINT64_C(0xde81e40a, 034bcf50), 966, 310},
+  {GRISU_UINT64_C(0xadd57a27, d29339f6), 973, 312},
+  {GRISU_UINT64_C(0x87cec76f, 1c830549), 980, 314},
+  {GRISU_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
+  {GRISU_UINT64_C(0xa5c7ea73, 224deff3), 993, 318},
+  {GRISU_UINT64_C(0x81842f29, f2cce376), 1000, 320},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0x9e19db92, b4e31ba9), 1013, 324},
+  {GRISU_UINT64_C(0xf7086715, 3aa2db39), 1019, 326},
+  {GRISU_UINT64_C(0xc0fe9088, 95cf3b44), 1026, 328},
+  {GRISU_UINT64_C(0x96c6e0ea, b509e64d), 1033, 330},
+  {GRISU_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
+  {GRISU_UINT64_C(0xb80dc58e, 81fe95a1), 1046, 334},
+  {GRISU_UINT64_C(0x8fcac257, 558ee4e6), 1053, 336},
+  {GRISU_UINT64_C(0xe0accfa8, 75af45a8), 1059, 338},
+  {GRISU_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+  {GRISU_UINT64_C(0x892179be, 91d43a44), 1073, 342},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(2) = 7;
+// number of elements (2): 326
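+//
+// Table 2 keeps every second decimal exponent of table 1 (..., -308,
+// -306, ...), roughly halving the element count (651 -> 326) while
+// widening the worst-case binary-exponent gap between neighbours from
+// 4 to 7; the MAX_DISTANCE constants record exactly that per-table gap.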
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(3)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xe0b62e29, 29aba83c), -1077, -305},
+  {GRISU_UINT64_C(0xdb71e914, 32b1a24b), -1067, -302},
+  {GRISU_UINT64_C(0xd64d3d9d, b981787d), -1057, -299},
+  {GRISU_UINT64_C(0xd1476e2c, 07286faa), -1047, -296},
+  {GRISU_UINT64_C(0xcc5fc196, fefd7d0c), -1037, -293},
+  {GRISU_UINT64_C(0xc795830d, 75038c1e), -1027, -290},
+  {GRISU_UINT64_C(0xc2e801fb, 244576d5), -1017, -287},
+  {GRISU_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
+  {GRISU_UINT64_C(0xb9e08a83, a5e34f08), -997, -281},
+  {GRISU_UINT64_C(0xb5854744, 8ffffb2e), -987, -278},
+  {GRISU_UINT64_C(0xb1442798, f49ffb4b), -977, -275},
+  {GRISU_UINT64_C(0xad1c8eab, 5ee43b67), -967, -272},
+  {GRISU_UINT64_C(0xa90de353, 5aaae202), -957, -269},
+  {GRISU_UINT64_C(0xa5178fff, 668ae0b6), -947, -266},
+  {GRISU_UINT64_C(0xa139029f, 6a239f72), -937, -263},
+  {GRISU_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
+  {GRISU_UINT64_C(0x99c10284, 4f94e0fb), -917, -257},
+  {GRISU_UINT64_C(0x96267c75, 35b763b5), -907, -254},
+  {GRISU_UINT64_C(0x92a1958a, 7675175f), -897, -251},
+  {GRISU_UINT64_C(0x8f31cc09, 37ae58d3), -887, -248},
+  {GRISU_UINT64_C(0x8bd6a141, 006042be), -877, -245},
+  {GRISU_UINT64_C(0x888f9979, 7a5e012d), -867, -242},
+  {GRISU_UINT64_C(0x855c3be0, a17fcd26), -857, -239},
+  {GRISU_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
+  {GRISU_UINT64_C(0xfe5d5415, 0b090b03), -838, -233},
+  {GRISU_UINT64_C(0xf867241c, 8cc6d4c1), -828, -230},
+  {GRISU_UINT64_C(0xf294b943, e17a2bc4), -818, -227},
+  {GRISU_UINT64_C(0xece53cec, 4a314ebe), -808, -224},
+  {GRISU_UINT64_C(0xe757dd7e, c07426e5), -798, -221},
+  {GRISU_UINT64_C(0xe1ebce4d, c7f16dfc), -788, -218},
+  {GRISU_UINT64_C(0xdca04777, f541c568), -778, -215},
+  {GRISU_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
+  {GRISU_UINT64_C(0xd267caa8, 62a12d67), -758, -209},
+  {GRISU_UINT64_C(0xcd795be8, 70516656), -748, -206},
+  {GRISU_UINT64_C(0xc8a883c0, fdaf7df0), -738, -203},
+  {GRISU_UINT64_C(0xc3f490aa, 77bd60fd), -728, -200},
+  {GRISU_UINT64_C(0xbf5cd546, 78eef0b7), -718, -197},
+  {GRISU_UINT64_C(0xbae0a846, d2195713), -708, -194},
+  {GRISU_UINT64_C(0xb67f6455, 292cbf08), -698, -191},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0xae0b158b, 4738705f), -678, -185},
+  {GRISU_UINT64_C(0xa9f6d30a, 038d1dbc), -668, -182},
+  {GRISU_UINT64_C(0xa5fb0a17, c777cf0a), -658, -179},
+  {GRISU_UINT64_C(0xa21727db, 38cb0030), -648, -176},
+  {GRISU_UINT64_C(0x9e4a9cec, 15763e2f), -638, -173},
+  {GRISU_UINT64_C(0x9a94dd3e, 8cf578ba), -628, -170},
+  {GRISU_UINT64_C(0x96f5600f, 15a7b7e5), -618, -167},
+  {GRISU_UINT64_C(0x936b9fce, bb25c996), -608, -164},
+  {GRISU_UINT64_C(0x8ff71a0f, e2c2e6dc), -598, -161},
+  {GRISU_UINT64_C(0x8c974f73, 83725573), -588, -158},
+  {GRISU_UINT64_C(0x894bc396, ce5da772), -578, -155},
+  {GRISU_UINT64_C(0x8613fd01, 45877586), -568, -152},
+  {GRISU_UINT64_C(0x82ef8513, 3de648c5), -558, -149},
+  {GRISU_UINT64_C(0xffbbcfe9, 94e5c620), -549, -146},
+  {GRISU_UINT64_C(0xf9bd690a, 1b68637b), -539, -143},
+  {GRISU_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
+  {GRISU_UINT64_C(0xee2ba6c0, 678b597f), -519, -137},
+  {GRISU_UINT64_C(0xe896a0d7, e51e1566), -509, -134},
+  {GRISU_UINT64_C(0xe3231912, d5bf60e6), -499, -131},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0xd89d64d5, 7a607745), -479, -125},
+  {GRISU_UINT64_C(0xd389b478, 79823479), -469, -122},
+  {GRISU_UINT64_C(0xce947a3d, a6a9273e), -459, -119},
+  {GRISU_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
+  {GRISU_UINT64_C(0xc5029163, f384a931), -439, -113},
+  {GRISU_UINT64_C(0xc06481fb, 9bcf8d3a), -429, -110},
+  {GRISU_UINT64_C(0xbbe226ef, b628afeb), -419, -107},
+  {GRISU_UINT64_C(0xb77ada06, 17e3bbcb), -409, -104},
+  {GRISU_UINT64_C(0xb32df8e9, f3546564), -399, -101},
+  {GRISU_UINT64_C(0xaefae514, 77a06b04), -389, -98},
+  {GRISU_UINT64_C(0xaae103b5, fcd2a882), -379, -95},
+  {GRISU_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+  {GRISU_UINT64_C(0xa2f67f2d, fa90563b), -359, -89},
+  {GRISU_UINT64_C(0x9f24b832, e6b0f436), -349, -86},
+  {GRISU_UINT64_C(0x9b69dbe1, b548ce7d), -339, -83},
+  {GRISU_UINT64_C(0x97c560ba, 6b0919a6), -329, -80},
+  {GRISU_UINT64_C(0x9436c076, 0c86e30c), -319, -77},
+  {GRISU_UINT64_C(0x90bd77f3, 483bb9ba), -309, -74},
+  {GRISU_UINT64_C(0x8d590723, 948a535f), -299, -71},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0x86ccbb52, ea94baeb), -279, -65},
+  {GRISU_UINT64_C(0x83a3eeee, f9153e89), -269, -62},
+  {GRISU_UINT64_C(0x808e1755, 5f3ebf12), -259, -59},
+  {GRISU_UINT64_C(0xfb158592, be068d2f), -250, -56},
+  {GRISU_UINT64_C(0xf5330471, 4d9265e0), -240, -53},
+  {GRISU_UINT64_C(0xef73d256, a5c0f77d), -230, -50},
+  {GRISU_UINT64_C(0xe9d71b68, 9dde71b0), -220, -47},
+  {GRISU_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
+  {GRISU_UINT64_C(0xdf01e85f, 912e37a3), -200, -41},
+  {GRISU_UINT64_C(0xd9c7dced, 53c72256), -190, -38},
+  {GRISU_UINT64_C(0xd4ad2dbf, c3d07788), -180, -35},
+  {GRISU_UINT64_C(0xcfb11ead, 453994ba), -170, -32},
+  {GRISU_UINT64_C(0xcad2f7f5, 359a3b3e), -160, -29},
+  {GRISU_UINT64_C(0xc6120625, 76589ddb), -150, -26},
+  {GRISU_UINT64_C(0xc16d9a00, 95928a27), -140, -23},
+  {GRISU_UINT64_C(0xbce50864, 92111aeb), -130, -20},
+  {GRISU_UINT64_C(0xb877aa32, 36a4b449), -120, -17},
+  {GRISU_UINT64_C(0xb424dc35, 095cd80f), -110, -14},
+  {GRISU_UINT64_C(0xafebff0b, cb24aaff), -100, -11},
+  {GRISU_UINT64_C(0xabcc7711, 8461cefd), -90, -8},
+  {GRISU_UINT64_C(0xa7c5ac47, 1b478423), -80, -5},
+  {GRISU_UINT64_C(0xa3d70a3d, 70a3d70a), -70, -2},
+  {GRISU_UINT64_C(0xa0000000, 00000000), -60, 1},
+  {GRISU_UINT64_C(0x9c400000, 00000000), -50, 4},
+  {GRISU_UINT64_C(0x98968000, 00000000), -40, 7},
+  {GRISU_UINT64_C(0x9502f900, 00000000), -30, 10},
+  {GRISU_UINT64_C(0x9184e72a, 00000000), -20, 13},
+  {GRISU_UINT64_C(0x8e1bc9bf, 04000000), -10, 16},
+  {GRISU_UINT64_C(0x8ac72304, 89e80000), 0, 19},
+  {GRISU_UINT64_C(0x87867832, 6eac9000), 10, 22},
+  {GRISU_UINT64_C(0x84595161, 401484a0), 20, 25},
+  {GRISU_UINT64_C(0x813f3978, f8940984), 30, 28},
+  {GRISU_UINT64_C(0xfc6f7c40, 45812296), 39, 31},
+  {GRISU_UINT64_C(0xf684df56, c3e01bc7), 49, 34},
+  {GRISU_UINT64_C(0xf0bdc21a, bb48db20), 59, 37},
+  {GRISU_UINT64_C(0xeb194f8e, 1ae525fd), 69, 40},
+  {GRISU_UINT64_C(0xe596b7b0, c643c719), 79, 43},
+  {GRISU_UINT64_C(0xe0352f62, a19e306f), 89, 46},
+  {GRISU_UINT64_C(0xdaf3f046, 51d47b4c), 99, 49},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0xd0cf4b50, cfe20766), 119, 55},
+  {GRISU_UINT64_C(0xcbea6f8c, eb02bb3a), 129, 58},
+  {GRISU_UINT64_C(0xc722f0ef, 9d80aad6), 139, 61},
+  {GRISU_UINT64_C(0xc2781f49, ffcfa6d5), 149, 64},
+  {GRISU_UINT64_C(0xbde94e8e, 43d0c8ec), 159, 67},
+  {GRISU_UINT64_C(0xb975d6b6, ee39e437), 169, 70},
+  {GRISU_UINT64_C(0xb51d13ae, a4a488dd), 179, 73},
+  {GRISU_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
+  {GRISU_UINT64_C(0xacb92ed9, 397bf996), 199, 79},
+  {GRISU_UINT64_C(0xa8acd7c0, 222311bd), 209, 82},
+  {GRISU_UINT64_C(0xa4b8cab1, a1563f52), 219, 85},
+  {GRISU_UINT64_C(0xa0dc75f1, 778e39d6), 229, 88},
+  {GRISU_UINT64_C(0x9d174b2d, cec0e47b), 239, 91},
+  {GRISU_UINT64_C(0x9968bf6a, bbe85f20), 249, 94},
+  {GRISU_UINT64_C(0x95d04aee, 3b80ece6), 259, 97},
+  {GRISU_UINT64_C(0x924d692c, a61be758), 269, 100},
+  {GRISU_UINT64_C(0x8edf98b5, 9a373fec), 279, 103},
+  {GRISU_UINT64_C(0x8b865b21, 5899f46d), 289, 106},
+  {GRISU_UINT64_C(0x884134fe, 908658b2), 299, 109},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0x81f14fae, 158c5f6e), 319, 115},
+  {GRISU_UINT64_C(0xfdcb4fa0, 02162a63), 328, 118},
+  {GRISU_UINT64_C(0xf7d88bc2, 4209a565), 338, 121},
+  {GRISU_UINT64_C(0xf209787b, b47d6b85), 348, 124},
+  {GRISU_UINT64_C(0xec5d3fa8, ce427b00), 358, 127},
+  {GRISU_UINT64_C(0xe6d3102a, d96cec1e), 368, 130},
+  {GRISU_UINT64_C(0xe16a1dc9, d8545e95), 378, 133},
+  {GRISU_UINT64_C(0xdc21a117, 1d42645d), 388, 136},
+  {GRISU_UINT64_C(0xd6f8d750, 9292d603), 398, 139},
+  {GRISU_UINT64_C(0xd1ef0244, af2364ff), 408, 142},
+  {GRISU_UINT64_C(0xcd036837, 130890a1), 418, 145},
+  {GRISU_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
+  {GRISU_UINT64_C(0xc38413cf, 25e2d70e), 438, 151},
+  {GRISU_UINT64_C(0xbeeefb58, 4aff8604), 448, 154},
+  {GRISU_UINT64_C(0xba756174, 393d88e0), 458, 157},
+  {GRISU_UINT64_C(0xb616a12b, 7fe617aa), 468, 160},
+  {GRISU_UINT64_C(0xb1d21964, 7ae6b31c), 478, 163},
+  {GRISU_UINT64_C(0xada72ccc, 20054aea), 488, 166},
+  {GRISU_UINT64_C(0xa99541bf, 57452b28), 498, 169},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0xa1ba1ba7, 9e1632dc), 518, 175},
+  {GRISU_UINT64_C(0x9defbf01, b061adab), 528, 178},
+  {GRISU_UINT64_C(0x9a3c2087, a63f6399), 538, 181},
+  {GRISU_UINT64_C(0x969eb7c4, 7859e744), 548, 184},
+  {GRISU_UINT64_C(0x9316ff75, dd87cbd8), 558, 187},
+  {GRISU_UINT64_C(0x8fa47579, 1a569d11), 568, 190},
+  {GRISU_UINT64_C(0x8c469ab8, 43b89563), 578, 193},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0x85c70565, 62757457), 598, 199},
+  {GRISU_UINT64_C(0x82a45b45, 0226b39d), 608, 202},
+  {GRISU_UINT64_C(0xff290242, c83396ce), 617, 205},
+  {GRISU_UINT64_C(0xf92e0c35, 37826146), 627, 208},
+  {GRISU_UINT64_C(0xf356f7eb, f83552fe), 637, 211},
+  {GRISU_UINT64_C(0xeda2ee1c, 7064130c), 647, 214},
+  {GRISU_UINT64_C(0xe8111c87, c5c1ba9a), 657, 217},
+  {GRISU_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
+  {GRISU_UINT64_C(0xdd50f199, 6b947519), 677, 223},
+  {GRISU_UINT64_C(0xd8210bef, d30efa5a), 687, 226},
+  {GRISU_UINT64_C(0xd31045a8, 341ca07c), 697, 229},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0xc94930ae, 1d529cfd), 717, 235},
+  {GRISU_UINT64_C(0xc491798a, 08a2ad4f), 727, 238},
+  {GRISU_UINT64_C(0xbff610b0, cc6edd3f), 737, 241},
+  {GRISU_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
+  {GRISU_UINT64_C(0xb7118682, dbb66a77), 757, 247},
+  {GRISU_UINT64_C(0xb2c71d5b, ca9023f8), 767, 250},
+  {GRISU_UINT64_C(0xae9672ab, a3d0c321), 777, 253},
+  {GRISU_UINT64_C(0xaa7eebfb, 9df9de8e), 787, 256},
+  {GRISU_UINT64_C(0xa67ff273, b8460357), 797, 259},
+  {GRISU_UINT64_C(0xa298f2c5, 01f45f43), 807, 262},
+  {GRISU_UINT64_C(0x9ec95d14, 63e8a507), 817, 265},
+  {GRISU_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
+  {GRISU_UINT64_C(0x976e4108, 8617ca02), 837, 271},
+  {GRISU_UINT64_C(0x93e1ab82, 52f33b46), 847, 274},
+  {GRISU_UINT64_C(0x906a617d, 450187e2), 857, 277},
+  {GRISU_UINT64_C(0x8d07e334, 55637eb3), 867, 280},
+  {GRISU_UINT64_C(0x89b9b3e1, 1b6329bb), 877, 283},
+  {GRISU_UINT64_C(0x867f59a9, d4bed6c0), 887, 286},
+  {GRISU_UINT64_C(0x83585d8f, d9c25db8), 897, 289},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0xfa856334, 878fc151), 916, 295},
+  {GRISU_UINT64_C(0xf4a642e1, 4c6262c9), 926, 298},
+  {GRISU_UINT64_C(0xeeea5d50, 04981478), 936, 301},
+  {GRISU_UINT64_C(0xe950df20, 247c83fd), 946, 304},
+  {GRISU_UINT64_C(0xe3d8f9e5, 63a198e5), 956, 307},
+  {GRISU_UINT64_C(0xde81e40a, 034bcf50), 966, 310},
+  {GRISU_UINT64_C(0xd94ad8b1, c7380874), 976, 313},
+  {GRISU_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
+  {GRISU_UINT64_C(0xcf39e50f, eae16bf0), 996, 319},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0xc5a05277, 621be294), 1016, 325},
+  {GRISU_UINT64_C(0xc0fe9088, 95cf3b44), 1026, 328},
+  {GRISU_UINT64_C(0xbc789925, 624c5fe1), 1036, 331},
+  {GRISU_UINT64_C(0xb80dc58e, 81fe95a1), 1046, 334},
+  {GRISU_UINT64_C(0xb3bd72ed, 2af29e20), 1056, 337},
+  {GRISU_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(3) = 10;
+// nb elements (3): 217
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(4)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0x8c71dcd9, ba0b4926), -1073, -304},
+  {GRISU_UINT64_C(0xab70fe17, c79ac6ca), -1060, -300},
+  {GRISU_UINT64_C(0xd1476e2c, 07286faa), -1047, -296},
+  {GRISU_UINT64_C(0xff77b1fc, bebcdc4f), -1034, -292},
+  {GRISU_UINT64_C(0x9becce62, 836ac577), -1020, -288},
+  {GRISU_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
+  {GRISU_UINT64_C(0xe858ad24, 8f5c22ca), -994, -280},
+  {GRISU_UINT64_C(0x8dd01fad, 907ffc3c), -980, -276},
+  {GRISU_UINT64_C(0xad1c8eab, 5ee43b67), -967, -272},
+  {GRISU_UINT64_C(0xd3515c28, 31559a83), -954, -268},
+  {GRISU_UINT64_C(0x80fa687f, 881c7f8e), -940, -264},
+  {GRISU_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
+  {GRISU_UINT64_C(0xc0314325, 637a193a), -914, -256},
+  {GRISU_UINT64_C(0xea9c2277, 23ee8bcb), -901, -252},
+  {GRISU_UINT64_C(0x8f31cc09, 37ae58d3), -887, -248},
+  {GRISU_UINT64_C(0xaecc4991, 4078536d), -874, -244},
+  {GRISU_UINT64_C(0xd5605fcd, cf32e1d7), -861, -240},
+  {GRISU_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
+  {GRISU_UINT64_C(0x9efa548d, 26e5a6e2), -834, -232},
+  {GRISU_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
+  {GRISU_UINT64_C(0xece53cec, 4a314ebe), -808, -224},
+  {GRISU_UINT64_C(0x9096ea6f, 3848984f), -794, -220},
+  {GRISU_UINT64_C(0xb080392c, c4349ded), -781, -216},
+  {GRISU_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
+  {GRISU_UINT64_C(0x8380dea9, 3da4bc60), -754, -208},
+  {GRISU_UINT64_C(0xa086cfcd, 97bf97f4), -741, -204},
+  {GRISU_UINT64_C(0xc3f490aa, 77bd60fd), -728, -200},
+  {GRISU_UINT64_C(0xef340a98, 172aace5), -715, -196},
+  {GRISU_UINT64_C(0x91ff8377, 5423cc06), -701, -192},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0xd98ddaee, 19068c76), -675, -184},
+  {GRISU_UINT64_C(0x84c8d4df, d2c63f3b), -661, -180},
+  {GRISU_UINT64_C(0xa21727db, 38cb0030), -648, -176},
+  {GRISU_UINT64_C(0xc5dd4427, 1ad3cdba), -635, -172},
+  {GRISU_UINT64_C(0xf18899b1, bc3f8ca2), -622, -168},
+  {GRISU_UINT64_C(0x936b9fce, bb25c996), -608, -164},
+  {GRISU_UINT64_C(0xb3f4e093, db73a093), -595, -160},
+  {GRISU_UINT64_C(0xdbac6c24, 7d62a584), -582, -156},
+  {GRISU_UINT64_C(0x8613fd01, 45877586), -568, -152},
+  {GRISU_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+  {GRISU_UINT64_C(0xc7caba6e, 7c5382c9), -542, -144},
+  {GRISU_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
+  {GRISU_UINT64_C(0x94db4838, 40b717f0), -515, -136},
+  {GRISU_UINT64_C(0xb5b5ada8, aaff80b8), -502, -132},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0x87625f05, 6c7c4a8b), -475, -124},
+  {GRISU_UINT64_C(0xa54394fe, 1eedb8ff), -462, -120},
+  {GRISU_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
+  {GRISU_UINT64_C(0xf64335bc, f065d37d), -436, -112},
+  {GRISU_UINT64_C(0x964e858c, 91ba2655), -422, -108},
+  {GRISU_UINT64_C(0xb77ada06, 17e3bbcb), -409, -104},
+  {GRISU_UINT64_C(0xdff97724, 70297ebd), -396, -100},
+  {GRISU_UINT64_C(0x88b402f7, fd75539b), -382, -96},
+  {GRISU_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+  {GRISU_UINT64_C(0xcbb41ef9, 79346bca), -356, -88},
+  {GRISU_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
+  {GRISU_UINT64_C(0x97c560ba, 6b0919a6), -329, -80},
+  {GRISU_UINT64_C(0xb9447093, 8fa89bcf), -316, -76},
+  {GRISU_UINT64_C(0xe2280b6c, 20dd5232), -303, -72},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0xa87fea27, a539e9a5), -276, -64},
+  {GRISU_UINT64_C(0xcdb02555, 653131b6), -263, -60},
+  {GRISU_UINT64_C(0xfb158592, be068d2f), -250, -56},
+  {GRISU_UINT64_C(0x993fe2c6, d07b7fac), -236, -52},
+  {GRISU_UINT64_C(0xbb127c53, b17ec159), -223, -48},
+  {GRISU_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
+  {GRISU_UINT64_C(0x8b61313b, babce2c6), -196, -40},
+  {GRISU_UINT64_C(0xaa242499, 697392d3), -183, -36},
+  {GRISU_UINT64_C(0xcfb11ead, 453994ba), -170, -32},
+  {GRISU_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
+  {GRISU_UINT64_C(0x9abe14cd, 44753b53), -143, -24},
+  {GRISU_UINT64_C(0xbce50864, 92111aeb), -130, -20},
+  {GRISU_UINT64_C(0xe69594be, c44de15b), -117, -16},
+  {GRISU_UINT64_C(0x8cbccc09, 6f5088cc), -103, -12},
+  {GRISU_UINT64_C(0xabcc7711, 8461cefd), -90, -8},
+  {GRISU_UINT64_C(0xd1b71758, e219652c), -77, -4},
+  {GRISU_UINT64_C(0x80000000, 00000000), -63, 0},
+  {GRISU_UINT64_C(0x9c400000, 00000000), -50, 4},
+  {GRISU_UINT64_C(0xbebc2000, 00000000), -37, 8},
+  {GRISU_UINT64_C(0xe8d4a510, 00000000), -24, 12},
+  {GRISU_UINT64_C(0x8e1bc9bf, 04000000), -10, 16},
+  {GRISU_UINT64_C(0xad78ebc5, ac620000), 3, 20},
+  {GRISU_UINT64_C(0xd3c21bce, cceda100), 16, 24},
+  {GRISU_UINT64_C(0x813f3978, f8940984), 30, 28},
+  {GRISU_UINT64_C(0x9dc5ada8, 2b70b59e), 43, 32},
+  {GRISU_UINT64_C(0xc097ce7b, c90715b3), 56, 36},
+  {GRISU_UINT64_C(0xeb194f8e, 1ae525fd), 69, 40},
+  {GRISU_UINT64_C(0x8f7e32ce, 7bea5c70), 83, 44},
+  {GRISU_UINT64_C(0xaf298d05, 0e4395d7), 96, 48},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0x82818f12, 81ed44a0), 123, 56},
+  {GRISU_UINT64_C(0x9f4f2726, 179a2245), 136, 60},
+  {GRISU_UINT64_C(0xc2781f49, ffcfa6d5), 149, 64},
+  {GRISU_UINT64_C(0xed63a231, d4c4fb27), 162, 68},
+  {GRISU_UINT64_C(0x90e40fbe, ea1d3a4b), 176, 72},
+  {GRISU_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
+  {GRISU_UINT64_C(0xd7e77a8f, 87daf7fc), 202, 80},
+  {GRISU_UINT64_C(0x83c7088e, 1aab65db), 216, 84},
+  {GRISU_UINT64_C(0xa0dc75f1, 778e39d6), 229, 88},
+  {GRISU_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
+  {GRISU_UINT64_C(0xefb3ab16, c59b14a3), 255, 96},
+  {GRISU_UINT64_C(0x924d692c, a61be758), 269, 100},
+  {GRISU_UINT64_C(0xb2977ee3, 00c50fe7), 282, 104},
+  {GRISU_UINT64_C(0xda01ee64, 1a708dea), 295, 108},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0xa26da399, 9aef774a), 322, 116},
+  {GRISU_UINT64_C(0xc646d635, 01a1511e), 335, 120},
+  {GRISU_UINT64_C(0xf209787b, b47d6b85), 348, 124},
+  {GRISU_UINT64_C(0x93ba47c9, 80e98ce0), 362, 128},
+  {GRISU_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
+  {GRISU_UINT64_C(0xdc21a117, 1d42645d), 388, 136},
+  {GRISU_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
+  {GRISU_UINT64_C(0xa402b9c5, a8d3a6e7), 415, 144},
+  {GRISU_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
+  {GRISU_UINT64_C(0xf46518c2, ef5b8cd1), 441, 152},
+  {GRISU_UINT64_C(0x952ab45c, fa97a0b3), 455, 156},
+  {GRISU_UINT64_C(0xb616a12b, 7fe617aa), 468, 160},
+  {GRISU_UINT64_C(0xde469fbd, 99a05fe3), 481, 164},
+  {GRISU_UINT64_C(0x87aa9aff, 79042287), 495, 168},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0xca28a291, 859bbf93), 521, 176},
+  {GRISU_UINT64_C(0xf6c69a72, a3989f5c), 534, 180},
+  {GRISU_UINT64_C(0x969eb7c4, 7859e744), 548, 184},
+  {GRISU_UINT64_C(0xb7dcbf53, 54e9bece), 561, 188},
+  {GRISU_UINT64_C(0xe070f78d, 3927556b), 574, 192},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0xa738c6be, bb12d16d), 601, 200},
+  {GRISU_UINT64_C(0xcc20ce9b, d35c78a5), 614, 204},
+  {GRISU_UINT64_C(0xf92e0c35, 37826146), 627, 208},
+  {GRISU_UINT64_C(0x98165af3, 7b2153df), 641, 212},
+  {GRISU_UINT64_C(0xb9a74a06, 37ce2ee1), 654, 216},
+  {GRISU_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
+  {GRISU_UINT64_C(0x8a5296ff, e33cc930), 681, 224},
+  {GRISU_UINT64_C(0xa8d9d153, 5ce3b396), 694, 228},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0xfb9b7cd9, a4a7443c), 720, 236},
+  {GRISU_UINT64_C(0x9991a6f3, d6bf1766), 734, 240},
+  {GRISU_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
+  {GRISU_UINT64_C(0xe4d5e823, 92a40515), 760, 248},
+  {GRISU_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
+  {GRISU_UINT64_C(0xaa7eebfb, 9df9de8e), 787, 256},
+  {GRISU_UINT64_C(0xd01fef10, a657842c), 800, 260},
+  {GRISU_UINT64_C(0xfe0efb53, d30dd4d8), 813, 264},
+  {GRISU_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
+  {GRISU_UINT64_C(0xbd49d14a, a79dbc82), 840, 272},
+  {GRISU_UINT64_C(0xe7109bfb, a19c0c9d), 853, 276},
+  {GRISU_UINT64_C(0x8d07e334, 55637eb3), 867, 280},
+  {GRISU_UINT64_C(0xac2820d9, 623bf429), 880, 284},
+  {GRISU_UINT64_C(0xd226fc19, 5c6a2f8c), 893, 288},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0x9c935e00, d4b9d8d2), 920, 296},
+  {GRISU_UINT64_C(0xbf21e440, 03acdd2d), 933, 300},
+  {GRISU_UINT64_C(0xe950df20, 247c83fd), 946, 304},
+  {GRISU_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
+  {GRISU_UINT64_C(0xadd57a27, d29339f6), 973, 312},
+  {GRISU_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
+  {GRISU_UINT64_C(0x81842f29, f2cce376), 1000, 320},
+  {GRISU_UINT64_C(0x9e19db92, b4e31ba9), 1013, 324},
+  {GRISU_UINT64_C(0xc0fe9088, 95cf3b44), 1026, 328},
+  {GRISU_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
+  {GRISU_UINT64_C(0x8fcac257, 558ee4e6), 1053, 336},
+  {GRISU_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(4) = 14;
+// nb elements (4): 163
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(5)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xaf8e5410, 288e1b6f), -1070, -303},
+  {GRISU_UINT64_C(0x85f04682, 93f0eb4e), -1053, -298},
+  {GRISU_UINT64_C(0xcc5fc196, fefd7d0c), -1037, -293},
+  {GRISU_UINT64_C(0x9becce62, 836ac577), -1020, -288},
+  {GRISU_UINT64_C(0xedec366b, 11c6cb8f), -1004, -283},
+  {GRISU_UINT64_C(0xb5854744, 8ffffb2e), -987, -278},
+  {GRISU_UINT64_C(0x8a7d3eef, 7f1cfc52), -970, -273},
+  {GRISU_UINT64_C(0xd3515c28, 31559a83), -954, -268},
+  {GRISU_UINT64_C(0xa139029f, 6a239f72), -937, -263},
+  {GRISU_UINT64_C(0xf6019da0, 7f549b2b), -921, -258},
+  {GRISU_UINT64_C(0xbbb01b92, 83253ca3), -904, -253},
+  {GRISU_UINT64_C(0x8f31cc09, 37ae58d3), -887, -248},
+  {GRISU_UINT64_C(0xda7f5bf5, 90966849), -871, -243},
+  {GRISU_UINT64_C(0xa6b34ad8, c9dfc070), -854, -238},
+  {GRISU_UINT64_C(0xfe5d5415, 0b090b03), -838, -233},
+  {GRISU_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
+  {GRISU_UINT64_C(0x940f4613, ae5ed137), -804, -223},
+  {GRISU_UINT64_C(0xe1ebce4d, c7f16dfc), -788, -218},
+  {GRISU_UINT64_C(0xac5d37d5, b79b6239), -771, -213},
+  {GRISU_UINT64_C(0x8380dea9, 3da4bc60), -754, -208},
+  {GRISU_UINT64_C(0xc8a883c0, fdaf7df0), -738, -203},
+  {GRISU_UINT64_C(0x99171105, 2d8bf3c5), -721, -198},
+  {GRISU_UINT64_C(0xe998d258, 869facd7), -705, -193},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0x87f8a8d4, cfa417ca), -671, -183},
+  {GRISU_UINT64_C(0xcf79cc9d, b955c2cc), -655, -178},
+  {GRISU_UINT64_C(0x9e4a9cec, 15763e2f), -638, -173},
+  {GRISU_UINT64_C(0xf18899b1, bc3f8ca2), -622, -168},
+  {GRISU_UINT64_C(0xb84687c2, 69ef3bfb), -605, -163},
+  {GRISU_UINT64_C(0x8c974f73, 83725573), -588, -158},
+  {GRISU_UINT64_C(0xd686619b, a27255a3), -572, -153},
+  {GRISU_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+  {GRISU_UINT64_C(0xf9bd690a, 1b68637b), -539, -143},
+  {GRISU_UINT64_C(0xbe895233, 86091466), -522, -138},
+  {GRISU_UINT64_C(0x915e2486, ef32cd60), -505, -133},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0xa93af6c6, c79b5d2e), -472, -123},
+  {GRISU_UINT64_C(0x811ccc66, 8829b887), -455, -118},
+  {GRISU_UINT64_C(0xc5029163, f384a931), -439, -113},
+  {GRISU_UINT64_C(0x964e858c, 91ba2655), -422, -108},
+  {GRISU_UINT64_C(0xe5599087, 9ddcaabe), -406, -103},
+  {GRISU_UINT64_C(0xaefae514, 77a06b04), -389, -98},
+  {GRISU_UINT64_C(0x857fcae6, 2d8493a5), -372, -93},
+  {GRISU_UINT64_C(0xcbb41ef9, 79346bca), -356, -88},
+  {GRISU_UINT64_C(0x9b69dbe1, b548ce7d), -339, -83},
+  {GRISU_UINT64_C(0xed246723, 473e3813), -323, -78},
+  {GRISU_UINT64_C(0xb4ecd5f0, 1a4aa828), -306, -73},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0xd29fe4b1, 8e88640f), -273, -63},
+  {GRISU_UINT64_C(0xa0b19d2a, b70e6ed6), -256, -58},
+  {GRISU_UINT64_C(0xf5330471, 4d9265e0), -240, -53},
+  {GRISU_UINT64_C(0xbb127c53, b17ec159), -223, -48},
+  {GRISU_UINT64_C(0x8eb98a7a, 9a5b04e3), -206, -43},
+  {GRISU_UINT64_C(0xd9c7dced, 53c72256), -190, -38},
+  {GRISU_UINT64_C(0xa6274bbd, d0fadd62), -173, -33},
+  {GRISU_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
+  {GRISU_UINT64_C(0xc16d9a00, 95928a27), -140, -23},
+  {GRISU_UINT64_C(0x9392ee8e, 921d5d07), -123, -18},
+  {GRISU_UINT64_C(0xe12e1342, 4bb40e13), -107, -13},
+  {GRISU_UINT64_C(0xabcc7711, 8461cefd), -90, -8},
+  {GRISU_UINT64_C(0x83126e97, 8d4fdf3b), -73, -3},
+  {GRISU_UINT64_C(0xc8000000, 00000000), -57, 2},
+  {GRISU_UINT64_C(0x98968000, 00000000), -40, 7},
+  {GRISU_UINT64_C(0xe8d4a510, 00000000), -24, 12},
+  {GRISU_UINT64_C(0xb1a2bc2e, c5000000), -7, 17},
+  {GRISU_UINT64_C(0x87867832, 6eac9000), 10, 22},
+  {GRISU_UINT64_C(0xcecb8f27, f4200f3a), 26, 27},
+  {GRISU_UINT64_C(0x9dc5ada8, 2b70b59e), 43, 32},
+  {GRISU_UINT64_C(0xf0bdc21a, bb48db20), 59, 37},
+  {GRISU_UINT64_C(0xb7abc627, 050305ae), 76, 42},
+  {GRISU_UINT64_C(0x8c213d9d, a502de45), 93, 47},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0xa321f2d7, 226895c8), 126, 57},
+  {GRISU_UINT64_C(0xf8ebad2b, 84e0d58c), 142, 62},
+  {GRISU_UINT64_C(0xbde94e8e, 43d0c8ec), 159, 67},
+  {GRISU_UINT64_C(0x90e40fbe, ea1d3a4b), 176, 72},
+  {GRISU_UINT64_C(0xdd15fe86, affad912), 192, 77},
+  {GRISU_UINT64_C(0xa8acd7c0, 222311bd), 209, 82},
+  {GRISU_UINT64_C(0x80b05e5a, c60b6178), 226, 87},
+  {GRISU_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
+  {GRISU_UINT64_C(0x95d04aee, 3b80ece6), 259, 97},
+  {GRISU_UINT64_C(0xe498f455, c38b997a), 275, 102},
+  {GRISU_UINT64_C(0xae67f1e9, aec07188), 292, 107},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0xcb090c80, 01ab551c), 325, 117},
+  {GRISU_UINT64_C(0x9ae75759, 6946075f), 342, 122},
+  {GRISU_UINT64_C(0xec5d3fa8, ce427b00), 358, 127},
+  {GRISU_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
+  {GRISU_UINT64_C(0x899504ae, 72497eba), 392, 137},
+  {GRISU_UINT64_C(0xd1ef0244, af2364ff), 408, 142},
+  {GRISU_UINT64_C(0xa02aa96b, 06deb0fe), 425, 147},
+  {GRISU_UINT64_C(0xf46518c2, ef5b8cd1), 441, 152},
+  {GRISU_UINT64_C(0xba756174, 393d88e0), 458, 157},
+  {GRISU_UINT64_C(0x8e41ade9, fbebc27d), 475, 162},
+  {GRISU_UINT64_C(0xd910f7ff, 28069da4), 491, 167},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0xfcb2cb35, e702af78), 524, 177},
+  {GRISU_UINT64_C(0xc0cb28a9, 8fcf3c80), 541, 182},
+  {GRISU_UINT64_C(0x9316ff75, dd87cbd8), 558, 187},
+  {GRISU_UINT64_C(0xe070f78d, 3927556b), 574, 192},
+  {GRISU_UINT64_C(0xab3c2fdd, eeaad25b), 591, 197},
+  {GRISU_UINT64_C(0x82a45b45, 0226b39d), 608, 202},
+  {GRISU_UINT64_C(0xc75809c4, 2c684dd1), 624, 207},
+  {GRISU_UINT64_C(0x98165af3, 7b2153df), 641, 212},
+  {GRISU_UINT64_C(0xe8111c87, c5c1ba9a), 657, 217},
+  {GRISU_UINT64_C(0xb10d8e14, 56105dad), 674, 222},
+  {GRISU_UINT64_C(0x8714a775, e3e95c78), 691, 227},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0x9d412e08, 06e88aa6), 724, 237},
+  {GRISU_UINT64_C(0xeff394dc, ff8a948f), 740, 242},
+  {GRISU_UINT64_C(0xb7118682, dbb66a77), 757, 247},
+  {GRISU_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
+  {GRISU_UINT64_C(0xd51ea6fa, 85785631), 790, 257},
+  {GRISU_UINT64_C(0xa298f2c5, 01f45f43), 807, 262},
+  {GRISU_UINT64_C(0xf81aa16f, dc1b81db), 823, 267},
+  {GRISU_UINT64_C(0xbd49d14a, a79dbc82), 840, 272},
+  {GRISU_UINT64_C(0x906a617d, 450187e2), 857, 277},
+  {GRISU_UINT64_C(0xdc5c5301, c56b75f7), 873, 282},
+  {GRISU_UINT64_C(0xa81f3014, 49ee8c70), 890, 287},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0xc3b83581, 09e84f07), 923, 297},
+  {GRISU_UINT64_C(0x95527a52, 02df0ccb), 940, 302},
+  {GRISU_UINT64_C(0xe3d8f9e5, 63a198e5), 956, 307},
+  {GRISU_UINT64_C(0xadd57a27, d29339f6), 973, 312},
+  {GRISU_UINT64_C(0x849feec2, 81d7f329), 990, 317},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0x9a65406d, 44a5c903), 1023, 327},
+  {GRISU_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
+  {GRISU_UINT64_C(0xb3bd72ed, 2af29e20), 1056, 337},
+  {GRISU_UINT64_C(0x892179be, 91d43a44), 1073, 342},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(5) = 17;
+// nb elements (5): 131
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(6)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xdb71e914, 32b1a24b), -1067, -302},
+  {GRISU_UINT64_C(0xd1476e2c, 07286faa), -1047, -296},
+  {GRISU_UINT64_C(0xc795830d, 75038c1e), -1027, -290},
+  {GRISU_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
+  {GRISU_UINT64_C(0xb5854744, 8ffffb2e), -987, -278},
+  {GRISU_UINT64_C(0xad1c8eab, 5ee43b67), -967, -272},
+  {GRISU_UINT64_C(0xa5178fff, 668ae0b6), -947, -266},
+  {GRISU_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
+  {GRISU_UINT64_C(0x96267c75, 35b763b5), -907, -254},
+  {GRISU_UINT64_C(0x8f31cc09, 37ae58d3), -887, -248},
+  {GRISU_UINT64_C(0x888f9979, 7a5e012d), -867, -242},
+  {GRISU_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
+  {GRISU_UINT64_C(0xf867241c, 8cc6d4c1), -828, -230},
+  {GRISU_UINT64_C(0xece53cec, 4a314ebe), -808, -224},
+  {GRISU_UINT64_C(0xe1ebce4d, c7f16dfc), -788, -218},
+  {GRISU_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
+  {GRISU_UINT64_C(0xcd795be8, 70516656), -748, -206},
+  {GRISU_UINT64_C(0xc3f490aa, 77bd60fd), -728, -200},
+  {GRISU_UINT64_C(0xbae0a846, d2195713), -708, -194},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0xa9f6d30a, 038d1dbc), -668, -182},
+  {GRISU_UINT64_C(0xa21727db, 38cb0030), -648, -176},
+  {GRISU_UINT64_C(0x9a94dd3e, 8cf578ba), -628, -170},
+  {GRISU_UINT64_C(0x936b9fce, bb25c996), -608, -164},
+  {GRISU_UINT64_C(0x8c974f73, 83725573), -588, -158},
+  {GRISU_UINT64_C(0x8613fd01, 45877586), -568, -152},
+  {GRISU_UINT64_C(0xffbbcfe9, 94e5c620), -549, -146},
+  {GRISU_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
+  {GRISU_UINT64_C(0xe896a0d7, e51e1566), -509, -134},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0xd389b478, 79823479), -469, -122},
+  {GRISU_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
+  {GRISU_UINT64_C(0xc06481fb, 9bcf8d3a), -429, -110},
+  {GRISU_UINT64_C(0xb77ada06, 17e3bbcb), -409, -104},
+  {GRISU_UINT64_C(0xaefae514, 77a06b04), -389, -98},
+  {GRISU_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+  {GRISU_UINT64_C(0x9f24b832, e6b0f436), -349, -86},
+  {GRISU_UINT64_C(0x97c560ba, 6b0919a6), -329, -80},
+  {GRISU_UINT64_C(0x90bd77f3, 483bb9ba), -309, -74},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0x83a3eeee, f9153e89), -269, -62},
+  {GRISU_UINT64_C(0xfb158592, be068d2f), -250, -56},
+  {GRISU_UINT64_C(0xef73d256, a5c0f77d), -230, -50},
+  {GRISU_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
+  {GRISU_UINT64_C(0xd9c7dced, 53c72256), -190, -38},
+  {GRISU_UINT64_C(0xcfb11ead, 453994ba), -170, -32},
+  {GRISU_UINT64_C(0xc6120625, 76589ddb), -150, -26},
+  {GRISU_UINT64_C(0xbce50864, 92111aeb), -130, -20},
+  {GRISU_UINT64_C(0xb424dc35, 095cd80f), -110, -14},
+  {GRISU_UINT64_C(0xabcc7711, 8461cefd), -90, -8},
+  {GRISU_UINT64_C(0xa3d70a3d, 70a3d70a), -70, -2},
+  {GRISU_UINT64_C(0x9c400000, 00000000), -50, 4},
+  {GRISU_UINT64_C(0x9502f900, 00000000), -30, 10},
+  {GRISU_UINT64_C(0x8e1bc9bf, 04000000), -10, 16},
+  {GRISU_UINT64_C(0x87867832, 6eac9000), 10, 22},
+  {GRISU_UINT64_C(0x813f3978, f8940984), 30, 28},
+  {GRISU_UINT64_C(0xf684df56, c3e01bc7), 49, 34},
+  {GRISU_UINT64_C(0xeb194f8e, 1ae525fd), 69, 40},
+  {GRISU_UINT64_C(0xe0352f62, a19e306f), 89, 46},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0xcbea6f8c, eb02bb3a), 129, 58},
+  {GRISU_UINT64_C(0xc2781f49, ffcfa6d5), 149, 64},
+  {GRISU_UINT64_C(0xb975d6b6, ee39e437), 169, 70},
+  {GRISU_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
+  {GRISU_UINT64_C(0xa8acd7c0, 222311bd), 209, 82},
+  {GRISU_UINT64_C(0xa0dc75f1, 778e39d6), 229, 88},
+  {GRISU_UINT64_C(0x9968bf6a, bbe85f20), 249, 94},
+  {GRISU_UINT64_C(0x924d692c, a61be758), 269, 100},
+  {GRISU_UINT64_C(0x8b865b21, 5899f46d), 289, 106},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0xfdcb4fa0, 02162a63), 328, 118},
+  {GRISU_UINT64_C(0xf209787b, b47d6b85), 348, 124},
+  {GRISU_UINT64_C(0xe6d3102a, d96cec1e), 368, 130},
+  {GRISU_UINT64_C(0xdc21a117, 1d42645d), 388, 136},
+  {GRISU_UINT64_C(0xd1ef0244, af2364ff), 408, 142},
+  {GRISU_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
+  {GRISU_UINT64_C(0xbeeefb58, 4aff8604), 448, 154},
+  {GRISU_UINT64_C(0xb616a12b, 7fe617aa), 468, 160},
+  {GRISU_UINT64_C(0xada72ccc, 20054aea), 488, 166},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0x9defbf01, b061adab), 528, 178},
+  {GRISU_UINT64_C(0x969eb7c4, 7859e744), 548, 184},
+  {GRISU_UINT64_C(0x8fa47579, 1a569d11), 568, 190},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0x82a45b45, 0226b39d), 608, 202},
+  {GRISU_UINT64_C(0xf92e0c35, 37826146), 627, 208},
+  {GRISU_UINT64_C(0xeda2ee1c, 7064130c), 647, 214},
+  {GRISU_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
+  {GRISU_UINT64_C(0xd8210bef, d30efa5a), 687, 226},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0xc491798a, 08a2ad4f), 727, 238},
+  {GRISU_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
+  {GRISU_UINT64_C(0xb2c71d5b, ca9023f8), 767, 250},
+  {GRISU_UINT64_C(0xaa7eebfb, 9df9de8e), 787, 256},
+  {GRISU_UINT64_C(0xa298f2c5, 01f45f43), 807, 262},
+  {GRISU_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
+  {GRISU_UINT64_C(0x93e1ab82, 52f33b46), 847, 274},
+  {GRISU_UINT64_C(0x8d07e334, 55637eb3), 867, 280},
+  {GRISU_UINT64_C(0x867f59a9, d4bed6c0), 887, 286},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0xf4a642e1, 4c6262c9), 926, 298},
+  {GRISU_UINT64_C(0xe950df20, 247c83fd), 946, 304},
+  {GRISU_UINT64_C(0xde81e40a, 034bcf50), 966, 310},
+  {GRISU_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0xc0fe9088, 95cf3b44), 1026, 328},
+  {GRISU_UINT64_C(0xb80dc58e, 81fe95a1), 1046, 334},
+  {GRISU_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(6) = 20;
+// nb elements (6): 109
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(7)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0x892731ac, 9faf056f), -1063, -301},
+  {GRISU_UINT64_C(0xa37fce12, 6597973d), -1040, -294},
+  {GRISU_UINT64_C(0xc2e801fb, 244576d5), -1017, -287},
+  {GRISU_UINT64_C(0xe858ad24, 8f5c22ca), -994, -280},
+  {GRISU_UINT64_C(0x8a7d3eef, 7f1cfc52), -970, -273},
+  {GRISU_UINT64_C(0xa5178fff, 668ae0b6), -947, -266},
+  {GRISU_UINT64_C(0xc4ce17b3, 99107c23), -924, -259},
+  {GRISU_UINT64_C(0xea9c2277, 23ee8bcb), -901, -252},
+  {GRISU_UINT64_C(0x8bd6a141, 006042be), -877, -245},
+  {GRISU_UINT64_C(0xa6b34ad8, c9dfc070), -854, -238},
+  {GRISU_UINT64_C(0xc6b8e9b0, 709f109a), -831, -231},
+  {GRISU_UINT64_C(0xece53cec, 4a314ebe), -808, -224},
+  {GRISU_UINT64_C(0x8d3360f0, 9cf6e4bd), -784, -217},
+  {GRISU_UINT64_C(0xa8530886, b54dbdec), -761, -210},
+  {GRISU_UINT64_C(0xc8a883c0, fdaf7df0), -738, -203},
+  {GRISU_UINT64_C(0xef340a98, 172aace5), -715, -196},
+  {GRISU_UINT64_C(0x8e938662, 882af53e), -691, -189},
+  {GRISU_UINT64_C(0xa9f6d30a, 038d1dbc), -668, -182},
+  {GRISU_UINT64_C(0xca9cf1d2, 06fdc03c), -645, -175},
+  {GRISU_UINT64_C(0xf18899b1, bc3f8ca2), -622, -168},
+  {GRISU_UINT64_C(0x8ff71a0f, e2c2e6dc), -598, -161},
+  {GRISU_UINT64_C(0xab9eb47c, 81f5114f), -575, -154},
+  {GRISU_UINT64_C(0xcc963fee, 10b7d1b3), -552, -147},
+  {GRISU_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
+  {GRISU_UINT64_C(0x915e2486, ef32cd60), -505, -133},
+  {GRISU_UINT64_C(0xad4ab711, 2eb3929e), -482, -126},
+  {GRISU_UINT64_C(0xce947a3d, a6a9273e), -459, -119},
+  {GRISU_UINT64_C(0xf64335bc, f065d37d), -436, -112},
+  {GRISU_UINT64_C(0x92c8ae6b, 464fc96f), -412, -105},
+  {GRISU_UINT64_C(0xaefae514, 77a06b04), -389, -98},
+  {GRISU_UINT64_C(0xd097ad07, a71f26b2), -366, -91},
+  {GRISU_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
+  {GRISU_UINT64_C(0x9436c076, 0c86e30c), -319, -77},
+  {GRISU_UINT64_C(0xb0af48ec, 79ace837), -296, -70},
+  {GRISU_UINT64_C(0xd29fe4b1, 8e88640f), -273, -63},
+  {GRISU_UINT64_C(0xfb158592, be068d2f), -250, -56},
+  {GRISU_UINT64_C(0x95a86376, 27989aae), -226, -49},
+  {GRISU_UINT64_C(0xb267ed19, 40f1c61c), -203, -42},
+  {GRISU_UINT64_C(0xd4ad2dbf, c3d07788), -180, -35},
+  {GRISU_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
+  {GRISU_UINT64_C(0x971da050, 74da7bef), -133, -21},
+  {GRISU_UINT64_C(0xb424dc35, 095cd80f), -110, -14},
+  {GRISU_UINT64_C(0xd6bf94d5, e57a42bc), -87, -7},
+  {GRISU_UINT64_C(0x80000000, 00000000), -63, 0},
+  {GRISU_UINT64_C(0x98968000, 00000000), -40, 7},
+  {GRISU_UINT64_C(0xb5e620f4, 80000000), -17, 14},
+  {GRISU_UINT64_C(0xd8d726b7, 177a8000), 6, 21},
+  {GRISU_UINT64_C(0x813f3978, f8940984), 30, 28},
+  {GRISU_UINT64_C(0x9a130b96, 3a6c115c), 53, 35},
+  {GRISU_UINT64_C(0xb7abc627, 050305ae), 76, 42},
+  {GRISU_UINT64_C(0xdaf3f046, 51d47b4c), 99, 49},
+  {GRISU_UINT64_C(0x82818f12, 81ed44a0), 123, 56},
+  {GRISU_UINT64_C(0x9b934c3b, 330c8577), 146, 63},
+  {GRISU_UINT64_C(0xb975d6b6, ee39e437), 169, 70},
+  {GRISU_UINT64_C(0xdd15fe86, affad912), 192, 77},
+  {GRISU_UINT64_C(0x83c7088e, 1aab65db), 216, 84},
+  {GRISU_UINT64_C(0x9d174b2d, cec0e47b), 239, 91},
+  {GRISU_UINT64_C(0xbb445da9, ca61281f), 262, 98},
+  {GRISU_UINT64_C(0xdf3d5e9b, c0f653e1), 285, 105},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0x9e9f11c4, 014dda7e), 332, 119},
+  {GRISU_UINT64_C(0xbd176620, a501fc00), 355, 126},
+  {GRISU_UINT64_C(0xe16a1dc9, d8545e95), 378, 133},
+  {GRISU_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
+  {GRISU_UINT64_C(0xa02aa96b, 06deb0fe), 425, 147},
+  {GRISU_UINT64_C(0xbeeefb58, 4aff8604), 448, 154},
+  {GRISU_UINT64_C(0xe39c4976, 5fdf9d95), 471, 161},
+  {GRISU_UINT64_C(0x87aa9aff, 79042287), 495, 168},
+  {GRISU_UINT64_C(0xa1ba1ba7, 9e1632dc), 518, 175},
+  {GRISU_UINT64_C(0xc0cb28a9, 8fcf3c80), 541, 182},
+  {GRISU_UINT64_C(0xe5d3ef28, 2a242e82), 564, 189},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0xa34d7216, 42b06084), 611, 203},
+  {GRISU_UINT64_C(0xc2abf989, 935ddbfe), 634, 210},
+  {GRISU_UINT64_C(0xe8111c87, c5c1ba9a), 657, 217},
+  {GRISU_UINT64_C(0x8a5296ff, e33cc930), 681, 224},
+  {GRISU_UINT64_C(0xa4e4b66b, 68b65d61), 704, 231},
+  {GRISU_UINT64_C(0xc491798a, 08a2ad4f), 727, 238},
+  {GRISU_UINT64_C(0xea53df5f, d18d5514), 750, 245},
+  {GRISU_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
+  {GRISU_UINT64_C(0xa67ff273, b8460357), 797, 259},
+  {GRISU_UINT64_C(0xc67bb459, 7ce2ce49), 820, 266},
+  {GRISU_UINT64_C(0xec9c459d, 51852ba3), 843, 273},
+  {GRISU_UINT64_C(0x8d07e334, 55637eb3), 867, 280},
+  {GRISU_UINT64_C(0xa81f3014, 49ee8c70), 890, 287},
+  {GRISU_UINT64_C(0xc86ab5c3, 9fa63441), 913, 294},
+  {GRISU_UINT64_C(0xeeea5d50, 04981478), 936, 301},
+  {GRISU_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
+  {GRISU_UINT64_C(0xa9c2794a, e3a3c69b), 983, 315},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0xf13e34aa, bb430a15), 1029, 329},
+  {GRISU_UINT64_C(0x8fcac257, 558ee4e6), 1053, 336},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(7) = 24;
+// nb elements (7): 93
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(8)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xab70fe17, c79ac6ca), -1060, -300},
+  {GRISU_UINT64_C(0xff77b1fc, bebcdc4f), -1034, -292},
+  {GRISU_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
+  {GRISU_UINT64_C(0x8dd01fad, 907ffc3c), -980, -276},
+  {GRISU_UINT64_C(0xd3515c28, 31559a83), -954, -268},
+  {GRISU_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
+  {GRISU_UINT64_C(0xea9c2277, 23ee8bcb), -901, -252},
+  {GRISU_UINT64_C(0xaecc4991, 4078536d), -874, -244},
+  {GRISU_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
+  {GRISU_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
+  {GRISU_UINT64_C(0x9096ea6f, 3848984f), -794, -220},
+  {GRISU_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
+  {GRISU_UINT64_C(0xa086cfcd, 97bf97f4), -741, -204},
+  {GRISU_UINT64_C(0xef340a98, 172aace5), -715, -196},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0x84c8d4df, d2c63f3b), -661, -180},
+  {GRISU_UINT64_C(0xc5dd4427, 1ad3cdba), -635, -172},
+  {GRISU_UINT64_C(0x936b9fce, bb25c996), -608, -164},
+  {GRISU_UINT64_C(0xdbac6c24, 7d62a584), -582, -156},
+  {GRISU_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+  {GRISU_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
+  {GRISU_UINT64_C(0xb5b5ada8, aaff80b8), -502, -132},
+  {GRISU_UINT64_C(0x87625f05, 6c7c4a8b), -475, -124},
+  {GRISU_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
+  {GRISU_UINT64_C(0x964e858c, 91ba2655), -422, -108},
+  {GRISU_UINT64_C(0xdff97724, 70297ebd), -396, -100},
+  {GRISU_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+  {GRISU_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
+  {GRISU_UINT64_C(0xb9447093, 8fa89bcf), -316, -76},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0xcdb02555, 653131b6), -263, -60},
+  {GRISU_UINT64_C(0x993fe2c6, d07b7fac), -236, -52},
+  {GRISU_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
+  {GRISU_UINT64_C(0xaa242499, 697392d3), -183, -36},
+  {GRISU_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
+  {GRISU_UINT64_C(0xbce50864, 92111aeb), -130, -20},
+  {GRISU_UINT64_C(0x8cbccc09, 6f5088cc), -103, -12},
+  {GRISU_UINT64_C(0xd1b71758, e219652c), -77, -4},
+  {GRISU_UINT64_C(0x9c400000, 00000000), -50, 4},
+  {GRISU_UINT64_C(0xe8d4a510, 00000000), -24, 12},
+  {GRISU_UINT64_C(0xad78ebc5, ac620000), 3, 20},
+  {GRISU_UINT64_C(0x813f3978, f8940984), 30, 28},
+  {GRISU_UINT64_C(0xc097ce7b, c90715b3), 56, 36},
+  {GRISU_UINT64_C(0x8f7e32ce, 7bea5c70), 83, 44},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0x9f4f2726, 179a2245), 136, 60},
+  {GRISU_UINT64_C(0xed63a231, d4c4fb27), 162, 68},
+  {GRISU_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
+  {GRISU_UINT64_C(0x83c7088e, 1aab65db), 216, 84},
+  {GRISU_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
+  {GRISU_UINT64_C(0x924d692c, a61be758), 269, 100},
+  {GRISU_UINT64_C(0xda01ee64, 1a708dea), 295, 108},
+  {GRISU_UINT64_C(0xa26da399, 9aef774a), 322, 116},
+  {GRISU_UINT64_C(0xf209787b, b47d6b85), 348, 124},
+  {GRISU_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
+  {GRISU_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
+  {GRISU_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
+  {GRISU_UINT64_C(0x952ab45c, fa97a0b3), 455, 156},
+  {GRISU_UINT64_C(0xde469fbd, 99a05fe3), 481, 164},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0xf6c69a72, a3989f5c), 534, 180},
+  {GRISU_UINT64_C(0xb7dcbf53, 54e9bece), 561, 188},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0xcc20ce9b, d35c78a5), 614, 204},
+  {GRISU_UINT64_C(0x98165af3, 7b2153df), 641, 212},
+  {GRISU_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
+  {GRISU_UINT64_C(0xa8d9d153, 5ce3b396), 694, 228},
+  {GRISU_UINT64_C(0xfb9b7cd9, a4a7443c), 720, 236},
+  {GRISU_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
+  {GRISU_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
+  {GRISU_UINT64_C(0xd01fef10, a657842c), 800, 260},
+  {GRISU_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
+  {GRISU_UINT64_C(0xe7109bfb, a19c0c9d), 853, 276},
+  {GRISU_UINT64_C(0xac2820d9, 623bf429), 880, 284},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0xbf21e440, 03acdd2d), 933, 300},
+  {GRISU_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
+  {GRISU_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
+  {GRISU_UINT64_C(0x9e19db92, b4e31ba9), 1013, 324},
+  {GRISU_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
+  {GRISU_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(8) = 27;
+// nb elements (8): 82
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(9)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xd64d3d9d, b981787d), -1057, -299},
+  {GRISU_UINT64_C(0xc795830d, 75038c1e), -1027, -290},
+  {GRISU_UINT64_C(0xb9e08a83, a5e34f08), -997, -281},
+  {GRISU_UINT64_C(0xad1c8eab, 5ee43b67), -967, -272},
+  {GRISU_UINT64_C(0xa139029f, 6a239f72), -937, -263},
+  {GRISU_UINT64_C(0x96267c75, 35b763b5), -907, -254},
+  {GRISU_UINT64_C(0x8bd6a141, 006042be), -877, -245},
+  {GRISU_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
+  {GRISU_UINT64_C(0xf294b943, e17a2bc4), -818, -227},
+  {GRISU_UINT64_C(0xe1ebce4d, c7f16dfc), -788, -218},
+  {GRISU_UINT64_C(0xd267caa8, 62a12d67), -758, -209},
+  {GRISU_UINT64_C(0xc3f490aa, 77bd60fd), -728, -200},
+  {GRISU_UINT64_C(0xb67f6455, 292cbf08), -698, -191},
+  {GRISU_UINT64_C(0xa9f6d30a, 038d1dbc), -668, -182},
+  {GRISU_UINT64_C(0x9e4a9cec, 15763e2f), -638, -173},
+  {GRISU_UINT64_C(0x936b9fce, bb25c996), -608, -164},
+  {GRISU_UINT64_C(0x894bc396, ce5da772), -578, -155},
+  {GRISU_UINT64_C(0xffbbcfe9, 94e5c620), -549, -146},
+  {GRISU_UINT64_C(0xee2ba6c0, 678b597f), -519, -137},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0xce947a3d, a6a9273e), -459, -119},
+  {GRISU_UINT64_C(0xc06481fb, 9bcf8d3a), -429, -110},
+  {GRISU_UINT64_C(0xb32df8e9, f3546564), -399, -101},
+  {GRISU_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+  {GRISU_UINT64_C(0x9b69dbe1, b548ce7d), -339, -83},
+  {GRISU_UINT64_C(0x90bd77f3, 483bb9ba), -309, -74},
+  {GRISU_UINT64_C(0x86ccbb52, ea94baeb), -279, -65},
+  {GRISU_UINT64_C(0xfb158592, be068d2f), -250, -56},
+  {GRISU_UINT64_C(0xe9d71b68, 9dde71b0), -220, -47},
+  {GRISU_UINT64_C(0xd9c7dced, 53c72256), -190, -38},
+  {GRISU_UINT64_C(0xcad2f7f5, 359a3b3e), -160, -29},
+  {GRISU_UINT64_C(0xbce50864, 92111aeb), -130, -20},
+  {GRISU_UINT64_C(0xafebff0b, cb24aaff), -100, -11},
+  {GRISU_UINT64_C(0xa3d70a3d, 70a3d70a), -70, -2},
+  {GRISU_UINT64_C(0x98968000, 00000000), -40, 7},
+  {GRISU_UINT64_C(0x8e1bc9bf, 04000000), -10, 16},
+  {GRISU_UINT64_C(0x84595161, 401484a0), 20, 25},
+  {GRISU_UINT64_C(0xf684df56, c3e01bc7), 49, 34},
+  {GRISU_UINT64_C(0xe596b7b0, c643c719), 79, 43},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0xc722f0ef, 9d80aad6), 139, 61},
+  {GRISU_UINT64_C(0xb975d6b6, ee39e437), 169, 70},
+  {GRISU_UINT64_C(0xacb92ed9, 397bf996), 199, 79},
+  {GRISU_UINT64_C(0xa0dc75f1, 778e39d6), 229, 88},
+  {GRISU_UINT64_C(0x95d04aee, 3b80ece6), 259, 97},
+  {GRISU_UINT64_C(0x8b865b21, 5899f46d), 289, 106},
+  {GRISU_UINT64_C(0x81f14fae, 158c5f6e), 319, 115},
+  {GRISU_UINT64_C(0xf209787b, b47d6b85), 348, 124},
+  {GRISU_UINT64_C(0xe16a1dc9, d8545e95), 378, 133},
+  {GRISU_UINT64_C(0xd1ef0244, af2364ff), 408, 142},
+  {GRISU_UINT64_C(0xc38413cf, 25e2d70e), 438, 151},
+  {GRISU_UINT64_C(0xb616a12b, 7fe617aa), 468, 160},
+  {GRISU_UINT64_C(0xa99541bf, 57452b28), 498, 169},
+  {GRISU_UINT64_C(0x9defbf01, b061adab), 528, 178},
+  {GRISU_UINT64_C(0x9316ff75, dd87cbd8), 558, 187},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0xff290242, c83396ce), 617, 205},
+  {GRISU_UINT64_C(0xeda2ee1c, 7064130c), 647, 214},
+  {GRISU_UINT64_C(0xdd50f199, 6b947519), 677, 223},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0xbff610b0, cc6edd3f), 737, 241},
+  {GRISU_UINT64_C(0xb2c71d5b, ca9023f8), 767, 250},
+  {GRISU_UINT64_C(0xa67ff273, b8460357), 797, 259},
+  {GRISU_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
+  {GRISU_UINT64_C(0x906a617d, 450187e2), 857, 277},
+  {GRISU_UINT64_C(0x867f59a9, d4bed6c0), 887, 286},
+  {GRISU_UINT64_C(0xfa856334, 878fc151), 916, 295},
+  {GRISU_UINT64_C(0xe950df20, 247c83fd), 946, 304},
+  {GRISU_UINT64_C(0xd94ad8b1, c7380874), 976, 313},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0xbc789925, 624c5fe1), 1036, 331},
+  {GRISU_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(9) = 30;
+// nb elements (9): 73
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(10)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0x85f04682, 93f0eb4e), -1053, -298},
+  {GRISU_UINT64_C(0x9becce62, 836ac577), -1020, -288},
+  {GRISU_UINT64_C(0xb5854744, 8ffffb2e), -987, -278},
+  {GRISU_UINT64_C(0xd3515c28, 31559a83), -954, -268},
+  {GRISU_UINT64_C(0xf6019da0, 7f549b2b), -921, -258},
+  {GRISU_UINT64_C(0x8f31cc09, 37ae58d3), -887, -248},
+  {GRISU_UINT64_C(0xa6b34ad8, c9dfc070), -854, -238},
+  {GRISU_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
+  {GRISU_UINT64_C(0xe1ebce4d, c7f16dfc), -788, -218},
+  {GRISU_UINT64_C(0x8380dea9, 3da4bc60), -754, -208},
+  {GRISU_UINT64_C(0x99171105, 2d8bf3c5), -721, -198},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0xcf79cc9d, b955c2cc), -655, -178},
+  {GRISU_UINT64_C(0xf18899b1, bc3f8ca2), -622, -168},
+  {GRISU_UINT64_C(0x8c974f73, 83725573), -588, -158},
+  {GRISU_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+  {GRISU_UINT64_C(0xbe895233, 86091466), -522, -138},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0x811ccc66, 8829b887), -455, -118},
+  {GRISU_UINT64_C(0x964e858c, 91ba2655), -422, -108},
+  {GRISU_UINT64_C(0xaefae514, 77a06b04), -389, -98},
+  {GRISU_UINT64_C(0xcbb41ef9, 79346bca), -356, -88},
+  {GRISU_UINT64_C(0xed246723, 473e3813), -323, -78},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0xa0b19d2a, b70e6ed6), -256, -58},
+  {GRISU_UINT64_C(0xbb127c53, b17ec159), -223, -48},
+  {GRISU_UINT64_C(0xd9c7dced, 53c72256), -190, -38},
+  {GRISU_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
+  {GRISU_UINT64_C(0x9392ee8e, 921d5d07), -123, -18},
+  {GRISU_UINT64_C(0xabcc7711, 8461cefd), -90, -8},
+  {GRISU_UINT64_C(0xc8000000, 00000000), -57, 2},
+  {GRISU_UINT64_C(0xe8d4a510, 00000000), -24, 12},
+  {GRISU_UINT64_C(0x87867832, 6eac9000), 10, 22},
+  {GRISU_UINT64_C(0x9dc5ada8, 2b70b59e), 43, 32},
+  {GRISU_UINT64_C(0xb7abc627, 050305ae), 76, 42},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0xf8ebad2b, 84e0d58c), 142, 62},
+  {GRISU_UINT64_C(0x90e40fbe, ea1d3a4b), 176, 72},
+  {GRISU_UINT64_C(0xa8acd7c0, 222311bd), 209, 82},
+  {GRISU_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
+  {GRISU_UINT64_C(0xe498f455, c38b997a), 275, 102},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0x9ae75759, 6946075f), 342, 122},
+  {GRISU_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
+  {GRISU_UINT64_C(0xd1ef0244, af2364ff), 408, 142},
+  {GRISU_UINT64_C(0xf46518c2, ef5b8cd1), 441, 152},
+  {GRISU_UINT64_C(0x8e41ade9, fbebc27d), 475, 162},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0xc0cb28a9, 8fcf3c80), 541, 182},
+  {GRISU_UINT64_C(0xe070f78d, 3927556b), 574, 192},
+  {GRISU_UINT64_C(0x82a45b45, 0226b39d), 608, 202},
+  {GRISU_UINT64_C(0x98165af3, 7b2153df), 641, 212},
+  {GRISU_UINT64_C(0xb10d8e14, 56105dad), 674, 222},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0xeff394dc, ff8a948f), 740, 242},
+  {GRISU_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
+  {GRISU_UINT64_C(0xa298f2c5, 01f45f43), 807, 262},
+  {GRISU_UINT64_C(0xbd49d14a, a79dbc82), 840, 272},
+  {GRISU_UINT64_C(0xdc5c5301, c56b75f7), 873, 282},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0x95527a52, 02df0ccb), 940, 302},
+  {GRISU_UINT64_C(0xadd57a27, d29339f6), 973, 312},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
+  {GRISU_UINT64_C(0x892179be, 91d43a44), 1073, 342},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(10) = 34;
+// nb elements (10): 66
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(11)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xa76c5823, 38ed2622), -1050, -297},
+  {GRISU_UINT64_C(0xf3a20279, ed56d48a), -1014, -286},
+  {GRISU_UINT64_C(0xb1442798, f49ffb4b), -977, -275},
+  {GRISU_UINT64_C(0x80fa687f, 881c7f8e), -940, -264},
+  {GRISU_UINT64_C(0xbbb01b92, 83253ca3), -904, -253},
+  {GRISU_UINT64_C(0x888f9979, 7a5e012d), -867, -242},
+  {GRISU_UINT64_C(0xc6b8e9b0, 709f109a), -831, -231},
+  {GRISU_UINT64_C(0x9096ea6f, 3848984f), -794, -220},
+  {GRISU_UINT64_C(0xd267caa8, 62a12d67), -758, -209},
+  {GRISU_UINT64_C(0x99171105, 2d8bf3c5), -721, -198},
+  {GRISU_UINT64_C(0xdec681f9, f4c31f31), -685, -187},
+  {GRISU_UINT64_C(0xa21727db, 38cb0030), -648, -176},
+  {GRISU_UINT64_C(0xebdf6617, 91d60f56), -612, -165},
+  {GRISU_UINT64_C(0xab9eb47c, 81f5114f), -575, -154},
+  {GRISU_UINT64_C(0xf9bd690a, 1b68637b), -539, -143},
+  {GRISU_UINT64_C(0xb5b5ada8, aaff80b8), -502, -132},
+  {GRISU_UINT64_C(0x843610cb, 4bf160cc), -465, -121},
+  {GRISU_UINT64_C(0xc06481fb, 9bcf8d3a), -429, -110},
+  {GRISU_UINT64_C(0x8bfbea76, c619ef36), -392, -99},
+  {GRISU_UINT64_C(0xcbb41ef9, 79346bca), -356, -88},
+  {GRISU_UINT64_C(0x9436c076, 0c86e30c), -319, -77},
+  {GRISU_UINT64_C(0xd7adf884, aa879177), -283, -66},
+  {GRISU_UINT64_C(0x9ced737b, b6c4183d), -246, -55},
+  {GRISU_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
+  {GRISU_UINT64_C(0xa6274bbd, d0fadd62), -173, -33},
+  {GRISU_UINT64_C(0xf1c90080, baf72cb1), -137, -22},
+  {GRISU_UINT64_C(0xafebff0b, cb24aaff), -100, -11},
+  {GRISU_UINT64_C(0x80000000, 00000000), -63, 0},
+  {GRISU_UINT64_C(0xba43b740, 00000000), -27, 11},
+  {GRISU_UINT64_C(0x87867832, 6eac9000), 10, 22},
+  {GRISU_UINT64_C(0xc5371912, 364ce305), 46, 33},
+  {GRISU_UINT64_C(0x8f7e32ce, 7bea5c70), 83, 44},
+  {GRISU_UINT64_C(0xd0cf4b50, cfe20766), 119, 55},
+  {GRISU_UINT64_C(0x97edd871, cfda3a57), 156, 66},
+  {GRISU_UINT64_C(0xdd15fe86, affad912), 192, 77},
+  {GRISU_UINT64_C(0xa0dc75f1, 778e39d6), 229, 88},
+  {GRISU_UINT64_C(0xea157514, 3cf97227), 265, 99},
+  {GRISU_UINT64_C(0xaa51823e, 34a7eedf), 302, 110},
+  {GRISU_UINT64_C(0xf7d88bc2, 4209a565), 338, 121},
+  {GRISU_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
+  {GRISU_UINT64_C(0x8335616a, ed761f1f), 412, 143},
+  {GRISU_UINT64_C(0xbeeefb58, 4aff8604), 448, 154},
+  {GRISU_UINT64_C(0x8aec23d6, 80043bee), 485, 165},
+  {GRISU_UINT64_C(0xca28a291, 859bbf93), 521, 176},
+  {GRISU_UINT64_C(0x9316ff75, dd87cbd8), 558, 187},
+  {GRISU_UINT64_C(0xd60b3bd5, 6a5586f2), 594, 198},
+  {GRISU_UINT64_C(0x9bbcc7a1, 42b17ccc), 631, 209},
+  {GRISU_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
+  {GRISU_UINT64_C(0xa4e4b66b, 68b65d61), 704, 231},
+  {GRISU_UINT64_C(0xeff394dc, ff8a948f), 740, 242},
+  {GRISU_UINT64_C(0xae9672ab, a3d0c321), 777, 253},
+  {GRISU_UINT64_C(0xfe0efb53, d30dd4d8), 813, 264},
+  {GRISU_UINT64_C(0xb8da1662, e7b00a17), 850, 275},
+  {GRISU_UINT64_C(0x867f59a9, d4bed6c0), 887, 286},
+  {GRISU_UINT64_C(0xc3b83581, 09e84f07), 923, 297},
+  {GRISU_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
+  {GRISU_UINT64_C(0xcf39e50f, eae16bf0), 996, 319},
+  {GRISU_UINT64_C(0x96c6e0ea, b509e64d), 1033, 330},
+  {GRISU_UINT64_C(0xdb68c2ca, 82ed2a06), 1069, 341},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(11) = 37;
+// nb elements (11): 60
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(12)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xd1476e2c, 07286faa), -1047, -296},
+  {GRISU_UINT64_C(0xbe5691ef, 416bd60c), -1007, -284},
+  {GRISU_UINT64_C(0xad1c8eab, 5ee43b67), -967, -272},
+  {GRISU_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
+  {GRISU_UINT64_C(0x8f31cc09, 37ae58d3), -887, -248},
+  {GRISU_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
+  {GRISU_UINT64_C(0xece53cec, 4a314ebe), -808, -224},
+  {GRISU_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
+  {GRISU_UINT64_C(0xc3f490aa, 77bd60fd), -728, -200},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0xa21727db, 38cb0030), -648, -176},
+  {GRISU_UINT64_C(0x936b9fce, bb25c996), -608, -164},
+  {GRISU_UINT64_C(0x8613fd01, 45877586), -568, -152},
+  {GRISU_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
+  {GRISU_UINT64_C(0xb77ada06, 17e3bbcb), -409, -104},
+  {GRISU_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+  {GRISU_UINT64_C(0x97c560ba, 6b0919a6), -329, -80},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0xfb158592, be068d2f), -250, -56},
+  {GRISU_UINT64_C(0xe45c10c4, 2a2b3b06), -210, -44},
+  {GRISU_UINT64_C(0xcfb11ead, 453994ba), -170, -32},
+  {GRISU_UINT64_C(0xbce50864, 92111aeb), -130, -20},
+  {GRISU_UINT64_C(0xabcc7711, 8461cefd), -90, -8},
+  {GRISU_UINT64_C(0x9c400000, 00000000), -50, 4},
+  {GRISU_UINT64_C(0x8e1bc9bf, 04000000), -10, 16},
+  {GRISU_UINT64_C(0x813f3978, f8940984), 30, 28},
+  {GRISU_UINT64_C(0xeb194f8e, 1ae525fd), 69, 40},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0xc2781f49, ffcfa6d5), 149, 64},
+  {GRISU_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
+  {GRISU_UINT64_C(0xa0dc75f1, 778e39d6), 229, 88},
+  {GRISU_UINT64_C(0x924d692c, a61be758), 269, 100},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0xf209787b, b47d6b85), 348, 124},
+  {GRISU_UINT64_C(0xdc21a117, 1d42645d), 388, 136},
+  {GRISU_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
+  {GRISU_UINT64_C(0xb616a12b, 7fe617aa), 468, 160},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0x969eb7c4, 7859e744), 548, 184},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0xf92e0c35, 37826146), 627, 208},
+  {GRISU_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0xbb764c4c, a7a44410), 747, 244},
+  {GRISU_UINT64_C(0xaa7eebfb, 9df9de8e), 787, 256},
+  {GRISU_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
+  {GRISU_UINT64_C(0x8d07e334, 55637eb3), 867, 280},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0xe950df20, 247c83fd), 946, 304},
+  {GRISU_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
+  {GRISU_UINT64_C(0xc0fe9088, 95cf3b44), 1026, 328},
+  {GRISU_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(12) = 40;
+// nb elements (12): 55
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(13)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0x82cca4db, 847945ca), -1043, -295},
+  {GRISU_UINT64_C(0x94b3a202, eb1c3f39), -1000, -282},
+  {GRISU_UINT64_C(0xa90de353, 5aaae202), -957, -269},
+  {GRISU_UINT64_C(0xc0314325, 637a193a), -914, -256},
+  {GRISU_UINT64_C(0xda7f5bf5, 90966849), -871, -243},
+  {GRISU_UINT64_C(0xf867241c, 8cc6d4c1), -828, -230},
+  {GRISU_UINT64_C(0x8d3360f0, 9cf6e4bd), -784, -217},
+  {GRISU_UINT64_C(0xa086cfcd, 97bf97f4), -741, -204},
+  {GRISU_UINT64_C(0xb67f6455, 292cbf08), -698, -191},
+  {GRISU_UINT64_C(0xcf79cc9d, b955c2cc), -655, -178},
+  {GRISU_UINT64_C(0xebdf6617, 91d60f56), -612, -165},
+  {GRISU_UINT64_C(0x8613fd01, 45877586), -568, -152},
+  {GRISU_UINT64_C(0x986ddb5c, 6b3a76b8), -525, -139},
+  {GRISU_UINT64_C(0xad4ab711, 2eb3929e), -482, -126},
+  {GRISU_UINT64_C(0xc5029163, f384a931), -439, -113},
+  {GRISU_UINT64_C(0xdff97724, 70297ebd), -396, -100},
+  {GRISU_UINT64_C(0xfea126b7, d78186bd), -353, -87},
+  {GRISU_UINT64_C(0x90bd77f3, 483bb9ba), -309, -74},
+  {GRISU_UINT64_C(0xa48ceaaa, b75a8e2b), -266, -61},
+  {GRISU_UINT64_C(0xbb127c53, b17ec159), -223, -48},
+  {GRISU_UINT64_C(0xd4ad2dbf, c3d07788), -180, -35},
+  {GRISU_UINT64_C(0xf1c90080, baf72cb1), -137, -22},
+  {GRISU_UINT64_C(0x89705f41, 36b4a597), -93, -9},
+  {GRISU_UINT64_C(0x9c400000, 00000000), -50, 4},
+  {GRISU_UINT64_C(0xb1a2bc2e, c5000000), -7, 17},
+  {GRISU_UINT64_C(0xc9f2c9cd, 04674edf), 36, 30},
+  {GRISU_UINT64_C(0xe596b7b0, c643c719), 79, 43},
+  {GRISU_UINT64_C(0x82818f12, 81ed44a0), 123, 56},
+  {GRISU_UINT64_C(0x945e455f, 24fb1cf9), 166, 69},
+  {GRISU_UINT64_C(0xa8acd7c0, 222311bd), 209, 82},
+  {GRISU_UINT64_C(0xbfc2ef45, 6ae276e9), 252, 95},
+  {GRISU_UINT64_C(0xda01ee64, 1a708dea), 295, 108},
+  {GRISU_UINT64_C(0xf7d88bc2, 4209a565), 338, 121},
+  {GRISU_UINT64_C(0x8ce2529e, 2734bb1d), 382, 134},
+  {GRISU_UINT64_C(0xa02aa96b, 06deb0fe), 425, 147},
+  {GRISU_UINT64_C(0xb616a12b, 7fe617aa), 468, 160},
+  {GRISU_UINT64_C(0xcf02b2c2, 1207ef2f), 511, 173},
+  {GRISU_UINT64_C(0xeb57ff22, fc0c795a), 554, 186},
+  {GRISU_UINT64_C(0x85c70565, 62757457), 598, 199},
+  {GRISU_UINT64_C(0x98165af3, 7b2153df), 641, 212},
+  {GRISU_UINT64_C(0xace73cbf, dc0bfb7b), 684, 225},
+  {GRISU_UINT64_C(0xc491798a, 08a2ad4f), 727, 238},
+  {GRISU_UINT64_C(0xdf78e4b2, bd342cf7), 770, 251},
+  {GRISU_UINT64_C(0xfe0efb53, d30dd4d8), 813, 264},
+  {GRISU_UINT64_C(0x906a617d, 450187e2), 857, 277},
+  {GRISU_UINT64_C(0xa42e74f3, d032f526), 900, 290},
+  {GRISU_UINT64_C(0xbaa718e6, 8396cffe), 943, 303},
+  {GRISU_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
+  {GRISU_UINT64_C(0xf13e34aa, bb430a15), 1029, 329},
+  {GRISU_UINT64_C(0x892179be, 91d43a44), 1073, 342},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(13) = 44;
+// nb elements (13): 51
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(14)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xa37fce12, 6597973d), -1040, -294},
+  {GRISU_UINT64_C(0xe858ad24, 8f5c22ca), -994, -280},
+  {GRISU_UINT64_C(0xa5178fff, 668ae0b6), -947, -266},
+  {GRISU_UINT64_C(0xea9c2277, 23ee8bcb), -901, -252},
+  {GRISU_UINT64_C(0xa6b34ad8, c9dfc070), -854, -238},
+  {GRISU_UINT64_C(0xece53cec, 4a314ebe), -808, -224},
+  {GRISU_UINT64_C(0xa8530886, b54dbdec), -761, -210},
+  {GRISU_UINT64_C(0xef340a98, 172aace5), -715, -196},
+  {GRISU_UINT64_C(0xa9f6d30a, 038d1dbc), -668, -182},
+  {GRISU_UINT64_C(0xf18899b1, bc3f8ca2), -622, -168},
+  {GRISU_UINT64_C(0xab9eb47c, 81f5114f), -575, -154},
+  {GRISU_UINT64_C(0xf3e2f893, dec3f126), -529, -140},
+  {GRISU_UINT64_C(0xad4ab711, 2eb3929e), -482, -126},
+  {GRISU_UINT64_C(0xf64335bc, f065d37d), -436, -112},
+  {GRISU_UINT64_C(0xaefae514, 77a06b04), -389, -98},
+  {GRISU_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
+  {GRISU_UINT64_C(0xb0af48ec, 79ace837), -296, -70},
+  {GRISU_UINT64_C(0xfb158592, be068d2f), -250, -56},
+  {GRISU_UINT64_C(0xb267ed19, 40f1c61c), -203, -42},
+  {GRISU_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
+  {GRISU_UINT64_C(0xb424dc35, 095cd80f), -110, -14},
+  {GRISU_UINT64_C(0x80000000, 00000000), -63, 0},
+  {GRISU_UINT64_C(0xb5e620f4, 80000000), -17, 14},
+  {GRISU_UINT64_C(0x813f3978, f8940984), 30, 28},
+  {GRISU_UINT64_C(0xb7abc627, 050305ae), 76, 42},
+  {GRISU_UINT64_C(0x82818f12, 81ed44a0), 123, 56},
+  {GRISU_UINT64_C(0xb975d6b6, ee39e437), 169, 70},
+  {GRISU_UINT64_C(0x83c7088e, 1aab65db), 216, 84},
+  {GRISU_UINT64_C(0xbb445da9, ca61281f), 262, 98},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0xbd176620, a501fc00), 355, 126},
+  {GRISU_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
+  {GRISU_UINT64_C(0xbeeefb58, 4aff8604), 448, 154},
+  {GRISU_UINT64_C(0x87aa9aff, 79042287), 495, 168},
+  {GRISU_UINT64_C(0xc0cb28a9, 8fcf3c80), 541, 182},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0xc2abf989, 935ddbfe), 634, 210},
+  {GRISU_UINT64_C(0x8a5296ff, e33cc930), 681, 224},
+  {GRISU_UINT64_C(0xc491798a, 08a2ad4f), 727, 238},
+  {GRISU_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
+  {GRISU_UINT64_C(0xc67bb459, 7ce2ce49), 820, 266},
+  {GRISU_UINT64_C(0x8d07e334, 55637eb3), 867, 280},
+  {GRISU_UINT64_C(0xc86ab5c3, 9fa63441), 913, 294},
+  {GRISU_UINT64_C(0x8e679c2f, 5e44ff8f), 960, 308},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0x8fcac257, 558ee4e6), 1053, 336},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(14) = 47;
+// nb elements (14): 47
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(15)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xcc5fc196, fefd7d0c), -1037, -293},
+  {GRISU_UINT64_C(0xb5854744, 8ffffb2e), -987, -278},
+  {GRISU_UINT64_C(0xa139029f, 6a239f72), -937, -263},
+  {GRISU_UINT64_C(0x8f31cc09, 37ae58d3), -887, -248},
+  {GRISU_UINT64_C(0xfe5d5415, 0b090b03), -838, -233},
+  {GRISU_UINT64_C(0xe1ebce4d, c7f16dfc), -788, -218},
+  {GRISU_UINT64_C(0xc8a883c0, fdaf7df0), -738, -203},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0x9e4a9cec, 15763e2f), -638, -173},
+  {GRISU_UINT64_C(0x8c974f73, 83725573), -588, -158},
+  {GRISU_UINT64_C(0xf9bd690a, 1b68637b), -539, -143},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0xc5029163, f384a931), -439, -113},
+  {GRISU_UINT64_C(0xaefae514, 77a06b04), -389, -98},
+  {GRISU_UINT64_C(0x9b69dbe1, b548ce7d), -339, -83},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0xf5330471, 4d9265e0), -240, -53},
+  {GRISU_UINT64_C(0xd9c7dced, 53c72256), -190, -38},
+  {GRISU_UINT64_C(0xc16d9a00, 95928a27), -140, -23},
+  {GRISU_UINT64_C(0xabcc7711, 8461cefd), -90, -8},
+  {GRISU_UINT64_C(0x98968000, 00000000), -40, 7},
+  {GRISU_UINT64_C(0x87867832, 6eac9000), 10, 22},
+  {GRISU_UINT64_C(0xf0bdc21a, bb48db20), 59, 37},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0xbde94e8e, 43d0c8ec), 159, 67},
+  {GRISU_UINT64_C(0xa8acd7c0, 222311bd), 209, 82},
+  {GRISU_UINT64_C(0x95d04aee, 3b80ece6), 259, 97},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0xec5d3fa8, ce427b00), 358, 127},
+  {GRISU_UINT64_C(0xd1ef0244, af2364ff), 408, 142},
+  {GRISU_UINT64_C(0xba756174, 393d88e0), 458, 157},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0x9316ff75, dd87cbd8), 558, 187},
+  {GRISU_UINT64_C(0x82a45b45, 0226b39d), 608, 202},
+  {GRISU_UINT64_C(0xe8111c87, c5c1ba9a), 657, 217},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0xb7118682, dbb66a77), 757, 247},
+  {GRISU_UINT64_C(0xa298f2c5, 01f45f43), 807, 262},
+  {GRISU_UINT64_C(0x906a617d, 450187e2), 857, 277},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0xe3d8f9e5, 63a198e5), 956, 307},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0xb3bd72ed, 2af29e20), 1056, 337},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(15) = 50;
+// nb elements (15): 44
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(16)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xff77b1fc, bebcdc4f), -1034, -292},
+  {GRISU_UINT64_C(0x8dd01fad, 907ffc3c), -980, -276},
+  {GRISU_UINT64_C(0x9d71ac8f, ada6c9b5), -927, -260},
+  {GRISU_UINT64_C(0xaecc4991, 4078536d), -874, -244},
+  {GRISU_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
+  {GRISU_UINT64_C(0xd77485cb, 25823ac7), -768, -212},
+  {GRISU_UINT64_C(0xef340a98, 172aace5), -715, -196},
+  {GRISU_UINT64_C(0x84c8d4df, d2c63f3b), -661, -180},
+  {GRISU_UINT64_C(0x936b9fce, bb25c996), -608, -164},
+  {GRISU_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+  {GRISU_UINT64_C(0xb5b5ada8, aaff80b8), -502, -132},
+  {GRISU_UINT64_C(0xc9bcff60, 34c13053), -449, -116},
+  {GRISU_UINT64_C(0xdff97724, 70297ebd), -396, -100},
+  {GRISU_UINT64_C(0xf8a95fcf, 88747d94), -343, -84},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0x993fe2c6, d07b7fac), -236, -52},
+  {GRISU_UINT64_C(0xaa242499, 697392d3), -183, -36},
+  {GRISU_UINT64_C(0xbce50864, 92111aeb), -130, -20},
+  {GRISU_UINT64_C(0xd1b71758, e219652c), -77, -4},
+  {GRISU_UINT64_C(0xe8d4a510, 00000000), -24, 12},
+  {GRISU_UINT64_C(0x813f3978, f8940984), 30, 28},
+  {GRISU_UINT64_C(0x8f7e32ce, 7bea5c70), 83, 44},
+  {GRISU_UINT64_C(0x9f4f2726, 179a2245), 136, 60},
+  {GRISU_UINT64_C(0xb0de6538, 8cc8ada8), 189, 76},
+  {GRISU_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
+  {GRISU_UINT64_C(0xda01ee64, 1a708dea), 295, 108},
+  {GRISU_UINT64_C(0xf209787b, b47d6b85), 348, 124},
+  {GRISU_UINT64_C(0x865b8692, 5b9bc5c2), 402, 140},
+  {GRISU_UINT64_C(0x952ab45c, fa97a0b3), 455, 156},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0xb7dcbf53, 54e9bece), 561, 188},
+  {GRISU_UINT64_C(0xcc20ce9b, d35c78a5), 614, 204},
+  {GRISU_UINT64_C(0xe2a0b5dc, 971f303a), 667, 220},
+  {GRISU_UINT64_C(0xfb9b7cd9, a4a7443c), 720, 236},
+  {GRISU_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
+  {GRISU_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
+  {GRISU_UINT64_C(0xac2820d9, 623bf429), 880, 284},
+  {GRISU_UINT64_C(0xbf21e440, 03acdd2d), 933, 300},
+  {GRISU_UINT64_C(0xd433179d, 9c8cb841), 986, 316},
+  {GRISU_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(16) = 54;
+// nb elements (16): 41
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(17)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0x9faacf3d, f73609b1), -1030, -291},
+  {GRISU_UINT64_C(0xdd95317f, 31c7fa1d), -974, -274},
+  {GRISU_UINT64_C(0x99c10284, 4f94e0fb), -917, -257},
+  {GRISU_UINT64_C(0xd5605fcd, cf32e1d7), -861, -240},
+  {GRISU_UINT64_C(0x940f4613, ae5ed137), -804, -223},
+  {GRISU_UINT64_C(0xcd795be8, 70516656), -748, -206},
+  {GRISU_UINT64_C(0x8e938662, 882af53e), -691, -189},
+  {GRISU_UINT64_C(0xc5dd4427, 1ad3cdba), -635, -172},
+  {GRISU_UINT64_C(0x894bc396, ce5da772), -578, -155},
+  {GRISU_UINT64_C(0xbe895233, 86091466), -522, -138},
+  {GRISU_UINT64_C(0x843610cb, 4bf160cc), -465, -121},
+  {GRISU_UINT64_C(0xb77ada06, 17e3bbcb), -409, -104},
+  {GRISU_UINT64_C(0xfea126b7, d78186bd), -353, -87},
+  {GRISU_UINT64_C(0xb0af48ec, 79ace837), -296, -70},
+  {GRISU_UINT64_C(0xf5330471, 4d9265e0), -240, -53},
+  {GRISU_UINT64_C(0xaa242499, 697392d3), -183, -36},
+  {GRISU_UINT64_C(0xec1e4a7d, b69561a5), -127, -19},
+  {GRISU_UINT64_C(0xa3d70a3d, 70a3d70a), -70, -2},
+  {GRISU_UINT64_C(0xe35fa931, a0000000), -14, 15},
+  {GRISU_UINT64_C(0x9dc5ada8, 2b70b59e), 43, 32},
+  {GRISU_UINT64_C(0xdaf3f046, 51d47b4c), 99, 49},
+  {GRISU_UINT64_C(0x97edd871, cfda3a57), 156, 66},
+  {GRISU_UINT64_C(0xd2d80db0, 2aabd62c), 212, 83},
+  {GRISU_UINT64_C(0x924d692c, a61be758), 269, 100},
+  {GRISU_UINT64_C(0xcb090c80, 01ab551c), 325, 117},
+  {GRISU_UINT64_C(0x8ce2529e, 2734bb1d), 382, 134},
+  {GRISU_UINT64_C(0xc38413cf, 25e2d70e), 438, 151},
+  {GRISU_UINT64_C(0x87aa9aff, 79042287), 495, 168},
+  {GRISU_UINT64_C(0xbc4665b5, 96706115), 551, 185},
+  {GRISU_UINT64_C(0x82a45b45, 0226b39d), 608, 202},
+  {GRISU_UINT64_C(0xb54d5e4a, 127f59c8), 664, 219},
+  {GRISU_UINT64_C(0xfb9b7cd9, a4a7443c), 720, 236},
+  {GRISU_UINT64_C(0xae9672ab, a3d0c321), 777, 253},
+  {GRISU_UINT64_C(0xf24a01a7, 3cf2dcd0), 833, 270},
+  {GRISU_UINT64_C(0xa81f3014, 49ee8c70), 890, 287},
+  {GRISU_UINT64_C(0xe950df20, 247c83fd), 946, 304},
+  {GRISU_UINT64_C(0xa1e53af4, 6f801c53), 1003, 321},
+  {GRISU_UINT64_C(0xe0accfa8, 75af45a8), 1059, 338},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(17) = 57;
+// nb elements (17): 39
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(18)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xc795830d, 75038c1e), -1027, -290},
+  {GRISU_UINT64_C(0xad1c8eab, 5ee43b67), -967, -272},
+  {GRISU_UINT64_C(0x96267c75, 35b763b5), -907, -254},
+  {GRISU_UINT64_C(0x823c1279, 5db6ce57), -847, -236},
+  {GRISU_UINT64_C(0xe1ebce4d, c7f16dfc), -788, -218},
+  {GRISU_UINT64_C(0xc3f490aa, 77bd60fd), -728, -200},
+  {GRISU_UINT64_C(0xa9f6d30a, 038d1dbc), -668, -182},
+  {GRISU_UINT64_C(0x936b9fce, bb25c996), -608, -164},
+  {GRISU_UINT64_C(0xffbbcfe9, 94e5c620), -549, -146},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0xc06481fb, 9bcf8d3a), -429, -110},
+  {GRISU_UINT64_C(0xa6dfbd9f, b8e5b88f), -369, -92},
+  {GRISU_UINT64_C(0x90bd77f3, 483bb9ba), -309, -74},
+  {GRISU_UINT64_C(0xfb158592, be068d2f), -250, -56},
+  {GRISU_UINT64_C(0xd9c7dced, 53c72256), -190, -38},
+  {GRISU_UINT64_C(0xbce50864, 92111aeb), -130, -20},
+  {GRISU_UINT64_C(0xa3d70a3d, 70a3d70a), -70, -2},
+  {GRISU_UINT64_C(0x8e1bc9bf, 04000000), -10, 16},
+  {GRISU_UINT64_C(0xf684df56, c3e01bc7), 49, 34},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0xb975d6b6, ee39e437), 169, 70},
+  {GRISU_UINT64_C(0xa0dc75f1, 778e39d6), 229, 88},
+  {GRISU_UINT64_C(0x8b865b21, 5899f46d), 289, 106},
+  {GRISU_UINT64_C(0xf209787b, b47d6b85), 348, 124},
+  {GRISU_UINT64_C(0xd1ef0244, af2364ff), 408, 142},
+  {GRISU_UINT64_C(0xb616a12b, 7fe617aa), 468, 160},
+  {GRISU_UINT64_C(0x9defbf01, b061adab), 528, 178},
+  {GRISU_UINT64_C(0x88fcf317, f22241e2), 588, 196},
+  {GRISU_UINT64_C(0xeda2ee1c, 7064130c), 647, 214},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0xb2c71d5b, ca9023f8), 767, 250},
+  {GRISU_UINT64_C(0x9b10a4e5, e9913129), 827, 268},
+  {GRISU_UINT64_C(0x867f59a9, d4bed6c0), 887, 286},
+  {GRISU_UINT64_C(0xe950df20, 247c83fd), 946, 304},
+  {GRISU_UINT64_C(0xca5e89b1, 8b602368), 1006, 322},
+  {GRISU_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(18) = 60;
+// nb elements (18): 37
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(19)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0xf97ae3d0, d2446f25), -1024, -289},
+  {GRISU_UINT64_C(0x873e4f75, e2224e68), -960, -270},
+  {GRISU_UINT64_C(0x92a1958a, 7675175f), -897, -251},
+  {GRISU_UINT64_C(0x9efa548d, 26e5a6e2), -834, -232},
+  {GRISU_UINT64_C(0xac5d37d5, b79b6239), -771, -213},
+  {GRISU_UINT64_C(0xbae0a846, d2195713), -708, -194},
+  {GRISU_UINT64_C(0xca9cf1d2, 06fdc03c), -645, -175},
+  {GRISU_UINT64_C(0xdbac6c24, 7d62a584), -582, -156},
+  {GRISU_UINT64_C(0xee2ba6c0, 678b597f), -519, -137},
+  {GRISU_UINT64_C(0x811ccc66, 8829b887), -455, -118},
+  {GRISU_UINT64_C(0x8bfbea76, c619ef36), -392, -99},
+  {GRISU_UINT64_C(0x97c560ba, 6b0919a6), -329, -80},
+  {GRISU_UINT64_C(0xa48ceaaa, b75a8e2b), -266, -61},
+  {GRISU_UINT64_C(0xb267ed19, 40f1c61c), -203, -42},
+  {GRISU_UINT64_C(0xc16d9a00, 95928a27), -140, -23},
+  {GRISU_UINT64_C(0xd1b71758, e219652c), -77, -4},
+  {GRISU_UINT64_C(0xe35fa931, a0000000), -14, 15},
+  {GRISU_UINT64_C(0xf684df56, c3e01bc7), 49, 34},
+  {GRISU_UINT64_C(0x85a36366, eb71f041), 113, 53},
+  {GRISU_UINT64_C(0x90e40fbe, ea1d3a4b), 176, 72},
+  {GRISU_UINT64_C(0x9d174b2d, cec0e47b), 239, 91},
+  {GRISU_UINT64_C(0xaa51823e, 34a7eedf), 302, 110},
+  {GRISU_UINT64_C(0xb8a8d9bb, e123f018), 365, 129},
+  {GRISU_UINT64_C(0xc83553c5, c8965d3d), 428, 148},
+  {GRISU_UINT64_C(0xd910f7ff, 28069da4), 491, 167},
+  {GRISU_UINT64_C(0xeb57ff22, fc0c795a), 554, 186},
+  {GRISU_UINT64_C(0xff290242, c83396ce), 617, 205},
+  {GRISU_UINT64_C(0x8a5296ff, e33cc930), 681, 224},
+  {GRISU_UINT64_C(0x95f83d0a, 1fb69cd9), 744, 243},
+  {GRISU_UINT64_C(0xa298f2c5, 01f45f43), 807, 262},
+  {GRISU_UINT64_C(0xb049dc01, 6abc5e60), 870, 281},
+  {GRISU_UINT64_C(0xbf21e440, 03acdd2d), 933, 300},
+  {GRISU_UINT64_C(0xcf39e50f, eae16bf0), 996, 319},
+  {GRISU_UINT64_C(0xe0accfa8, 75af45a8), 1059, 338},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(19) = 64;
+// nb elements (19): 35
+static const GRISU_CACHE_STRUCT GRISU_CACHE_NAME(20)[] = {
+  {GRISU_UINT64_C(0xe61acf03, 3d1a45df), -1087, -308},
+  {GRISU_UINT64_C(0x9becce62, 836ac577), -1020, -288},
+  {GRISU_UINT64_C(0xd3515c28, 31559a83), -954, -268},
+  {GRISU_UINT64_C(0x8f31cc09, 37ae58d3), -887, -248},
+  {GRISU_UINT64_C(0xc2109436, 4dfb5637), -821, -228},
+  {GRISU_UINT64_C(0x8380dea9, 3da4bc60), -754, -208},
+  {GRISU_UINT64_C(0xb23867fb, 2a35b28e), -688, -188},
+  {GRISU_UINT64_C(0xf18899b1, bc3f8ca2), -622, -168},
+  {GRISU_UINT64_C(0xa3ab6658, 0d5fdaf6), -555, -148},
+  {GRISU_UINT64_C(0xddd0467c, 64bce4a1), -489, -128},
+  {GRISU_UINT64_C(0x964e858c, 91ba2655), -422, -108},
+  {GRISU_UINT64_C(0xcbb41ef9, 79346bca), -356, -88},
+  {GRISU_UINT64_C(0x8a08f0f8, bf0f156b), -289, -68},
+  {GRISU_UINT64_C(0xbb127c53, b17ec159), -223, -48},
+  {GRISU_UINT64_C(0xfd87b5f2, 8300ca0e), -157, -28},
+  {GRISU_UINT64_C(0xabcc7711, 8461cefd), -90, -8},
+  {GRISU_UINT64_C(0xe8d4a510, 00000000), -24, 12},
+  {GRISU_UINT64_C(0x9dc5ada8, 2b70b59e), 43, 32},
+  {GRISU_UINT64_C(0xd5d238a4, abe98068), 109, 52},
+  {GRISU_UINT64_C(0x90e40fbe, ea1d3a4b), 176, 72},
+  {GRISU_UINT64_C(0xc45d1df9, 42711d9a), 242, 92},
+  {GRISU_UINT64_C(0x850fadc0, 9923329e), 309, 112},
+  {GRISU_UINT64_C(0xb454e4a1, 79dd1877), 375, 132},
+  {GRISU_UINT64_C(0xf46518c2, ef5b8cd1), 441, 152},
+  {GRISU_UINT64_C(0xa59bc234, db398c25), 508, 172},
+  {GRISU_UINT64_C(0xe070f78d, 3927556b), 574, 192},
+  {GRISU_UINT64_C(0x98165af3, 7b2153df), 641, 212},
+  {GRISU_UINT64_C(0xce1de406, 42e3f4b9), 707, 232},
+  {GRISU_UINT64_C(0x8bab8eef, b6409c1a), 774, 252},
+  {GRISU_UINT64_C(0xbd49d14a, a79dbc82), 840, 272},
+  {GRISU_UINT64_C(0x80444b5e, 7aa7cf85), 907, 292},
+  {GRISU_UINT64_C(0xadd57a27, d29339f6), 973, 312},
+  {GRISU_UINT64_C(0xeb96bf6e, badf77d9), 1039, 332},
+  };
+static const int GRISU_CACHE_MAX_DISTANCE(20) = 67;
+// nb elements (20): 33
+static const int GRISU_CACHE_OFFSET = 308;
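The tables above end the generated Grisu cache: each GRISU_CACHE_NAME(n) array
stores precomputed powers of ten as a 64-bit significand plus a binary and a
decimal exponent, and GRISU_CACHE_MAX_DISTANCE(n) bounds the gap between the
binary exponents of neighbouring entries. A minimal sketch of how such a cache
is typically consulted — the struct, names, and linear scan are illustrative
assumptions, not the shipped lookup code:

  #include <stdint.h>

  struct CachedPower {
    uint64_t significand;   // 64-bit significand of 10^decimal_exponent.
    int binary_exponent;    // value = significand * 2^binary_exponent.
    int decimal_exponent;   // The power of ten this entry represents.
  };

  // Hypothetical lookup: find a cached power of ten whose binary exponent
  // lies inside [min_exponent, max_exponent]. The spacing invariant above
  // (neighbouring entries at most GRISU_CACHE_MAX_DISTANCE apart) ensures
  // such an entry exists for any window at least that wide.
  const CachedPower* FindCachedPower(const CachedPower* cache, int cache_size,
                                     int min_exponent, int max_exponent) {
    for (int i = 0; i < cache_size; i++) {
      if (cache[i].binary_exponent >= min_exponent &&
          cache[i].binary_exponent <= max_exponent) {
        return &cache[i];
      }
    }
    return 0;  // Unreachable while the spacing invariant holds.
  }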
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index ca570a6..75f6fc3 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -227,10 +227,10 @@
 }
 
 
-void PrettyPrinter::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void PrettyPrinter::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   Print("(");
-  PrintLiteral(node->boilerplate(), true);
+  PrintLiteral(node->shared_function_info(), true);
   Print(")");
 }
 
@@ -604,7 +604,7 @@
         ast_printer_->Print(StaticType::Type2String(expr->type()));
         printed_first = true;
       }
-      if (expr->num() != Expression::kNoLabel) {
+      if (expr->num() != AstNode::kNoNumber) {
         ast_printer_->Print(printed_first ? ", num = " : " (num = ");
         ast_printer_->Print("%d", expr->num());
         printed_first = true;
@@ -668,7 +668,8 @@
                                               Variable* var,
                                               Handle<Object> value,
                                               StaticType* type,
-                                              int num) {
+                                              int num,
+                                              bool is_primitive) {
   if (var == NULL) {
     PrintLiteralIndented(info, value, true);
   } else {
@@ -679,9 +680,11 @@
       pos += OS::SNPrintF(buf + pos, ", type = %s",
                           StaticType::Type2String(type));
     }
-    if (num != Expression::kNoLabel) {
+    if (num != AstNode::kNoNumber) {
       pos += OS::SNPrintF(buf + pos, ", num = %d", num);
     }
+    pos += OS::SNPrintF(buf + pos,
+                        is_primitive ? ", primitive" : ", non-primitive");
     OS::SNPrintF(buf + pos, ")");
     PrintLiteralIndented(buf.start(), value, true);
   }
@@ -740,7 +743,8 @@
       PrintLiteralWithModeIndented("VAR", scope->parameter(i),
                                    scope->parameter(i)->name(),
                                    scope->parameter(i)->type(),
-                                   Expression::kNoLabel);
+                                   AstNode::kNoNumber,
+                                   false);
     }
   }
 }
@@ -786,7 +790,8 @@
                                  node->proxy()->AsVariable(),
                                  node->proxy()->name(),
                                  node->proxy()->AsVariable()->type(),
-                                 Expression::kNoLabel);
+                                 AstNode::kNoNumber,
+                                 node->proxy()->IsPrimitive());
   } else {
     // function declarations
     PrintIndented("FUNCTION ");
@@ -918,10 +923,10 @@
 }
 
 
-void AstPrinter::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void AstPrinter::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   IndentedScope indent("FUNC LITERAL");
-  PrintLiteralIndented("BOILERPLATE", node->boilerplate(), true);
+  PrintLiteralIndented("SHARED INFO", node->shared_function_info(), true);
 }
 
 
@@ -1022,7 +1027,7 @@
 
 void AstPrinter::VisitVariableProxy(VariableProxy* node) {
   PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
-                               node->type(), node->num());
+                               node->type(), node->num(), node->IsPrimitive());
   Variable* var = node->var();
   if (var != NULL && var->rewrite() != NULL) {
     IndentedScope indent;
@@ -1326,9 +1331,9 @@
 }
 
 
-void JsonAstBuilder::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  TagScope tag(this, "FunctionBoilerplateLiteral");
+void JsonAstBuilder::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  TagScope tag(this, "SharedFunctionInfoLiteral");
 }
 
 
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 8e958c7..93ba0d9 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -103,7 +103,8 @@
                                     Variable* var,
                                     Handle<Object> value,
                                     StaticType* type,
-                                    int num);
+                                    int num,
+                                    bool is_primitive);
   void PrintLabelsIndented(const char* info, ZoneStringList* labels);
 
   void inc_indent() { indent_++; }
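The prettyprinter changes track renames made elsewhere in this patch:
FunctionBoilerplateLiteral AST nodes become SharedFunctionInfoLiteral, the
unset-number sentinel moves from Expression::kNoLabel to AstNode::kNoNumber,
and PrintLiteralWithModeIndented gains an is_primitive flag that is appended
to the printed annotation. A standalone sketch of the resulting annotation
format — the sentinel value and output below are illustrative, not V8's:

  #include <stdio.h>

  static const int kNoNumber = -1;  // Placeholder for AstNode::kNoNumber.

  // Mirrors the SNPrintF sequence in PrintLiteralWithModeIndented: mode,
  // then an optional node number, then the primitive prediction.
  void PrintAnnotation(const char* info, const char* name, const char* mode,
                       int num, bool is_primitive) {
    printf("%s \"%s\" (mode = %s", info, name, mode);
    if (num != kNoNumber) printf(", num = %d", num);
    printf("%s)\n", is_primitive ? ", primitive" : ", non-primitive");
  }

  int main() {
    PrintAnnotation("VAR PROXY", "x", "VAR", 7, true);
    // Parameters pass AstNode::kNoNumber and false, as in the
    // parameter-printing hunk above:
    PrintAnnotation("VAR", "arg", "VAR", kNoNumber, false);
    return 0;
  }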
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
new file mode 100644
index 0000000..628fa44
--- /dev/null
+++ b/src/profile-generator-inl.h
@@ -0,0 +1,124 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PROFILE_GENERATOR_INL_H_
+#define V8_PROFILE_GENERATOR_INL_H_
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#include "profile-generator.h"
+
+namespace v8 {
+namespace internal {
+
+CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
+                     const char* name_prefix,
+                     const char* name,
+                     const char* resource_name,
+                     int line_number)
+    : call_uid_(next_call_uid_++),
+      tag_(tag),
+      name_prefix_(name_prefix),
+      name_(name),
+      resource_name_(resource_name),
+      line_number_(line_number) {
+}
+
+
+bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
+  return tag == Logger::FUNCTION_TAG
+      || tag == Logger::LAZY_COMPILE_TAG
+      || tag == Logger::SCRIPT_TAG
+      || tag == Logger::NATIVE_FUNCTION_TAG
+      || tag == Logger::NATIVE_LAZY_COMPILE_TAG
+      || tag == Logger::NATIVE_SCRIPT_TAG;
+}
+
+
+ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
+    : tree_(tree),
+      entry_(entry),
+      total_ticks_(0),
+      self_ticks_(0),
+      children_(CodeEntriesMatch) {
+}
+
+
+void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
+  CodeTree::Locator locator;
+  tree_.Insert(addr, &locator);
+  locator.set_value(CodeEntryInfo(entry, size));
+}
+
+
+void CodeMap::MoveCode(Address from, Address to) {
+  tree_.Move(from, to);
+}
+
+void CodeMap::DeleteCode(Address addr) {
+  tree_.Remove(addr);
+}
+
+
+bool CpuProfilesCollection::is_last_profile() {
+  // Called from the VM thread, which is the only thread that mutates
+  // the list, so no locking is needed here.
+  return current_profiles_.length() == 1;
+}
+
+
+const char* CpuProfilesCollection::GetFunctionName(String* name) {
+  return GetFunctionName(GetName(name));
+}
+
+
+const char* CpuProfilesCollection::GetFunctionName(const char* name) {
+  return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
+}
+
+
+CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
+  switch (tag) {
+    case GC:
+      return gc_entry_;
+    case JS:
+    case COMPILER:
+    // DOM event handlers are reported as OTHER / EXTERNAL entries.
+    // To avoid confusing people, let's put all these entries into
+    // one bucket.
+    case OTHER:
+    case EXTERNAL:
+      return program_entry_;
+    default: return NULL;
+  }
+}
+
+} }  // namespace v8::internal
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+#endif  // V8_PROFILE_GENERATOR_INL_H_
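CodeMap, inlined above, keys a splay tree by code-object start address and
records the region size, so FindEntry (defined in profile-generator.cc below)
can resolve any pc inside a region via a greatest-key-not-above lookup. A
compilable sketch of that contract, with std::map standing in for V8's
SplayTree purely for illustration:

  #include <map>
  #include <stdint.h>

  typedef uintptr_t Address;

  struct EntryInfo {
    const char* name;
    unsigned size;
  };

  // Sketch of the CodeMap contract: keys are code-object start addresses,
  // and a lookup succeeds for any pc inside [start, start + size).
  class SimpleCodeMap {
   public:
    void AddCode(Address addr, const char* name, unsigned size) {
      EntryInfo info = { name, size };
      map_[addr] = info;
    }
    void MoveCode(Address from, Address to) {
      std::map<Address, EntryInfo>::iterator it = map_.find(from);
      if (it == map_.end()) return;
      EntryInfo info = it->second;
      map_.erase(it);
      map_[to] = info;
    }
    void DeleteCode(Address addr) { map_.erase(addr); }
    const char* FindName(Address pc) {
      // Greatest key <= pc, mirroring FindGreatestLessThan in the patch.
      std::map<Address, EntryInfo>::iterator it = map_.upper_bound(pc);
      if (it == map_.begin()) return 0;
      --it;
      if (pc < it->first + it->second.size) return it->second.name;
      return 0;
    }
   private:
    std::map<Address, EntryInfo> map_;
  };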
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
new file mode 100644
index 0000000..4c2a330
--- /dev/null
+++ b/src/profile-generator.cc
@@ -0,0 +1,583 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#include "v8.h"
+
+#include "profile-generator-inl.h"
+
+#include "../include/v8-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+
+const char* CodeEntry::kEmptyNamePrefix = "";
+unsigned CodeEntry::next_call_uid_ = 1;
+
+
+ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
+  HashMap::Entry* map_entry =
+      children_.Lookup(entry, CodeEntryHash(entry), false);
+  return map_entry != NULL ?
+      reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
+}
+
+
+ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
+  HashMap::Entry* map_entry =
+      children_.Lookup(entry, CodeEntryHash(entry), true);
+  if (map_entry->value == NULL) {
+    // New node added.
+    ProfileNode* new_node = new ProfileNode(tree_, entry);
+    map_entry->value = new_node;
+    children_list_.Add(new_node);
+  }
+  return reinterpret_cast<ProfileNode*>(map_entry->value);
+}
+
+
+double ProfileNode::GetSelfMillis() const {
+  return tree_->TicksToMillis(self_ticks_);
+}
+
+
+double ProfileNode::GetTotalMillis() const {
+  return tree_->TicksToMillis(total_ticks_);
+}
+
+
+void ProfileNode::Print(int indent) {
+  OS::Print("%5u %5u %*c %s%s",
+            total_ticks_, self_ticks_,
+            indent, ' ',
+            entry_->name_prefix(),
+            entry_->name());
+  if (entry_->resource_name()[0] != '\0')
+    OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
+  OS::Print("\n");
+  for (HashMap::Entry* p = children_.Start();
+       p != NULL;
+       p = children_.Next(p)) {
+    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
+  }
+}
+
+
+namespace {
+
+class DeleteNodesCallback {
+ public:
+  void AfterAllChildrenTraversed(ProfileNode* node) {
+    delete node;
+  }
+
+  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
+};
+
+}  // namespace
+
+
+ProfileTree::ProfileTree()
+    : root_entry_(Logger::FUNCTION_TAG, "", "(root)", "", 0),
+      root_(new ProfileNode(this, &root_entry_)) {
+}
+
+
+ProfileTree::~ProfileTree() {
+  DeleteNodesCallback cb;
+  TraverseDepthFirstPostOrder(&cb);
+}
+
+
+void ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
+  ProfileNode* node = root_;
+  for (CodeEntry** entry = path.start() + path.length() - 1;
+       entry != path.start() - 1;
+       --entry) {
+    if (*entry != NULL) {
+      node = node->FindOrAddChild(*entry);
+    }
+  }
+  node->IncrementSelfTicks();
+}
+
+
+void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
+  ProfileNode* node = root_;
+  for (CodeEntry** entry = path.start();
+       entry != path.start() + path.length();
+       ++entry) {
+    if (*entry != NULL) {
+      node = node->FindOrAddChild(*entry);
+    }
+  }
+  node->IncrementSelfTicks();
+}
+
+
+void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
+  ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
+}
+
+
+namespace {
+
+class Position {
+ public:
+  explicit Position(ProfileNode* node)
+      : node(node), child_idx_(0) { }
+  INLINE(ProfileNode* current_child()) {
+    return node->children()->at(child_idx_);
+  }
+  INLINE(bool has_current_child()) {
+    return child_idx_ < node->children()->length();
+  }
+  INLINE(void next_child()) { ++child_idx_; }
+
+  ProfileNode* node;
+ private:
+  int child_idx_;
+};
+
+}  // namespace
+
+
+// Non-recursive implementation of a depth-first post-order tree traversal.
+template <typename Callback>
+void ProfileTree::TraverseDepthFirstPostOrder(Callback* callback) {
+  List<Position> stack(10);
+  stack.Add(Position(root_));
+  do {
+    Position& current = stack.last();
+    if (current.has_current_child()) {
+      stack.Add(Position(current.current_child()));
+    } else {
+      callback->AfterAllChildrenTraversed(current.node);
+      if (stack.length() > 1) {
+        Position& parent = stack[stack.length() - 2];
+        callback->AfterChildTraversed(parent.node, current.node);
+        parent.next_child();
+        // Remove child from the stack.
+        stack.RemoveLast();
+      }
+    }
+  } while (stack.length() > 1 || stack.last().has_current_child());
+}
+
+
+namespace {
+
+class CalculateTotalTicksCallback {
+ public:
+  void AfterAllChildrenTraversed(ProfileNode* node) {
+    node->IncreaseTotalTicks(node->self_ticks());
+  }
+
+  void AfterChildTraversed(ProfileNode* parent, ProfileNode* child) {
+    parent->IncreaseTotalTicks(child->total_ticks());
+  }
+};
+
+}  // namespace
+
+
+void ProfileTree::CalculateTotalTicks() {
+  CalculateTotalTicksCallback cb;
+  TraverseDepthFirstPostOrder(&cb);
+}
+
+
+void ProfileTree::ShortPrint() {
+  OS::Print("root: %u %u %.2fms %.2fms\n",
+            root_->total_ticks(), root_->self_ticks(),
+            root_->GetTotalMillis(), root_->GetSelfMillis());
+}
+
+
+void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
+  top_down_.AddPathFromEnd(path);
+  bottom_up_.AddPathFromStart(path);
+}
+
+
+void CpuProfile::CalculateTotalTicks() {
+  top_down_.CalculateTotalTicks();
+  bottom_up_.CalculateTotalTicks();
+}
+
+
+void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) {
+  top_down_.SetTickRatePerMs(actual_sampling_rate);
+  bottom_up_.SetTickRatePerMs(actual_sampling_rate);
+}
+
+
+void CpuProfile::ShortPrint() {
+  OS::Print("top down ");
+  top_down_.ShortPrint();
+  OS::Print("bottom up ");
+  bottom_up_.ShortPrint();
+}
+
+
+void CpuProfile::Print() {
+  OS::Print("[Top down]:\n");
+  top_down_.Print();
+  OS::Print("[Bottom up]:\n");
+  bottom_up_.Print();
+}
+
+
+const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
+const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
+    CodeMap::CodeEntryInfo(NULL, 0);
+
+
+void CodeMap::AddAlias(Address alias, Address addr) {
+  CodeTree::Locator locator;
+  if (tree_.Find(addr, &locator)) {
+    const CodeEntryInfo& entry_info = locator.value();
+    tree_.Insert(alias, &locator);
+    locator.set_value(entry_info);
+  }
+}
+
+
+CodeEntry* CodeMap::FindEntry(Address addr) {
+  CodeTree::Locator locator;
+  if (tree_.FindGreatestLessThan(addr, &locator)) {
+    // locator.key() <= addr. Need to check that addr is within entry.
+    const CodeEntryInfo& entry = locator.value();
+    if (addr < (locator.key() + entry.size))
+      return entry.entry;
+  }
+  return NULL;
+}
+
+
+void CodeMap::CodeTreePrinter::Call(
+    const Address& key, const CodeMap::CodeEntryInfo& value) {
+  OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
+}
+
+
+void CodeMap::Print() {
+  CodeTreePrinter printer;
+  tree_.ForEach(&printer);
+}
+
+
+CpuProfilesCollection::CpuProfilesCollection()
+    : function_and_resource_names_(StringsMatch),
+      profiles_uids_(CpuProfilesMatch),
+      current_profiles_semaphore_(OS::CreateSemaphore(1)) {
+}
+
+
+static void DeleteArgsCountName(char** name_ptr) {
+  DeleteArray(*name_ptr);
+}
+
+
+static void DeleteCodeEntry(CodeEntry** entry_ptr) {
+  delete *entry_ptr;
+}
+
+static void DeleteCpuProfile(CpuProfile** profile_ptr) {
+  delete *profile_ptr;
+}
+
+
+CpuProfilesCollection::~CpuProfilesCollection() {
+  delete current_profiles_semaphore_;
+  current_profiles_.Iterate(DeleteCpuProfile);
+  profiles_.Iterate(DeleteCpuProfile);
+  code_entries_.Iterate(DeleteCodeEntry);
+  args_count_names_.Iterate(DeleteArgsCountName);
+  for (HashMap::Entry* p = function_and_resource_names_.Start();
+       p != NULL;
+       p = function_and_resource_names_.Next(p)) {
+    DeleteArray(reinterpret_cast<const char*>(p->value));
+  }
+}
+
+
+bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) {
+  ASSERT(uid > 0);
+  current_profiles_semaphore_->Wait();
+  for (int i = 0; i < current_profiles_.length(); ++i) {
+    if (strcmp(current_profiles_[i]->title(), title) == 0) {
+      // Ignore attempts to start a profile with the same title.
+      current_profiles_semaphore_->Signal();
+      return false;
+    }
+  }
+  current_profiles_.Add(new CpuProfile(title, uid));
+  current_profiles_semaphore_->Signal();
+  return true;
+}
+
+
+bool CpuProfilesCollection::StartProfiling(String* title, unsigned uid) {
+  return StartProfiling(GetName(title), uid);
+}
+
+
+CpuProfile* CpuProfilesCollection::StopProfiling(const char* title,
+                                                 double actual_sampling_rate) {
+  const int title_len = StrLength(title);
+  CpuProfile* profile = NULL;
+  current_profiles_semaphore_->Wait();
+  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
+    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
+      profile = current_profiles_.Remove(i);
+      break;
+    }
+  }
+  current_profiles_semaphore_->Signal();
+
+  if (profile != NULL) {
+    profile->CalculateTotalTicks();
+    profile->SetActualSamplingRate(actual_sampling_rate);
+    profiles_.Add(profile);
+    HashMap::Entry* entry =
+        profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
+                              static_cast<uint32_t>(profile->uid()),
+                              true);
+    ASSERT(entry->value == NULL);
+    entry->value = profile;
+  }
+  return profile;
+}
+
+
+CpuProfile* CpuProfilesCollection::StopProfiling(String* title,
+                                                 double actual_sampling_rate) {
+  return StopProfiling(GetName(title), actual_sampling_rate);
+}
+
+
+CpuProfile* CpuProfilesCollection::GetProfile(unsigned uid) {
+  HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
+                                                static_cast<uint32_t>(uid),
+                                                false);
+  return entry != NULL ? reinterpret_cast<CpuProfile*>(entry->value) : NULL;
+}
+
+
+CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
+                                               String* name,
+                                               String* resource_name,
+                                               int line_number) {
+  CodeEntry* entry = new CodeEntry(tag,
+                                   CodeEntry::kEmptyNamePrefix,
+                                   GetFunctionName(name),
+                                   GetName(resource_name),
+                                   line_number);
+  code_entries_.Add(entry);
+  return entry;
+}
+
+
+CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
+                                               const char* name) {
+  CodeEntry* entry = new CodeEntry(tag,
+                                   CodeEntry::kEmptyNamePrefix,
+                                   GetFunctionName(name),
+                                   "",
+                                   v8::CpuProfileNode::kNoLineNumberInfo);
+  code_entries_.Add(entry);
+  return entry;
+}
+
+
+CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
+                                               const char* name_prefix,
+                                               String* name) {
+  CodeEntry* entry = new CodeEntry(tag,
+                                   name_prefix,
+                                   GetName(name),
+                                   "",
+                                   v8::CpuProfileNode::kNoLineNumberInfo);
+  code_entries_.Add(entry);
+  return entry;
+}
+
+
+CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
+                                               int args_count) {
+  CodeEntry* entry = new CodeEntry(tag,
+                                   "args_count: ",
+                                   GetName(args_count),
+                                   "",
+                                   v8::CpuProfileNode::kNoLineNumberInfo);
+  code_entries_.Add(entry);
+  return entry;
+}
+
+
+const char* CpuProfilesCollection::GetName(String* name) {
+  if (name->IsString()) {
+    char* c_name =
+        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach();
+    HashMap::Entry* cache_entry =
+        function_and_resource_names_.Lookup(c_name,
+                                            name->Hash(),
+                                            true);
+    if (cache_entry->value == NULL) {
+      // New entry added.
+      cache_entry->value = c_name;
+    } else {
+      DeleteArray(c_name);
+    }
+    return reinterpret_cast<const char*>(cache_entry->value);
+  } else {
+    return "";
+  }
+}
+
+
+const char* CpuProfilesCollection::GetName(int args_count) {
+  ASSERT(args_count >= 0);
+  if (args_count_names_.length() <= args_count) {
+    args_count_names_.AddBlock(
+        NULL, args_count - args_count_names_.length() + 1);
+  }
+  if (args_count_names_[args_count] == NULL) {
+    const int kMaximumNameLength = 32;
+    char* name = NewArray<char>(kMaximumNameLength);
+    OS::SNPrintF(Vector<char>(name, kMaximumNameLength), "%d", args_count);
+    args_count_names_[args_count] = name;
+  }
+  return args_count_names_[args_count];
+}
+
+
+void CpuProfilesCollection::AddPathToCurrentProfiles(
+    const Vector<CodeEntry*>& path) {
+  // As starting / stopping profiles is rare relative to calls to this
+  // method, we don't bother minimizing the duration of lock holding,
+  // e.g. by copying the contents of the list to a local vector.
+  current_profiles_semaphore_->Wait();
+  for (int i = 0; i < current_profiles_.length(); ++i) {
+    current_profiles_[i]->AddPath(path);
+  }
+  current_profiles_semaphore_->Signal();
+}
+
+
+void SampleRateCalculator::Tick() {
+  if (--wall_time_query_countdown_ == 0)
+    UpdateMeasurements(OS::TimeCurrentMillis());
+}
+
+
+void SampleRateCalculator::UpdateMeasurements(double current_time) {
+  if (measurements_count_++ != 0) {
+    const double measured_ticks_per_ms =
+        (kWallTimeQueryIntervalMs * ticks_per_ms_) /
+        (current_time - last_wall_time_);
+    // Update the average value.
+    ticks_per_ms_ +=
+        (measured_ticks_per_ms - ticks_per_ms_) / measurements_count_;
+    // Update the externally accessible result.
+    result_ = static_cast<AtomicWord>(ticks_per_ms_ * kResultScale);
+  }
+  last_wall_time_ = current_time;
+  wall_time_query_countdown_ =
+      static_cast<unsigned>(kWallTimeQueryIntervalMs * ticks_per_ms_);
+}
+
+
+const char* ProfileGenerator::kAnonymousFunctionName = "(anonymous function)";
+const char* ProfileGenerator::kProgramEntryName = "(program)";
+const char* ProfileGenerator::kGarbageCollectorEntryName =
+  "(garbage collector)";
+
+
+ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
+    : profiles_(profiles),
+      program_entry_(
+          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
+      gc_entry_(
+          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
+                                 kGarbageCollectorEntryName)) {
+}
+
+
+void ProfileGenerator::RecordTickSample(const TickSample& sample) {
+  // Allocate space for stack frames + pc + function + vm-state.
+  ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
+  // As the actual number of decoded code entries may vary, initialize
+  // the entries vector with NULL values.
+  CodeEntry** entry = entries.start();
+  memset(entry, 0, entries.length() * sizeof(*entry));
+  if (sample.pc != NULL) {
+    *entry++ = code_map_.FindEntry(sample.pc);
+
+    if (sample.function != NULL) {
+      *entry = code_map_.FindEntry(sample.function);
+      if (*entry != NULL && !(*entry)->is_js_function()) {
+        *entry = NULL;
+      } else {
+        CodeEntry* pc_entry = *entries.start();
+        if (pc_entry == NULL || pc_entry->is_js_function())
+          *entry = NULL;
+      }
+      entry++;
+    }
+
+    for (const Address *stack_pos = sample.stack,
+           *stack_end = stack_pos + sample.frames_count;
+         stack_pos != stack_end;
+         ++stack_pos) {
+      *entry++ = code_map_.FindEntry(*stack_pos);
+    }
+  }
+
+  if (FLAG_prof_browser_mode) {
+    bool no_symbolized_entries = true;
+    for (CodeEntry** e = entries.start(); e != entry; ++e) {
+      if (*e != NULL) {
+        no_symbolized_entries = false;
+        break;
+      }
+    }
+    // If no frames were symbolized, put the VM state entry in.
+    if (no_symbolized_entries) {
+      *entry++ = EntryForVMState(sample.state);
+    }
+  }
+
+  profiles_->AddPathToCurrentProfiles(entries);
+}
+
+} }  // namespace v8::internal
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
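One piece worth calling out is SampleRateCalculator::UpdateMeasurements:
rather than querying wall time on every tick, it measures once per
kWallTimeQueryIntervalMs worth of ticks and folds each measurement into a
running mean with the standard incremental update avg += (x - avg) / n, then
publishes the scaled mean through an AtomicWord so readers on other threads
never see a torn double. A self-contained sketch of that update rule (the
measurement values are invented for illustration):

  #include <stdio.h>

  // Incremental mean: after folding in n values, avg equals their
  // arithmetic mean, with no history stored. The profiler seeds avg with
  // the nominal sampling rate, so the seed counts as the first value.
  int main() {
    double avg = 1.0;                          // nominal ticks per ms
    double measured[] = { 0.91, 1.06, 0.97 };  // invented measurements
    int n = 1;                                 // the seed is value #1
    for (int i = 0; i < 3; i++) {
      n++;
      avg += (measured[i] - avg) / n;
      printf("after measurement %d: avg = %.4f ticks/ms\n", i + 1, avg);
    }
    // Scaled for atomic publication, as with kResultScale = 100000:
    printf("published: %ld\n", static_cast<long>(avg * 100000));
    return 0;
  }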
diff --git a/src/profile-generator.h b/src/profile-generator.h
new file mode 100644
index 0000000..bd5b0cd
--- /dev/null
+++ b/src/profile-generator.h
@@ -0,0 +1,364 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PROFILE_GENERATOR_H_
+#define V8_PROFILE_GENERATOR_H_
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#include "hashmap.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeEntry {
+ public:
+  // CodeEntry doesn't own name strings, just references them.
+  INLINE(CodeEntry(Logger::LogEventsAndTags tag,
+                   const char* name_prefix,
+                   const char* name,
+                   const char* resource_name,
+                   int line_number));
+
+  INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
+  INLINE(const char* name_prefix() const) { return name_prefix_; }
+  INLINE(bool has_name_prefix() const) { return name_prefix_[0] != '\0'; }
+  INLINE(const char* name() const) { return name_; }
+  INLINE(const char* resource_name() const) { return resource_name_; }
+  INLINE(int line_number() const) { return line_number_; }
+  INLINE(unsigned call_uid() const) { return call_uid_; }
+
+  INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
+
+  static const char* kEmptyNamePrefix;
+
+ private:
+  const unsigned call_uid_;
+  Logger::LogEventsAndTags tag_;
+  const char* name_prefix_;
+  const char* name_;
+  const char* resource_name_;
+  int line_number_;
+
+  static unsigned next_call_uid_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeEntry);
+};
+
+
+class ProfileTree;
+
+class ProfileNode {
+ public:
+  INLINE(ProfileNode(ProfileTree* tree, CodeEntry* entry));
+
+  ProfileNode* FindChild(CodeEntry* entry);
+  ProfileNode* FindOrAddChild(CodeEntry* entry);
+  INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
+  INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
+
+  INLINE(CodeEntry* entry() const) { return entry_; }
+  INLINE(unsigned self_ticks() const) { return self_ticks_; }
+  INLINE(unsigned total_ticks() const) { return total_ticks_; }
+  INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
+  double GetSelfMillis() const;
+  double GetTotalMillis() const;
+
+  void Print(int indent);
+
+ private:
+  INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
+    return entry1 == entry2;
+  }
+
+  INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
+    return static_cast<int32_t>(reinterpret_cast<intptr_t>(entry));
+  }
+
+  ProfileTree* tree_;
+  CodeEntry* entry_;
+  unsigned total_ticks_;
+  unsigned self_ticks_;
+  // CodeEntry* -> ProfileNode*
+  HashMap children_;
+  List<ProfileNode*> children_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(ProfileNode);
+};
+
+
+class ProfileTree {
+ public:
+  ProfileTree();
+  ~ProfileTree();
+
+  void AddPathFromEnd(const Vector<CodeEntry*>& path);
+  void AddPathFromStart(const Vector<CodeEntry*>& path);
+  void CalculateTotalTicks();
+
+  double TicksToMillis(unsigned ticks) const {
+    return ticks * ms_to_ticks_scale_;
+  }
+  ProfileNode* root() const { return root_; }
+  void SetTickRatePerMs(double ticks_per_ms);
+
+  void ShortPrint();
+  void Print() {
+    root_->Print(0);
+  }
+
+ private:
+  template <typename Callback>
+  void TraverseDepthFirstPostOrder(Callback* callback);
+
+  CodeEntry root_entry_;
+  ProfileNode* root_;
+  double ms_to_ticks_scale_;
+
+  DISALLOW_COPY_AND_ASSIGN(ProfileTree);
+};
+
+
+class CpuProfile {
+ public:
+  CpuProfile(const char* title, unsigned uid)
+      : title_(title), uid_(uid) { }
+
+  // Add pc -> ... -> main() call path to the profile.
+  void AddPath(const Vector<CodeEntry*>& path);
+  void CalculateTotalTicks();
+  void SetActualSamplingRate(double actual_sampling_rate);
+
+  INLINE(const char* title() const) { return title_; }
+  INLINE(unsigned uid() const) { return uid_; }
+  INLINE(const ProfileTree* top_down() const) { return &top_down_; }
+  INLINE(const ProfileTree* bottom_up() const) { return &bottom_up_; }
+
+  void UpdateTicksScale();
+
+  void ShortPrint();
+  void Print();
+
+ private:
+  const char* title_;
+  unsigned uid_;
+  ProfileTree top_down_;
+  ProfileTree bottom_up_;
+
+  DISALLOW_COPY_AND_ASSIGN(CpuProfile);
+};
+
+
+class CodeMap {
+ public:
+  CodeMap() { }
+  INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
+  INLINE(void MoveCode(Address from, Address to));
+  INLINE(void DeleteCode(Address addr));
+  void AddAlias(Address alias, Address addr);
+  CodeEntry* FindEntry(Address addr);
+
+  void Print();
+
+ private:
+  struct CodeEntryInfo {
+    CodeEntryInfo(CodeEntry* an_entry, unsigned a_size)
+        : entry(an_entry), size(a_size) { }
+    CodeEntry* entry;
+    unsigned size;
+  };
+
+  struct CodeTreeConfig {
+    typedef Address Key;
+    typedef CodeEntryInfo Value;
+    static const Key kNoKey;
+    static const Value kNoValue;
+    static int Compare(const Key& a, const Key& b) {
+      return a < b ? -1 : (a > b ? 1 : 0);
+    }
+  };
+  typedef SplayTree<CodeTreeConfig> CodeTree;
+
+  class CodeTreePrinter {
+   public:
+    void Call(const Address& key, const CodeEntryInfo& value);
+  };
+
+  CodeTree tree_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeMap);
+};
+
+
+class CpuProfilesCollection {
+ public:
+  CpuProfilesCollection();
+  ~CpuProfilesCollection();
+
+  bool StartProfiling(const char* title, unsigned uid);
+  bool StartProfiling(String* title, unsigned uid);
+  CpuProfile* StopProfiling(const char* title, double actual_sampling_rate);
+  CpuProfile* StopProfiling(String* title, double actual_sampling_rate);
+  INLINE(List<CpuProfile*>* profiles()) { return &profiles_; }
+  CpuProfile* GetProfile(unsigned uid);
+  inline bool is_last_profile();
+
+  CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
+                          String* name, String* resource_name, int line_number);
+  CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
+  CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
+                          const char* name_prefix, String* name);
+  CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
+
+  // Called from profile generator thread.
+  void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
+
+ private:
+  INLINE(const char* GetFunctionName(String* name));
+  INLINE(const char* GetFunctionName(const char* name));
+  const char* GetName(String* name);
+  const char* GetName(int args_count);
+
+  INLINE(static bool StringsMatch(void* key1, void* key2)) {
+    return strcmp(reinterpret_cast<char*>(key1),
+                  reinterpret_cast<char*>(key2)) == 0;
+  }
+
+  INLINE(static bool CpuProfilesMatch(void* key1, void* key2)) {
+    return key1 == key2;
+  }
+
+  // String::Hash -> const char*
+  HashMap function_and_resource_names_;
+  // args_count -> char*
+  List<char*> args_count_names_;
+  List<CodeEntry*> code_entries_;
+  List<CpuProfile*> profiles_;
+  // uid -> CpuProfile*
+  HashMap profiles_uids_;
+
+  // Accessed by VM thread and profile generator thread.
+  List<CpuProfile*> current_profiles_;
+  Semaphore* current_profiles_semaphore_;
+
+  DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
+};
+
+
+class SampleRateCalculator {
+ public:
+  SampleRateCalculator()
+      : result_(Logger::kSamplingIntervalMs * kResultScale),
+        ticks_per_ms_(Logger::kSamplingIntervalMs),
+        measurements_count_(0),
+        wall_time_query_countdown_(1) {
+  }
+
+  double ticks_per_ms() {
+    return result_ / static_cast<double>(kResultScale);
+  }
+  void Tick();
+  void UpdateMeasurements(double current_time);
+
+  // Instead of querying the current wall time on each tick,
+  // we use this constant to control query intervals.
+  static const unsigned kWallTimeQueryIntervalMs = 100;
+
+ private:
+  // As the result needs to be accessed from a different thread, we
+  // use a type that guarantees atomic writes to memory.  There should
+  // be <= 1000 ticks per second, so storing a value on the order of
+  // 10 ** 5 should provide enough precision while staying clear of
+  // overflow.
+  static const int kResultScale = 100000;
+
+  AtomicWord result_;
+  // All other fields are accessed only from the sampler thread.
+  double ticks_per_ms_;
+  unsigned measurements_count_;
+  unsigned wall_time_query_countdown_;
+  double last_wall_time_;
+};
+
+
+class ProfileGenerator {
+ public:
+  explicit ProfileGenerator(CpuProfilesCollection* profiles);
+
+  INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
+                                 String* name,
+                                 String* resource_name,
+                                 int line_number)) {
+    return profiles_->NewCodeEntry(tag, name, resource_name, line_number);
+  }
+
+  INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
+                                 const char* name)) {
+    return profiles_->NewCodeEntry(tag, name);
+  }
+
+  INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
+                                 const char* name_prefix,
+                                 String* name)) {
+    return profiles_->NewCodeEntry(tag, name_prefix, name);
+  }
+
+  INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
+                                 int args_count)) {
+    return profiles_->NewCodeEntry(tag, args_count);
+  }
+
+  void RecordTickSample(const TickSample& sample);
+
+  INLINE(CodeMap* code_map()) { return &code_map_; }
+
+  INLINE(void Tick()) { sample_rate_calc_.Tick(); }
+  INLINE(double actual_sampling_rate()) {
+    return sample_rate_calc_.ticks_per_ms();
+  }
+
+  static const char* kAnonymousFunctionName;
+  static const char* kProgramEntryName;
+  static const char* kGarbageCollectorEntryName;
+
+ private:
+  INLINE(CodeEntry* EntryForVMState(StateTag tag));
+
+  CpuProfilesCollection* profiles_;
+  CodeMap code_map_;
+  CodeEntry* program_entry_;
+  CodeEntry* gc_entry_;
+  SampleRateCalculator sample_rate_calc_;
+
+  DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
+};
+
+} }  // namespace v8::internal
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+#endif  // V8_PROFILE_GENERATOR_H_
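CpuProfile feeds every sampled stack into two ProfileTrees: AddPathFromEnd
walks the vector from the outermost frame inwards, producing the conventional
top-down call tree, while AddPathFromStart walks it innermost-first, producing
the bottom-up view that attributes ticks to callees. A tiny illustration of
the two insertion orders for a single sample (frame names invented):

  #include <stdio.h>

  int main() {
    // One sampled stack as RecordTickSample lays it out: innermost first.
    const char* path[] = { "c", "b", "a" };  // a called b, b called c
    printf("top-down:  root");               // AddPathFromEnd order
    for (int i = 2; i >= 0; i--) printf(" -> %s", path[i]);
    printf("\nbottom-up: root");             // AddPathFromStart order
    for (int i = 0; i < 3; i++) printf(" -> %s", path[i]);
    printf("\n");
    return 0;
  }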
diff --git a/src/regexp-macro-assembler-irregexp-inl.h b/src/regexp-macro-assembler-irregexp-inl.h
index b487468..f2a4e85 100644
--- a/src/regexp-macro-assembler-irregexp-inl.h
+++ b/src/regexp-macro-assembler-irregexp-inl.h
@@ -38,7 +38,7 @@
 namespace v8 {
 namespace internal {
 
-#ifndef V8_NATIVE_REGEXP
+#ifdef V8_INTERPRETED_REGEXP
 
 void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
                                         uint32_t twenty_four_bits) {
@@ -71,7 +71,7 @@
   pc_ += 4;
 }
 
-#endif  // ! V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 } }  // namespace v8::internal
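The regexp-macro-assembler changes here and below are one mechanical rename:
the configuration macro flips polarity from V8_NATIVE_REGEXP (native unless
defined away) to V8_INTERPRETED_REGEXP (interpreted only when explicitly set),
so every #ifndef becomes #ifdef and vice versa, and an unconfigured build now
defaults to the native engine. In sketch form, with placeholder declarations:

  // Before this patch the bytecode path was the #ifndef branch:
  //   #ifndef V8_NATIVE_REGEXP   ->   #ifdef V8_INTERPRETED_REGEXP
  #ifdef V8_INTERPRETED_REGEXP
  class RegExpMacroAssemblerIrregexp;   // bytecode emitter (placeholder)
  #else
  class NativeRegExpMacroAssembler;     // native emitter (placeholder)
  #endif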
 
diff --git a/src/regexp-macro-assembler-irregexp.cc b/src/regexp-macro-assembler-irregexp.cc
index f9c7eee..90abe91 100644
--- a/src/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp-macro-assembler-irregexp.cc
@@ -36,7 +36,7 @@
 namespace v8 {
 namespace internal {
 
-#ifndef V8_NATIVE_REGEXP
+#ifdef V8_INTERPRETED_REGEXP
 
 RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
     : buffer_(buffer),
@@ -459,6 +459,6 @@
   }
 }
 
-#endif  // !V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 } }  // namespace v8::internal
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index 642a283..3ddbc2f 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -31,7 +31,7 @@
 namespace v8 {
 namespace internal {
 
-#ifndef V8_NATIVE_REGEXP
+#ifdef V8_INTERPRETED_REGEXP
 
 class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
  public:
@@ -134,7 +134,7 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
 };
 
-#endif  // !V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 } }  // namespace v8::internal
 
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index c5c2919..c08602e 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -37,8 +37,8 @@
     RegExpMacroAssembler* assembler) :
   assembler_(assembler) {
   unsigned int type = assembler->Implementation();
-  ASSERT(type < 3);
-  const char* impl_names[3] = {"IA32", "ARM", "Bytecode"};
+  ASSERT(type < 4);
+  const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"};
   PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
 }
 
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 0fcfc33..fc65947 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -52,7 +52,7 @@
 }
 
 
-#ifdef V8_NATIVE_REGEXP  // Avoid unused code, e.g., on ARM.
+#ifndef V8_INTERPRETED_REGEXP  // Avoid unused code, e.g., on ARM.
 
 NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() {
 }
@@ -258,5 +258,6 @@
   return new_stack_base - stack_content_size;
 }
 
-#endif  // V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
+
 } }  // namespace v8::internal
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 105d8cc..9f8e2c5 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -161,7 +161,7 @@
 };
 
 
-#ifdef V8_NATIVE_REGEXP  // Avoid compiling unused code.
+#ifndef V8_INTERPRETED_REGEXP  // Avoid compiling unused code.
 
 class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
  public:
@@ -221,7 +221,7 @@
                         int* output);
 };
 
-#endif  // V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 } }  // namespace v8::internal
 
diff --git a/src/regexp.js b/src/regexp.js
index 7bec455..24e3309 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -71,31 +71,10 @@
     }
   }
 
-  if (isConstructorCall) {
-    // ECMA-262, section 15.10.7.1.
-    %SetProperty(object, 'source', pattern,
-                 DONT_DELETE |  READ_ONLY | DONT_ENUM);
-
-    // ECMA-262, section 15.10.7.2.
-    %SetProperty(object, 'global', global, DONT_DELETE | READ_ONLY | DONT_ENUM);
-
-    // ECMA-262, section 15.10.7.3.
-    %SetProperty(object, 'ignoreCase', ignoreCase,
-                 DONT_DELETE | READ_ONLY | DONT_ENUM);
-
-    // ECMA-262, section 15.10.7.4.
-    %SetProperty(object, 'multiline', multiline,
-                 DONT_DELETE | READ_ONLY | DONT_ENUM);
-
-    // ECMA-262, section 15.10.7.5.
-    %SetProperty(object, 'lastIndex', 0, DONT_DELETE | DONT_ENUM);
-  } else { // RegExp is being recompiled via RegExp.prototype.compile.
-    %IgnoreAttributesAndSetProperty(object, 'source', pattern);
-    %IgnoreAttributesAndSetProperty(object, 'global', global);
-    %IgnoreAttributesAndSetProperty(object, 'ignoreCase', ignoreCase);
-    %IgnoreAttributesAndSetProperty(object, 'multiline', multiline);
-    %IgnoreAttributesAndSetProperty(object, 'lastIndex', 0);
+  if (!isConstructorCall) {
+    regExpCache.type = 'none';
   }
+  %RegExpInitializeObject(object, pattern, global, ignoreCase, multiline);
 
   // Call internal function to compile the pattern.
   %RegExpCompile(object, pattern, flags);
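The remaining hunks in this file add a one-entry memoization cache: RegExpExec
and RegExpTest remember (type, regExp, subject, lastIndex) together with the
last answer, short-circuit exact repeats, and clone cached exec results since
callers may mutate the returned array; the new lastMatchInfoOverride lets the
RegExp static getters serve cache-sourced results in place of lastMatchInfo.
The cache-hit pattern, reduced to a minimal C++ sketch with invented names:

  #include <string>

  // Single-entry cache in the spirit of regExpCache: remember the last
  // query and its answer; hit only on an exact repeat of all key fields.
  struct LastQueryCache {
    const void* regexp;      // identity of the regexp object
    std::string subject;
    int last_index;
    bool answer;             // cached result of the "test" flavour
    bool valid;
  };

  typedef bool (*TestFn)(const void*, const std::string&, int);

  bool CachedTest(LastQueryCache* cache, const void* re,
                  const std::string& subject, int last_index, TestFn run) {
    if (cache->valid && cache->regexp == re &&
        cache->subject == subject && cache->last_index == last_index) {
      return cache->answer;  // hit: skip the regexp engine entirely
    }
    bool result = run(re, subject, last_index);
    cache->regexp = re;      // miss: run once, then refill every field
    cache->subject = subject;
    cache->last_index = last_index;
    cache->answer = result;
    cache->valid = true;
    return result;
  }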
@@ -136,15 +115,94 @@
 
 
 function DoRegExpExec(regexp, string, index) {
-  return %_RegExpExec(regexp, string, index, lastMatchInfo);
+  var result = %_RegExpExec(regexp, string, index, lastMatchInfo);
+  if (result !== null) lastMatchInfoOverride = null;
+  return result;
+}
+
+
+function RegExpCache() {
+  this.type = 'none';
+  this.regExp = 0;
+  this.subject = 0;
+  this.replaceString = 0;
+  this.lastIndex = 0;
+  this.answer = 0;
+  // answerSaved marks whether the contents of answer are valid for a cache
+  // hit in RegExpExec, StringMatch and StringSplit.
+  this.answerSaved = false;
+}
+
+
+var regExpCache = new RegExpCache();
+
+
+function CloneRegExpResult(array) {
+  if (array == null) return null;
+  var length = array.length;
+  var answer = %_RegExpConstructResult(length, array.index, array.input);
+  for (var i = 0; i < length; i++) {
+    answer[i] = array[i];
+  }
+  return answer;
+}
+
+
+function BuildResultFromMatchInfo(lastMatchInfo, s) {
+  var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
+  var result = %_RegExpConstructResult(numResults, lastMatchInfo[CAPTURE0], s);
+  if (numResults === 1) {
+    var matchStart = lastMatchInfo[CAPTURE(0)];
+    var matchEnd = lastMatchInfo[CAPTURE(1)];
+    result[0] = SubString(s, matchStart, matchEnd);
+  } else {
+    for (var i = 0; i < numResults; i++) {
+      var matchStart = lastMatchInfo[CAPTURE(i << 1)];
+      var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)];
+      if (matchStart != -1 && matchEnd != -1) {
+        result[i] = SubString(s, matchStart, matchEnd);
+      } else {
+        // Make sure the element is present. Avoid reading the undefined
+        // property from the global object since this may change.
+        result[i] = void 0;
+      }
+    }
+  }
+  return result;
+}
+
+
+function RegExpExecNoTests(regexp, string, start) {
+  // Must be called with a RegExp, a string, and a non-negative integer.
+  var matchInfo = DoRegExpExec(regexp, string, start);
+  var result = null;
+  if (matchInfo !== null) {
+    result = BuildResultFromMatchInfo(matchInfo, string);
+  }
+  return result;
 }
 
 
 function RegExpExec(string) {
   if (!IS_REGEXP(this)) {
-    throw MakeTypeError('method_called_on_incompatible',
+    throw MakeTypeError('incompatible_method_receiver',
                         ['RegExp.prototype.exec', this]);
   }
+
+  var cache = regExpCache;
+  var saveAnswer = false;
+
+  if (%_ObjectEquals(cache.type, 'exec') &&
+      %_ObjectEquals(cache.lastIndex, this.lastIndex) &&
+      %_ObjectEquals(cache.regExp, this) &&
+      %_ObjectEquals(cache.subject, string)) {
+    if (cache.answerSaved) {
+      return CloneRegExpResult(cache.answer);
+    } else {
+      saveAnswer = true;
+    }
+  }
+
   if (%_ArgumentsLength() == 0) {
     var regExpInput = LAST_INPUT(lastMatchInfo);
     if (IS_UNDEFINED(regExpInput)) {
@@ -152,9 +210,14 @@
     }
     string = regExpInput;
   }
-  var s = ToString(string);
-  var length = s.length;
+  var s;
+  if (IS_STRING(string)) {
+    s = string;
+  } else {
+    s = ToString(string);
+  }
   var lastIndex = this.lastIndex;
+
   var i = this.global ? TO_INTEGER(lastIndex) : 0;
 
   if (i < 0 || i > s.length) {
@@ -168,28 +231,29 @@
 
   if (matchIndices == null) {
     if (this.global) this.lastIndex = 0;
-    return matchIndices; // no match
+    cache.lastIndex = lastIndex;
+    cache.regExp = this;
+    cache.subject = s;
+    cache.answer = matchIndices;  // Null.
+    cache.answerSaved = true;     // Safe since no cloning is needed.
+    cache.type = 'exec';
+    return matchIndices;          // No match.
   }
+  lastMatchInfoOverride = null;
+  var result = BuildResultFromMatchInfo(matchIndices, s);
 
-  var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
-  var result = new $Array(numResults);
-  for (var i = 0; i < numResults; i++) {
-    var matchStart = lastMatchInfo[CAPTURE(i << 1)];
-    var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)];
-    if (matchStart != -1 && matchEnd != -1) {
-      result[i] = SubString(s, matchStart, matchEnd);
-    } else {
-      // Make sure the element is present. Avoid reading the undefined
-      // property from the global object since this may change.
-      result[i] = void 0;
-    }
-  }
-
-  if (this.global)
+  if (this.global) {
     this.lastIndex = lastMatchInfo[CAPTURE1];
-  result.index = lastMatchInfo[CAPTURE0];
-  result.input = s;
+  } else {
+    cache.regExp = this;
+    cache.subject = s;
+    cache.lastIndex = lastIndex;
+    if (saveAnswer) cache.answer = CloneRegExpResult(result);
+    cache.answerSaved = saveAnswer;
+    cache.type = 'exec';
+  }
   return result;
+
 }
 
 
@@ -199,7 +263,7 @@
 // else implements.
 function RegExpTest(string) {
   if (!IS_REGEXP(this)) {
-    throw MakeTypeError('method_called_on_incompatible',
+    throw MakeTypeError('incompatible_method_receiver',
                         ['RegExp.prototype.test', this]);
   }
   if (%_ArgumentsLength() == 0) {
@@ -209,13 +273,35 @@
     }
     string = regExpInput;
   }
-  var s = ToString(string);
-  var length = s.length;
+  var s;
+  if (IS_STRING(string)) {
+    s = string;
+  } else {
+    s = ToString(string);
+  }
+
   var lastIndex = this.lastIndex;
+
+  var cache = regExpCache;
+
+  if (%_ObjectEquals(cache.type, 'test') &&
+      %_ObjectEquals(cache.regExp, this) &&
+      %_ObjectEquals(cache.subject, string) &&
+      %_ObjectEquals(cache.lastIndex, lastIndex)) {
+    return cache.answer;
+  }
+
+  var length = s.length;
   var i = this.global ? TO_INTEGER(lastIndex) : 0;
 
+  cache.type = 'test';
+  cache.regExp = this;
+  cache.subject = s;
+  cache.lastIndex = i;
+
   if (i < 0 || i > s.length) {
     this.lastIndex = 0;
+    cache.answer = false;
     return false;
   }
 
@@ -225,10 +311,12 @@
 
   if (matchIndices == null) {
     if (this.global) this.lastIndex = 0;
+    cache.answer = false;
     return false;
   }
-
+  lastMatchInfoOverride = null;
   if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
+  cache.answer = true;
   return true;
 }
 
@@ -254,6 +342,9 @@
 // on the captures array of the last successful match and the subject string
 // of the last successful match.
 function RegExpGetLastMatch() {
+  if (lastMatchInfoOverride !== null) {
+    return lastMatchInfoOverride[0];
+  }
   var regExpSubject = LAST_SUBJECT(lastMatchInfo);
   return SubString(regExpSubject,
                    lastMatchInfo[CAPTURE0],
@@ -262,6 +353,11 @@
 
 
 function RegExpGetLastParen() {
+  if (lastMatchInfoOverride) {
+    var override = lastMatchInfoOverride;
+    if (override.length <= 3) return '';
+    return override[override.length - 3];
+  }
   var length = NUMBER_OF_CAPTURES(lastMatchInfo);
   if (length <= 2) return '';  // There were no captures.
   // We match the SpiderMonkey behavior: return the substring defined by the
@@ -278,17 +374,32 @@
 
 
 function RegExpGetLeftContext() {
-  return SubString(LAST_SUBJECT(lastMatchInfo),
-                   0,
-                   lastMatchInfo[CAPTURE0]);
+  var start_index;
+  var subject;
+  if (!lastMatchInfoOverride) {
+    start_index = lastMatchInfo[CAPTURE0];
+    subject = LAST_SUBJECT(lastMatchInfo);
+  } else {
+    var override = lastMatchInfoOverride;
+    start_index = override[override.length - 2];
+    subject = override[override.length - 1];
+  }
+  return SubString(subject, 0, start_index);
 }
 
 
 function RegExpGetRightContext() {
-  var subject = LAST_SUBJECT(lastMatchInfo);
-  return SubString(subject,
-                   lastMatchInfo[CAPTURE1],
-                   subject.length);
+  var start_index;
+  var subject;
+  if (!lastMatchInfoOverride) {
+    start_index = lastMatchInfo[CAPTURE1];
+    subject = LAST_SUBJECT(lastMatchInfo);
+  } else {
+    var override = lastMatchInfoOverride;
+    subject = override[override.length - 1];
+    start_index = override[override.length - 2] + override[0].length;
+  }
+  return SubString(subject, start_index, subject.length);
 }
 
 
@@ -297,6 +408,10 @@
 // called with indices from 1 to 9.
 function RegExpMakeCaptureGetter(n) {
   return function() {
+    if (lastMatchInfoOverride) {
+      if (n < lastMatchInfoOverride.length - 2) return lastMatchInfoOverride[n];
+      return '';
+    }
     var index = n * 2;
     if (index >= NUMBER_OF_CAPTURES(lastMatchInfo)) return '';
     var matchStart = lastMatchInfo[CAPTURE(index)];
@@ -321,6 +436,12 @@
     0,                 // REGEXP_FIRST_CAPTURE + 1
 ];
 
+// Override of the last match info, as an array of actual substrings of
+// the form [match, capture_1, ..., capture_n, match_start, subject],
+// i.e. the "apply" argument list of a replacement function. Used
+// internally by String.replace when called with a function argument.
+var lastMatchInfoOverride = null;
+
 // -------------------------------------------------------------------
 
 function SetupRegExp() {
@@ -340,13 +461,14 @@
   %FunctionSetLength($RegExp.prototype.compile, 1);
 
   // The properties input, $input, and $_ are aliases for each other.  When this
-  // value is set the value it is set to is coerced to a string. 
+  // value is set the value it is set to is coerced to a string.
   // Getter and setter for the input.
   function RegExpGetInput() {
     var regExpInput = LAST_INPUT(lastMatchInfo);
     return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
   }
   function RegExpSetInput(string) {
+    regExpCache.type = 'none';
     LAST_INPUT(lastMatchInfo) = ToString(string);
   };
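
The RegExpCache above is a single-entry memo: one (type, regExp, subject, lastIndex) tuple per operation kind, so tight loops that re-run the same match skip the native call. Results are cloned on a hit because callers may mutate the returned array, and the answer is only saved once a repeated lookup is seen. A minimal C++ sketch of the same single-entry idea (names and types are illustrative, not V8's, and it saves the answer eagerly rather than on the second hit):

#include <string>
#include <vector>

// Hypothetical single-entry cache mirroring RegExpCache: one slot, keyed on
// the exact inputs, overwritten whenever any input differs.
struct MatchCache {
  std::string type;      // "exec", "test" or "none".
  const void* regexp;    // Identity of the regexp object.
  std::string subject;
  int last_index;
  std::vector<std::string> answer;
  bool answer_saved;
};

static MatchCache cache = {"none", nullptr, "", 0, {}, false};

std::vector<std::string> ExecWithCache(
    const void* regexp, const std::string& subject, int last_index,
    std::vector<std::string> (*exec)(const std::string&, int)) {
  if (cache.type == "exec" && cache.regexp == regexp &&
      cache.subject == subject && cache.last_index == last_index &&
      cache.answer_saved) {
    return cache.answer;  // Returned by value: a copy, like CloneRegExpResult.
  }
  std::vector<std::string> result = exec(subject, last_index);
  cache = {"exec", regexp, subject, last_index, result, true};
  return result;
}
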
 
diff --git a/src/register-allocator-inl.h b/src/register-allocator-inl.h
index a99f455..e0ea9e1 100644
--- a/src/register-allocator-inl.h
+++ b/src/register-allocator-inl.h
@@ -30,7 +30,6 @@
 
 #include "codegen.h"
 #include "register-allocator.h"
-#include "virtual-frame.h"
 
 #if V8_TARGET_ARCH_IA32
 #include "ia32/register-allocator-ia32-inl.h"
@@ -104,6 +103,39 @@
   registers_.Unuse(ToNumber(reg));
 }
 
+
+TypeInfo Result::type_info() const {
+  ASSERT(is_valid());
+  return TypeInfo::FromInt(TypeInfoField::decode(value_));
+}
+
+
+void Result::set_type_info(TypeInfo info) {
+  ASSERT(is_valid());
+  value_ &= ~TypeInfoField::mask();
+  value_ |= TypeInfoField::encode(info.ToInt());
+}
+
+
+bool Result::is_number() const {
+  return type_info().IsNumber();
+}
+
+
+bool Result::is_smi() const {
+  return type_info().IsSmi();
+}
+
+
+bool Result::is_integer32() const {
+  return type_info().IsInteger32();
+}
+
+
+bool Result::is_double() const {
+  return type_info().IsDouble();
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_REGISTER_ALLOCATOR_INL_H_
diff --git a/src/register-allocator.cc b/src/register-allocator.cc
index 349cc24..b9989a5 100644
--- a/src/register-allocator.cc
+++ b/src/register-allocator.cc
@@ -29,6 +29,7 @@
 
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -37,11 +38,11 @@
 // Result implementation.
 
 
-Result::Result(Register reg, NumberInfo::Type info) {
+Result::Result(Register reg, TypeInfo info) {
   ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
   CodeGeneratorScope::Current()->allocator()->Use(reg);
   value_ = TypeField::encode(REGISTER)
-      | NumberInfoField::encode(info)
+      | TypeInfoField::encode(info.ToInt())
       | DataField::encode(reg.code_);
 }
 
@@ -52,23 +53,6 @@
 }
 
 
-NumberInfo::Type Result::number_info() {
-  ASSERT(is_valid());
-  if (!is_constant()) return NumberInfoField::decode(value_);
-  Handle<Object> value = handle();
-  if (value->IsSmi()) return NumberInfo::kSmi;
-  if (value->IsHeapNumber()) return NumberInfo::kHeapNumber;
-  return NumberInfo::kUnknown;
-}
-
-
-void Result::set_number_info(NumberInfo::Type info) {
-  ASSERT(is_valid());
-  value_ = value_ & ~NumberInfoField::mask();
-  value_ = value_ | NumberInfoField::encode(info);
-}
-
-
 // -------------------------------------------------------------------------
 // RegisterAllocator implementation.
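
Result's type tracking moves from the three-valued NumberInfo to TypeInfo, which round-trips through a small integer (ToInt/FromInt) and answers subset queries such as IsSmi, IsInteger32 and IsNumber. One plausible shape for such a lattice, where a value's bits being a subset of a type's bits means "is that type"; the constants below are invented for illustration and are not V8's:

#include <cassert>

// Toy lattice: each bit is one primitive possibility.
enum TypeBits {
  kSmi       = 1 << 0,
  kOtherInt  = 1 << 1,                // Integer32 values outside smi range.
  kDouble    = 1 << 2,                // Other heap numbers.
  kOtherType = 1 << 3,                // Strings, objects, ...
  kInteger32 = kSmi | kOtherInt,
  kNumber    = kInteger32 | kDouble,
  kUnknown   = kNumber | kOtherType
};

struct TypeInfoSketch {
  int bits;
  static TypeInfoSketch FromInt(int v) { TypeInfoSketch t = {v}; return t; }
  int ToInt() const { return bits; }
  bool Is(int super) const { return (bits & ~super) == 0; }
  bool IsNumber() const { return Is(kNumber); }
  bool IsSmi() const { return Is(kSmi); }
  bool IsInteger32() const { return Is(kInteger32); }
};

int main() {
  TypeInfoSketch smi = TypeInfoSketch::FromInt(kSmi);
  assert(smi.IsSmi() && smi.IsInteger32() && smi.IsNumber());
  TypeInfoSketch dbl = TypeInfoSketch::FromInt(kDouble);
  assert(dbl.IsNumber() && !dbl.IsInteger32());
  assert(!TypeInfoSketch::FromInt(kUnknown).IsNumber());
  return 0;
}
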
 
diff --git a/src/register-allocator.h b/src/register-allocator.h
index 747200a..a03a9d2 100644
--- a/src/register-allocator.h
+++ b/src/register-allocator.h
@@ -29,7 +29,7 @@
 #define V8_REGISTER_ALLOCATOR_H_
 
 #include "macro-assembler.h"
-#include "number-info.h"
+#include "type-info.h"
 
 #if V8_TARGET_ARCH_IA32
 #include "ia32/register-allocator-ia32.h"
@@ -65,12 +65,14 @@
   Result() { invalidate(); }
 
   // Construct a register Result.
-  explicit Result(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
+  explicit Result(Register reg, TypeInfo info = TypeInfo::Unknown());
 
   // Construct a Result whose value is a compile-time constant.
   explicit Result(Handle<Object> value) {
+    TypeInfo info = TypeInfo::TypeFromValue(value);
     value_ = TypeField::encode(CONSTANT)
-        | NumberInfoField::encode(NumberInfo::kUninitialized)
+        | TypeInfoField::encode(info.ToInt())
+        | IsUntaggedInt32Field::encode(false)
         | DataField::encode(ConstantList()->length());
     ConstantList()->Add(value);
   }
@@ -101,18 +103,30 @@
 
   void invalidate() { value_ = TypeField::encode(INVALID); }
 
-  NumberInfo::Type number_info();
-  void set_number_info(NumberInfo::Type info);
-  bool is_number() {
-    return (number_info() & NumberInfo::kNumber) != 0;
-  }
-  bool is_smi() { return number_info() == NumberInfo::kSmi; }
-  bool is_heap_number() { return number_info() == NumberInfo::kHeapNumber; }
+  inline TypeInfo type_info() const;
+  inline void set_type_info(TypeInfo info);
+  inline bool is_number() const;
+  inline bool is_smi() const;
+  inline bool is_integer32() const;
+  inline bool is_double() const;
 
   bool is_valid() const { return type() != INVALID; }
   bool is_register() const { return type() == REGISTER; }
   bool is_constant() const { return type() == CONSTANT; }
 
+  // An untagged int32 Result contains a signed int32 in a register
+  // or as a constant.  These are only allowed in a side-effect-free
+  // int32 calculation, and if a non-int32 input shows up or an overflow
+  // occurs, we bail out and drop all the int32 values.  Constants are
+  // not converted to int32 until they are loaded into a register.
+  bool is_untagged_int32() const {
+    return IsUntaggedInt32Field::decode(value_);
+  }
+  void set_untagged_int32(bool value) {
+    value_ &= ~IsUntaggedInt32Field::mask();
+    value_ |= IsUntaggedInt32Field::encode(value);
+  }
+
   Register reg() const {
     ASSERT(is_register());
     uint32_t reg = DataField::decode(value_);
@@ -139,9 +153,11 @@
  private:
   uint32_t value_;
 
+  // Declare BitFields with template parameters <type, start, size>.
   class TypeField: public BitField<Type, 0, 2> {};
-  class NumberInfoField : public BitField<NumberInfo::Type, 2, 3> {};
-  class DataField: public BitField<uint32_t, 5, 32 - 5> {};
+  class TypeInfoField : public BitField<int, 2, 6> {};
+  class IsUntaggedInt32Field : public BitField<bool, 8, 1> {};
+  class DataField: public BitField<uint32_t, 9, 32 - 9> {};
 
   inline void CopyTo(Result* destination) const;
 
@@ -197,7 +213,11 @@
   }
 
  private:
-  static const int kNumRegisters = RegisterAllocatorConstants::kNumRegisters;
+  // C++ doesn't allow zero-length arrays, so we make the array length 1
+  // even if we don't need it.
+  static const int kNumRegisters =
+      (RegisterAllocatorConstants::kNumRegisters == 0) ?
+      1 : RegisterAllocatorConstants::kNumRegisters;
 
   int ref_counts_[kNumRegisters];
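
The repacked Result::value_ spends 2 bits on the kind tag, 6 on TypeInfo, 1 on the untagged-int32 flag, and the remaining 23 on the payload, as declared by the BitField members above. A self-contained sketch of that idiom (a simplified stand-in for V8's BitField template, not the real header):

#include <cassert>
#include <cstdint>

// Simplified BitField: a field of 'size' bits starting at bit 'shift',
// matching the <type, start, size> template parameters used above.
template <class T, int shift, int size>
struct BitField {
  static uint32_t mask() { return ((1u << size) - 1) << shift; }
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) & ((1u << size) - 1)) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & mask()) >> shift);
  }
};

// Same layout as the new Result encoding: 2 + 6 + 1 + 23 = 32 bits.
typedef BitField<int, 0, 2> TypeField;
typedef BitField<int, 2, 6> TypeInfoField;
typedef BitField<bool, 8, 1> IsUntaggedInt32Field;
typedef BitField<uint32_t, 9, 32 - 9> DataField;

int main() {
  uint32_t value = TypeField::encode(1)             // e.g. REGISTER.
                 | TypeInfoField::encode(5)         // Some TypeInfo::ToInt().
                 | IsUntaggedInt32Field::encode(false)
                 | DataField::encode(3);            // e.g. a register code.
  assert(TypeInfoField::decode(value) == 5);
  assert(DataField::decode(value) == 3);
  // Updating one field, as set_type_info does: clear its bits, OR in new ones.
  value = (value & ~TypeInfoField::mask()) | TypeInfoField::encode(7);
  assert(TypeInfoField::decode(value) == 7);
  return 0;
}
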
 
diff --git a/src/rewriter.cc b/src/rewriter.cc
index b05cfae..c97408e 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -213,13 +213,14 @@
 }
 
 
-void AstOptimizer::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void AstOptimizer::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   USE(node);
 }
 
 
 void AstOptimizer::VisitConditional(Conditional* node) {
+  node->condition()->set_no_negative_zero(true);
   Visit(node->condition());
   Visit(node->then_expression());
   Visit(node->else_expression());
@@ -244,6 +245,14 @@
         !Heap::result_symbol()->Equals(*var->name())) {
       func_name_inferrer_.PushName(var->name());
     }
+
+    if (FLAG_safe_int32_compiler) {
+      if (var->IsStackAllocated() &&
+          !var->is_arguments() &&
+          var->mode() != Variable::CONST) {
+        node->set_side_effect_free(true);
+      }
+    }
   }
 }
 
@@ -252,11 +261,21 @@
   Handle<Object> literal = node->handle();
   if (literal->IsSmi()) {
     node->type()->SetAsLikelySmi();
+    node->set_side_effect_free(true);
   } else if (literal->IsString()) {
     Handle<String> lit_str(Handle<String>::cast(literal));
     if (!Heap::prototype_symbol()->Equals(*lit_str)) {
       func_name_inferrer_.PushName(lit_str);
     }
+  } else if (literal->IsHeapNumber()) {
+    if (node->to_int32()) {
+      // Any HeapNumber has an int32 value if it is the input to a bit op.
+      node->set_side_effect_free(true);
+    } else {
+      double double_value = HeapNumber::cast(*literal)->value();
+      int32_t int32_value = DoubleToInt32(double_value);
+      node->set_side_effect_free(double_value == int32_value);
+    }
   }
 }
 
@@ -310,6 +329,8 @@
       node->type()->SetAsLikelySmiIfUnknown();
       node->target()->type()->SetAsLikelySmiIfUnknown();
       node->value()->type()->SetAsLikelySmiIfUnknown();
+      node->value()->set_to_int32(true);
+      node->value()->set_no_negative_zero(true);
       break;
     case Token::ASSIGN_ADD:
     case Token::ASSIGN_SUB:
@@ -384,6 +405,7 @@
 
 
 void AstOptimizer::VisitProperty(Property* node) {
+  node->key()->set_no_negative_zero(true);
   Visit(node->obj());
   Visit(node->key());
 }
@@ -413,12 +435,41 @@
 
 
 void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
+  if (node->op() == Token::ADD || node->op() == Token::SUB) {
+    node->expression()->set_no_negative_zero(node->no_negative_zero());
+  } else {
+    node->expression()->set_no_negative_zero(true);
+  }
   Visit(node->expression());
+  if (FLAG_safe_int32_compiler) {
+    switch (node->op()) {
+      case Token::BIT_NOT:
+        node->expression()->set_to_int32(true);
+        // Fall through.
+      case Token::ADD:
+      case Token::SUB:
+        node->set_side_effect_free(node->expression()->side_effect_free());
+        break;
+      case Token::NOT:
+      case Token::DELETE:
+      case Token::TYPEOF:
+      case Token::VOID:
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else if (node->op() == Token::BIT_NOT) {
+    node->expression()->set_to_int32(true);
+  }
 }
 
 
 void AstOptimizer::VisitCountOperation(CountOperation* node) {
   // Count operations assume that they work on Smis.
+  node->expression()->set_no_negative_zero(node->is_prefix() ?
+                                           true :
+                                           node->no_negative_zero());
   node->type()->SetAsLikelySmiIfUnknown();
   node->expression()->type()->SetAsLikelySmiIfUnknown();
   Visit(node->expression());
@@ -431,7 +482,12 @@
   switch (node->op()) {
     case Token::COMMA:
     case Token::OR:
+      node->left()->set_no_negative_zero(true);
+      node->right()->set_no_negative_zero(node->no_negative_zero());
+      break;
     case Token::AND:
+      node->left()->set_no_negative_zero(node->no_negative_zero());
+      node->right()->set_no_negative_zero(node->no_negative_zero());
       break;
     case Token::BIT_OR:
     case Token::BIT_XOR:
@@ -442,6 +498,10 @@
       node->type()->SetAsLikelySmiIfUnknown();
       node->left()->type()->SetAsLikelySmiIfUnknown();
       node->right()->type()->SetAsLikelySmiIfUnknown();
+      node->left()->set_to_int32(true);
+      node->right()->set_to_int32(true);
+      node->left()->set_no_negative_zero(true);
+      node->right()->set_no_negative_zero(true);
       break;
     case Token::ADD:
     case Token::SUB:
@@ -452,6 +512,13 @@
         node->left()->type()->SetAsLikelySmiIfUnknown();
         node->right()->type()->SetAsLikelySmiIfUnknown();
       }
+      node->left()->set_no_negative_zero(node->no_negative_zero());
+      node->right()->set_no_negative_zero(node->no_negative_zero());
+      if (node->op() == Token::DIV) {
+        node->right()->set_no_negative_zero(false);
+      } else if (node->op() == Token::MOD) {
+        node->right()->set_no_negative_zero(true);
+      }
       break;
     default:
       UNREACHABLE();
@@ -483,6 +550,41 @@
       }
     }
   }
+
+  if (FLAG_safe_int32_compiler) {
+    switch (node->op()) {
+      case Token::COMMA:
+      case Token::OR:
+      case Token::AND:
+        break;
+      case Token::BIT_OR:
+      case Token::BIT_XOR:
+      case Token::BIT_AND:
+      case Token::SHL:
+      case Token::SAR:
+      case Token::SHR:
+        // Add one to the number of bit operations in this expression.
+        node->set_num_bit_ops(1);
+        // Fall through.
+      case Token::ADD:
+      case Token::SUB:
+      case Token::MUL:
+      case Token::DIV:
+      case Token::MOD:
+        node->set_side_effect_free(node->left()->side_effect_free() &&
+                                   node->right()->side_effect_free());
+        node->set_num_bit_ops(node->num_bit_ops() +
+                                  node->left()->num_bit_ops() +
+                                  node->right()->num_bit_ops());
+        if (!node->no_negative_zero() && node->op() == Token::MUL) {
+          node->set_side_effect_free(false);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
 }
 
 
@@ -493,6 +595,10 @@
     node->right()->type()->SetAsLikelySmiIfUnknown();
   }
 
+  node->left()->set_no_negative_zero(true);
+  // Only [[HasInstance]] has the right argument passed unchanged to it.
+  node->right()->set_no_negative_zero(true);
+
   Visit(node->left());
   Visit(node->right());
 
@@ -698,8 +804,8 @@
 }
 
 
-void Processor::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void Processor::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   USE(node);
   UNREACHABLE();
 }
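
The no_negative_zero bits propagated above exist because the safe-int32 path cannot represent JavaScript's -0: MUL, for instance, only stays side-effect-free when the analysis shows a negative zero result could never be observed. A standalone demonstration of the hazard (illustrative only, not V8 code):

#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  // In JavaScript semantics, (-4) * 0 is -0, and 1 / -0 is -Infinity.
  double js_result = -4.0 * 0.0;
  assert(std::signbit(js_result));   // The sign survives: this is -0.0.
  assert(1.0 / js_result < 0);       // Observable as -Infinity.

  // An int32 fast path computes plain 0 and loses the sign, which is why
  // the rewriter clears side_effect_free for MUL unless no_negative_zero.
  int32_t int_result = -4 * 0;
  assert(int_result == 0);
  assert(1.0 / static_cast<double>(int_result) > 0);  // +Infinity instead.
  return 0;
}
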
diff --git a/src/runtime.cc b/src/runtime.cc
index 7075542..823889a 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -32,12 +32,14 @@
 #include "accessors.h"
 #include "api.h"
 #include "arguments.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "cpu.h"
 #include "dateparser-inl.h"
 #include "debug.h"
 #include "execution.h"
 #include "jsregexp.h"
+#include "liveedit.h"
 #include "parser.h"
 #include "platform.h"
 #include "runtime.h"
@@ -246,7 +248,8 @@
 
 static Handle<Object> CreateObjectLiteralBoilerplate(
     Handle<FixedArray> literals,
-    Handle<FixedArray> constant_properties) {
+    Handle<FixedArray> constant_properties,
+    bool should_have_fast_elements) {
   // Get the global context from the literals array.  This is the
   // context in which the function was created and we use the object
   // function from this context to create the object literal.  We do
@@ -262,6 +265,10 @@
                                             &is_result_from_cache);
 
   Handle<JSObject> boilerplate = Factory::NewJSObjectFromMap(map);
+
+  // Normalize the elements of the boilerplate to save space if needed.
+  if (!should_have_fast_elements) NormalizeElements(boilerplate);
+
   {  // Add the constant properties to the boilerplate.
     int length = constant_properties->length();
     OptimizedObjectForAddingMultipleProperties opt(boilerplate,
@@ -343,8 +350,10 @@
     Handle<FixedArray> array) {
   Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
   switch (CompileTimeValue::GetType(array)) {
-    case CompileTimeValue::OBJECT_LITERAL:
-      return CreateObjectLiteralBoilerplate(literals, elements);
+    case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
+      return CreateObjectLiteralBoilerplate(literals, elements, true);
+    case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
+      return CreateObjectLiteralBoilerplate(literals, elements, false);
     case CompileTimeValue::ARRAY_LITERAL:
       return CreateArrayLiteralBoilerplate(literals, elements);
     default:
@@ -354,26 +363,6 @@
 }
 
 
-static Object* Runtime_CreateObjectLiteralBoilerplate(Arguments args) {
-  HandleScope scope;
-  ASSERT(args.length() == 3);
-  // Copy the arguments.
-  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
-  CONVERT_SMI_CHECKED(literals_index, args[1]);
-  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
-
-  Handle<Object> result =
-    CreateObjectLiteralBoilerplate(literals, constant_properties);
-
-  if (result.is_null()) return Failure::Exception();
-
-  // Update the functions literal and return the boilerplate.
-  literals->set(literals_index, *result);
-
-  return *result;
-}
-
-
 static Object* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
   // Takes a FixedArray of elements containing the literal elements of
   // the array literal and produces JSArray with those elements.
@@ -397,15 +386,19 @@
 
 static Object* Runtime_CreateObjectLiteral(Arguments args) {
   HandleScope scope;
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_CHECKED(literals_index, args[1]);
   CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+  CONVERT_SMI_CHECKED(fast_elements, args[3]);
+  bool should_have_fast_elements = fast_elements == 1;
 
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> boilerplate(literals->get(literals_index));
   if (*boilerplate == Heap::undefined_value()) {
-    boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
+    boilerplate = CreateObjectLiteralBoilerplate(literals,
+                                                 constant_properties,
+                                                 should_have_fast_elements);
     if (boilerplate.is_null()) return Failure::Exception();
     // Update the functions literal and return the boilerplate.
     literals->set(literals_index, *boilerplate);
@@ -416,15 +409,19 @@
 
 static Object* Runtime_CreateObjectLiteralShallow(Arguments args) {
   HandleScope scope;
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_CHECKED(literals_index, args[1]);
   CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+  CONVERT_SMI_CHECKED(fast_elements, args[3]);
+  bool should_have_fast_elements = fast_elements == 1;
 
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> boilerplate(literals->get(literals_index));
   if (*boilerplate == Heap::undefined_value()) {
-    boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
+    boilerplate = CreateObjectLiteralBoilerplate(literals,
+                                                 constant_properties,
+                                                 should_have_fast_elements);
     if (boilerplate.is_null()) return Failure::Exception();
     // Update the functions literal and return the boilerplate.
     literals->set(literals_index, *boilerplate);
@@ -791,9 +788,10 @@
       }
     } else {
       // Copy the function and update its context. Use it as value.
-      Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(value);
+      Handle<SharedFunctionInfo> shared =
+          Handle<SharedFunctionInfo>::cast(value);
       Handle<JSFunction> function =
-          Factory::NewFunctionFromBoilerplate(boilerplate, context, TENURED);
+          Factory::NewFunctionFromSharedFunctionInfo(shared, context, TENURED);
       value = function;
     }
 
@@ -1230,6 +1228,178 @@
 }
 
 
+static Object* Runtime_RegExpConstructResult(Arguments args) {
+  ASSERT(args.length() == 3);
+  CONVERT_SMI_CHECKED(elements_count, args[0]);
+  if (elements_count > JSArray::kMaxFastElementsLength) {
+    return Top::ThrowIllegalOperation();
+  }
+  Object* new_object = Heap::AllocateFixedArrayWithHoles(elements_count);
+  if (new_object->IsFailure()) return new_object;
+  FixedArray* elements = FixedArray::cast(new_object);
+  new_object = Heap::AllocateRaw(JSRegExpResult::kSize,
+                                 NEW_SPACE,
+                                 OLD_POINTER_SPACE);
+  if (new_object->IsFailure()) return new_object;
+  {
+    AssertNoAllocation no_gc;
+    HandleScope scope;
+    reinterpret_cast<HeapObject*>(new_object)->
+        set_map(Top::global_context()->regexp_result_map());
+  }
+  JSArray* array = JSArray::cast(new_object);
+  array->set_properties(Heap::empty_fixed_array());
+  array->set_elements(elements);
+  array->set_length(Smi::FromInt(elements_count));
+  // Write in-object properties after the length of the array.
+  array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, args[1]);
+  array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, args[2]);
+  return array;
+}
+
+
+static Object* Runtime_RegExpInitializeObject(Arguments args) {
+  AssertNoAllocation no_alloc;
+  ASSERT(args.length() == 5);
+  CONVERT_CHECKED(JSRegExp, regexp, args[0]);
+  CONVERT_CHECKED(String, source, args[1]);
+
+  Object* global = args[2];
+  if (!global->IsTrue()) global = Heap::false_value();
+
+  Object* ignoreCase = args[3];
+  if (!ignoreCase->IsTrue()) ignoreCase = Heap::false_value();
+
+  Object* multiline = args[4];
+  if (!multiline->IsTrue()) multiline = Heap::false_value();
+
+  Map* map = regexp->map();
+  Object* constructor = map->constructor();
+  if (constructor->IsJSFunction() &&
+      JSFunction::cast(constructor)->initial_map() == map) {
+    // If we still have the original map, set in-object properties directly.
+    regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
+    // TODO(lrn): Consider skipping write barrier on booleans as well.
+    // Both true and false should be in oldspace at all times.
+    regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, global);
+    regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, ignoreCase);
+    regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline);
+    regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+                                  Smi::FromInt(0),
+                                  SKIP_WRITE_BARRIER);
+    return regexp;
+  }
+
+  // Map has changed, so use generic, but slower, method.
+  PropertyAttributes final =
+      static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
+  PropertyAttributes writable =
+      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+  regexp->IgnoreAttributesAndSetLocalProperty(Heap::source_symbol(),
+                                              source,
+                                              final);
+  regexp->IgnoreAttributesAndSetLocalProperty(Heap::global_symbol(),
+                                              global,
+                                              final);
+  regexp->IgnoreAttributesAndSetLocalProperty(Heap::ignore_case_symbol(),
+                                              ignoreCase,
+                                              final);
+  regexp->IgnoreAttributesAndSetLocalProperty(Heap::multiline_symbol(),
+                                              multiline,
+                                              final);
+  regexp->IgnoreAttributesAndSetLocalProperty(Heap::last_index_symbol(),
+                                              Smi::FromInt(0),
+                                              writable);
+  return regexp;
+}
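
Runtime_RegExpInitializeObject above takes a fast path when the regexp still has its constructor's initial map: field offsets are then statically known and can be written directly, with the generic property machinery kept only as a fallback for reshaped objects. A toy sketch of that fast/slow split (all names hypothetical, not V8's):

#include <cassert>
#include <map>
#include <string>

struct Layout { int source_slot, global_slot; };
static const Layout kInitialLayout = {0, 1};

struct Obj {
  const Layout* layout;                     // Stands in for the hidden map.
  std::string slots[2];                     // In-object fields.
  std::map<std::string, std::string> dict;  // Generic fallback storage.
};

void InitializeRegExp(Obj* obj, const std::string& source,
                      const std::string& global) {
  if (obj->layout == &kInitialLayout) {
    // Fast path: layout unchanged, so slot offsets are compile-time facts.
    obj->slots[kInitialLayout.source_slot] = source;
    obj->slots[kInitialLayout.global_slot] = global;
    return;
  }
  // Slow path: the object was reshaped; use name-based lookup instead.
  obj->dict["source"] = source;
  obj->dict["global"] = global;
}

int main() {
  Obj fast = {&kInitialLayout, {"", ""}, {}};
  InitializeRegExp(&fast, "a*b", "true");
  assert(fast.slots[0] == "a*b");
  Obj slow = {nullptr, {"", ""}, {}};
  InitializeRegExp(&slow, "a*b", "false");
  assert(slow.dict["source"] == "a*b");
  return 0;
}
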
+
+
+static Object* Runtime_FinishArrayPrototypeSetup(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSArray, prototype, 0);
+  // This is necessary to enable fast checks for absence of elements
+  // on Array.prototype and below.
+  prototype->set_elements(Heap::empty_fixed_array());
+  return Smi::FromInt(0);
+}
+
+
+static void SetCustomCallGenerator(Handle<JSFunction> function,
+                                   ExternalReference* generator) {
+  if (function->shared()->function_data()->IsUndefined()) {
+    function->shared()->set_function_data(*FromCData(generator->address()));
+  }
+}
+
+
+static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder,
+                                         const char* name,
+                                         Builtins::Name builtin_name,
+                                         ExternalReference* generator = NULL) {
+  Handle<String> key = Factory::LookupAsciiSymbol(name);
+  Handle<Code> code(Builtins::builtin(builtin_name));
+  Handle<JSFunction> optimized = Factory::NewFunction(key,
+                                                      JS_OBJECT_TYPE,
+                                                      JSObject::kHeaderSize,
+                                                      code,
+                                                      false);
+  optimized->shared()->DontAdaptArguments();
+  if (generator != NULL) {
+    SetCustomCallGenerator(optimized, generator);
+  }
+  SetProperty(holder, key, optimized, NONE);
+  return optimized;
+}
+
+
+Object* CompileArrayPushCall(CallStubCompiler* compiler,
+                             Object* object,
+                             JSObject* holder,
+                             JSFunction* function,
+                             String* name,
+                             StubCompiler::CheckType check) {
+  return compiler->CompileArrayPushCall(object, holder, function, name, check);
+}
+
+
+Object* CompileArrayPopCall(CallStubCompiler* compiler,
+                            Object* object,
+                            JSObject* holder,
+                            JSFunction* function,
+                            String* name,
+                            StubCompiler::CheckType check) {
+  return compiler->CompileArrayPopCall(object, holder, function, name, check);
+}
+
+
+static Object* Runtime_SpecialArrayFunctions(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSObject, holder, 0);
+
+  ExternalReference pop = ExternalReference::compile_array_pop_call();
+  ExternalReference push = ExternalReference::compile_array_push_call();
+
+  InstallBuiltin(holder, "pop", Builtins::ArrayPop, &pop);
+  InstallBuiltin(holder, "push", Builtins::ArrayPush, &push);
+  InstallBuiltin(holder, "shift", Builtins::ArrayShift);
+  InstallBuiltin(holder, "unshift", Builtins::ArrayUnshift);
+  InstallBuiltin(holder, "slice", Builtins::ArraySlice);
+  InstallBuiltin(holder, "splice", Builtins::ArraySplice);
+  InstallBuiltin(holder, "concat", Builtins::ArrayConcat);
+
+  return *holder;
+}
+
+
+static Object* Runtime_GetGlobalReceiver(Arguments args) {
+  // Returns the real global receiver, not the builtins object.
+  Context* global_context = Top::context()->global()->global_context();
+  return global_context->global()->global_receiver();
+}
+
+
 static Object* Runtime_MaterializeRegExpLiteral(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 4);
@@ -1280,6 +1450,18 @@
 }
 
 
+static Object* Runtime_FunctionRemovePrototype(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  Object* obj = f->RemovePrototype();
+  if (obj->IsFailure()) return obj;
+
+  return Heap::undefined_value();
+}
+
+
 static Object* Runtime_FunctionGetScript(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 1);
@@ -1353,6 +1535,7 @@
   ASSERT(args.length() == 2);
 
   CONVERT_CHECKED(JSFunction, fun, args[0]);
+  ASSERT(fun->should_have_prototype());
   Object* obj = Accessors::FunctionSetPrototype(fun, args[1], NULL);
   if (obj->IsFailure()) return obj;
   return args[0];  // return TOS
@@ -1364,10 +1547,8 @@
   ASSERT(args.length() == 1);
 
   CONVERT_CHECKED(JSFunction, f, args[0]);
-  // The function_data field of the shared function info is used exclusively by
-  // the API.
-  return !f->shared()->function_data()->IsUndefined() ? Heap::true_value()
-                                                      : Heap::false_value();
+  return f->shared()->IsApiFunction() ? Heap::true_value()
+                                      : Heap::false_value();
 }
 
 static Object* Runtime_FunctionIsBuiltin(Arguments args) {
@@ -1492,9 +1673,91 @@
   return CharFromCode(args[0]);
 }
 
+
+class FixedArrayBuilder {
+ public:
+  explicit FixedArrayBuilder(int initial_capacity)
+      : array_(Factory::NewFixedArrayWithHoles(initial_capacity)),
+        length_(0) {
+    // Require a non-zero initial size. Ensures that doubling the size to
+    // extend the array will work.
+    ASSERT(initial_capacity > 0);
+  }
+
+  explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
+      : array_(backing_store),
+        length_(0) {
+    // Require a non-zero initial size. Ensures that doubling the size to
+    // extend the array will work.
+    ASSERT(backing_store->length() > 0);
+  }
+
+  bool HasCapacity(int elements) {
+    int length = array_->length();
+    int required_length = length_ + elements;
+    return (length >= required_length);
+  }
+
+  void EnsureCapacity(int elements) {
+    int length = array_->length();
+    int required_length = length_ + elements;
+    if (length < required_length) {
+      int new_length = length;
+      do {
+        new_length *= 2;
+      } while (new_length < required_length);
+      Handle<FixedArray> extended_array =
+          Factory::NewFixedArrayWithHoles(new_length);
+      array_->CopyTo(0, *extended_array, 0, length_);
+      array_ = extended_array;
+    }
+  }
+
+  void Add(Object* value) {
+    ASSERT(length_ < capacity());
+    array_->set(length_, value);
+    length_++;
+  }
+
+  void Add(Smi* value) {
+    ASSERT(length_ < capacity());
+    array_->set(length_, value);
+    length_++;
+  }
+
+  Handle<FixedArray> array() {
+    return array_;
+  }
+
+  int length() {
+    return length_;
+  }
+
+  int capacity() {
+    return array_->length();
+  }
+
+  Handle<JSArray> ToJSArray() {
+    Handle<JSArray> result_array = Factory::NewJSArrayWithElements(array_);
+    result_array->set_length(Smi::FromInt(length_));
+    return result_array;
+  }
+
+  Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
+    target_array->set_elements(*array_);
+    target_array->set_length(Smi::FromInt(length_));
+    return target_array;
+  }
+
+ private:
+  Handle<FixedArray> array_;
+  int length_;
+};
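
FixedArrayBuilder grows by doubling from a non-zero initial capacity, keeping appends amortized O(1); EnsureCapacity copies only the length_ live elements into the new backing store. The same growth discipline in plain C++ (a sketch, not the V8 class):

#include <cassert>
#include <cstring>

// Doubling builder over a raw int buffer; mirrors EnsureCapacity/Add.
class IntBuilder {
 public:
  explicit IntBuilder(int initial_capacity)
      : data_(new int[initial_capacity]),
        capacity_(initial_capacity),
        length_(0) {
    assert(initial_capacity > 0);  // Doubling from zero would never grow.
  }
  ~IntBuilder() { delete[] data_; }

  void EnsureCapacity(int elements) {
    int required = length_ + elements;
    if (capacity_ < required) {
      int new_capacity = capacity_;
      do {
        new_capacity *= 2;
      } while (new_capacity < required);
      int* extended = new int[new_capacity];
      memcpy(extended, data_, length_ * sizeof(int));  // Live elements only.
      delete[] data_;
      data_ = extended;
      capacity_ = new_capacity;
    }
  }

  void Add(int value) {
    assert(length_ < capacity_);  // Caller must EnsureCapacity first.
    data_[length_++] = value;
  }

  int length() const { return length_; }

 private:
  int* data_;
  int capacity_;
  int length_;
};
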
+
+
 // Forward declarations.
-static const int kStringBuilderConcatHelperLengthBits = 11;
-static const int kStringBuilderConcatHelperPositionBits = 19;
+const int kStringBuilderConcatHelperLengthBits = 11;
+const int kStringBuilderConcatHelperPositionBits = 19;
 
 template <typename schar>
 static inline void StringBuilderConcatHelper(String*,
@@ -1502,15 +1765,19 @@
                                              FixedArray*,
                                              int);
 
-typedef BitField<int, 0, 11> StringBuilderSubstringLength;
-typedef BitField<int, 11, 19> StringBuilderSubstringPosition;
+typedef BitField<int, 0, kStringBuilderConcatHelperLengthBits>
+    StringBuilderSubstringLength;
+typedef BitField<int,
+                 kStringBuilderConcatHelperLengthBits,
+                 kStringBuilderConcatHelperPositionBits>
+    StringBuilderSubstringPosition;
+
 
 class ReplacementStringBuilder {
  public:
   ReplacementStringBuilder(Handle<String> subject, int estimated_part_count)
-      : subject_(subject),
-        parts_(Factory::NewFixedArray(estimated_part_count)),
-        part_count_(0),
+      : array_builder_(estimated_part_count),
+        subject_(subject),
         character_count_(0),
         is_ascii_(subject->IsAsciiRepresentation()) {
     // Require a non-zero initial size. Ensures that doubling the size to
@@ -1518,38 +1785,33 @@
     ASSERT(estimated_part_count > 0);
   }
 
-  void EnsureCapacity(int elements) {
-    int length = parts_->length();
-    int required_length = part_count_ + elements;
-    if (length < required_length) {
-      int new_length = length;
-      do {
-        new_length *= 2;
-      } while (new_length < required_length);
-      Handle<FixedArray> extended_array =
-          Factory::NewFixedArray(new_length);
-      parts_->CopyTo(0, *extended_array, 0, part_count_);
-      parts_ = extended_array;
-    }
-  }
-
-  void AddSubjectSlice(int from, int to) {
+  static inline void AddSubjectSlice(FixedArrayBuilder* builder,
+                                     int from,
+                                     int to) {
     ASSERT(from >= 0);
     int length = to - from;
     ASSERT(length > 0);
-    // Can we encode the slice in 11 bits for length and 19 bits for
-    // start position - as used by StringBuilderConcatHelper?
     if (StringBuilderSubstringLength::is_valid(length) &&
         StringBuilderSubstringPosition::is_valid(from)) {
       int encoded_slice = StringBuilderSubstringLength::encode(length) |
           StringBuilderSubstringPosition::encode(from);
-      AddElement(Smi::FromInt(encoded_slice));
+      builder->Add(Smi::FromInt(encoded_slice));
     } else {
       // Otherwise encode as two smis.
-      AddElement(Smi::FromInt(-length));
-      AddElement(Smi::FromInt(from));
+      builder->Add(Smi::FromInt(-length));
+      builder->Add(Smi::FromInt(from));
     }
-    IncrementCharacterCount(length);
+  }
+
+
+  void EnsureCapacity(int elements) {
+    array_builder_.EnsureCapacity(elements);
+  }
+
+
+  void AddSubjectSlice(int from, int to) {
+    AddSubjectSlice(&array_builder_, from, to);
+    IncrementCharacterCount(to - from);
   }
 
 
@@ -1565,7 +1827,7 @@
 
 
   Handle<String> ToString() {
-    if (part_count_ == 0) {
+    if (array_builder_.length() == 0) {
       return Factory::empty_string();
     }
 
@@ -1577,8 +1839,8 @@
       char* char_buffer = seq->GetChars();
       StringBuilderConcatHelper(*subject_,
                                 char_buffer,
-                                *parts_,
-                                part_count_);
+                                *array_builder_.array(),
+                                array_builder_.length());
     } else {
       // Non-ASCII.
       joined_string = NewRawTwoByteString(character_count_);
@@ -1587,8 +1849,8 @@
       uc16* char_buffer = seq->GetChars();
       StringBuilderConcatHelper(*subject_,
                                 char_buffer,
-                                *parts_,
-                                part_count_);
+                                *array_builder_.array(),
+                                array_builder_.length());
     }
     return joined_string;
   }
@@ -1601,8 +1863,14 @@
     character_count_ += by;
   }
 
- private:
+  Handle<JSArray> GetParts() {
+    Handle<JSArray> result =
+        Factory::NewJSArrayWithElements(array_builder_.array());
+    result->set_length(Smi::FromInt(array_builder_.length()));
+    return result;
+  }
 
+ private:
   Handle<String> NewRawAsciiString(int size) {
     CALL_HEAP_FUNCTION(Heap::AllocateRawAsciiString(size), String);
   }
@@ -1615,14 +1883,12 @@
 
   void AddElement(Object* element) {
     ASSERT(element->IsSmi() || element->IsString());
-    ASSERT(parts_->length() > part_count_);
-    parts_->set(part_count_, element);
-    part_count_++;
+    ASSERT(array_builder_.capacity() > array_builder_.length());
+    array_builder_.Add(element);
   }
 
+  FixedArrayBuilder array_builder_;
   Handle<String> subject_;
-  Handle<FixedArray> parts_;
-  int part_count_;
   int character_count_;
   bool is_ascii_;
 };
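
The builder's subject slices above are packed into a single smi when they fit: 11 bits of length and 19 bits of start position, with a pair of smis (negated length, then position) as the fallback. A sketch of that encoding (illustrative; V8 does this through the StringBuilderSubstringLength/Position BitFields):

#include <cassert>
#include <cstdint>

const int kLengthBits = 11;    // kStringBuilderConcatHelperLengthBits.
const int kPositionBits = 19;  // kStringBuilderConcatHelperPositionBits.

// Pack a slice into one non-negative 30-bit value if it fits; otherwise the
// caller falls back to emitting two values: -length followed by position.
bool TryEncodeSlice(int from, int length, int32_t* encoded) {
  if (length >= (1 << kLengthBits) || from >= (1 << kPositionBits)) {
    return false;  // Doesn't fit; use the two-smi fallback.
  }
  *encoded = length | (from << kLengthBits);
  return true;
}

void DecodeSlice(int32_t encoded, int* from, int* length) {
  *length = encoded & ((1 << kLengthBits) - 1);
  *from = encoded >> kLengthBits;
}

int main() {
  int32_t packed;
  assert(TryEncodeSlice(1234, 56, &packed));
  int from, length;
  DecodeSlice(packed, &from, &length);
  assert(from == 1234 && length == 56);
  assert(!TryEncodeSlice(1 << 20, 56, &packed));  // Position too large.
  return 0;
}
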
@@ -2030,7 +2296,6 @@
 }
 
 
-
 // Cap on the maximal shift in the Boyer-Moore implementation. By setting a
 // limit, we can fix the size of tables.
 static const int kBMMaxShift = 0xff;
@@ -2076,10 +2341,23 @@
 static int bad_char_occurrence[kBMAlphabetSize];
 static BMGoodSuffixBuffers bmgs_buffers;
 
+// State of the string match tables.
+// SIMPLE_SEARCH: No usable content in the buffers.
+// BOYER_MOORE_HORSPOOL: The bad_char_occurrence table has been populated.
+// BOYER_MOORE: The bmgs_buffers tables have also been populated.
+// Whenever starting with a new needle, call InitializeStringSearch to
+// determine which search strategy to use; for a long-needle strategy, the
+// call also resets the algorithm to SIMPLE_SEARCH.
+enum StringSearchAlgorithm { SIMPLE_SEARCH, BOYER_MOORE_HORSPOOL, BOYER_MOORE };
+static StringSearchAlgorithm algorithm;
+
+
 // Compute the bad-char table for Boyer-Moore in the static buffer.
 template <typename pchar>
-static void BoyerMoorePopulateBadCharTable(Vector<const pchar> pattern,
-                                          int start) {
+static void BoyerMoorePopulateBadCharTable(Vector<const pchar> pattern) {
+  // Only preprocess at most kBMMaxShift last characters of pattern.
+  int start = pattern.length() < kBMMaxShift ? 0
+                                             : pattern.length() - kBMMaxShift;
   // Run forwards to populate bad_char_table, so that *last* instance
   // of character equivalence class is the one registered.
   // Notice: Doesn't include the last character.
@@ -2099,10 +2377,11 @@
   }
 }
 
+
 template <typename pchar>
-static void BoyerMoorePopulateGoodSuffixTable(Vector<const pchar> pattern,
-                                              int start) {
+static void BoyerMoorePopulateGoodSuffixTable(Vector<const pchar> pattern) {
   int m = pattern.length();
+  int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
   int len = m - start;
   // Compute Good Suffix tables.
   bmgs_buffers.init(m);
@@ -2149,6 +2428,7 @@
   }
 }
 
+
 template <typename schar, typename pchar>
 static inline int CharOccurrence(int char_code) {
   if (sizeof(schar) == 1) {
@@ -2163,6 +2443,7 @@
   return bad_char_occurrence[char_code % kBMAlphabetSize];
 }
 
+
 // Restricted simplified Boyer-Moore string matching.
 // Uses only the bad-shift table of Boyer-Moore and only uses it
 // for the character compared to the last character of the needle.
@@ -2171,14 +2452,13 @@
                               Vector<const pchar> pattern,
                               int start_index,
                               bool* complete) {
+  ASSERT(algorithm <= BOYER_MOORE_HORSPOOL);
   int n = subject.length();
   int m = pattern.length();
-  // Only preprocess at most kBMMaxShift last characters of pattern.
-  int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
 
-  BoyerMoorePopulateBadCharTable(pattern, start);
+  int badness = -m;
 
-  int badness = -m;  // How bad we are doing without a good-suffix table.
+  // How bad we are doing without a good-suffix table.
   int idx;  // No matches found prior to this index.
   pchar last_char = pattern[m - 1];
   int last_char_shift = m - 1 - CharOccurrence<schar, pchar>(last_char);
@@ -2223,13 +2503,12 @@
 static int BoyerMooreIndexOf(Vector<const schar> subject,
                              Vector<const pchar> pattern,
                              int idx) {
+  ASSERT(algorithm <= BOYER_MOORE);
   int n = subject.length();
   int m = pattern.length();
   // Only preprocess at most kBMMaxShift last characters of pattern.
   int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
 
-  // Build the Good Suffix table and continue searching.
-  BoyerMoorePopulateGoodSuffixTable(pattern, start);
   pchar last_char = pattern[m - 1];
   // Continue search from i.
   while (idx <= n - m) {
@@ -2265,9 +2544,17 @@
 
 
 template <typename schar>
-static int SingleCharIndexOf(Vector<const schar> string,
-                             schar pattern_char,
-                             int start_index) {
+static inline int SingleCharIndexOf(Vector<const schar> string,
+                                    schar pattern_char,
+                                    int start_index) {
+  if (sizeof(schar) == 1) {
+    const schar* pos = reinterpret_cast<const schar*>(
+        memchr(string.start() + start_index,
+               pattern_char,
+               string.length() - start_index));
+    if (pos == NULL) return -1;
+    return static_cast<int>(pos - string.start());
+  }
   for (int i = start_index, n = string.length(); i < n; i++) {
     if (pattern_char == string[i]) {
       return i;
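
For 8-bit strings, SingleCharIndexOf (and, below, SimpleIndexOf's scan for the first pattern character) now delegates to memchr, which C libraries typically implement with vectorized code. The shape of that fast path, standalone:

#include <cassert>
#include <cstring>

// memchr-based scan for a single character, as used for ASCII subjects;
// returns the index of the first occurrence at or after start_index, or -1.
int IndexOfChar(const char* string, int length, char c, int start_index) {
  const char* pos = static_cast<const char*>(
      memchr(string + start_index, c, length - start_index));
  if (pos == NULL) return -1;
  return static_cast<int>(pos - string);
}

int main() {
  const char* s = "abcabc";
  assert(IndexOfChar(s, 6, 'b', 0) == 1);
  assert(IndexOfChar(s, 6, 'b', 2) == 4);
  assert(IndexOfChar(s, 6, 'z', 0) == -1);
  return 0;
}
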
@@ -2305,17 +2592,29 @@
   // done enough work we decide it's probably worth switching to a better
   // algorithm.
   int badness = -10 - (pattern.length() << 2);
+
   // We know our pattern is at least 2 characters, we cache the first so
   // the common case of the first character not matching is faster.
   pchar pattern_first_char = pattern[0];
-
   for (int i = idx, n = subject.length() - pattern.length(); i <= n; i++) {
     badness++;
     if (badness > 0) {
       *complete = false;
       return i;
     }
-    if (subject[i] != pattern_first_char) continue;
+    if (sizeof(schar) == 1 && sizeof(pchar) == 1) {
+      const schar* pos = reinterpret_cast<const schar*>(
+          memchr(subject.start() + i,
+                 pattern_first_char,
+                 n - i + 1));
+      if (pos == NULL) {
+        *complete = true;
+        return -1;
+      }
+      i = static_cast<int>(pos - subject.start());
+    } else {
+      if (subject[i] != pattern_first_char) continue;
+    }
     int j = 1;
     do {
       if (pattern[j] != subject[i+j]) {
@@ -2340,7 +2639,16 @@
                          int idx) {
   pchar pattern_first_char = pattern[0];
   for (int i = idx, n = subject.length() - pattern.length(); i <= n; i++) {
-    if (subject[i] != pattern_first_char) continue;
+    if (sizeof(schar) == 1 && sizeof(pchar) == 1) {
+      const schar* pos = reinterpret_cast<const schar*>(
+          memchr(subject.start() + i,
+                 pattern_first_char,
+                 n - i + 1));
+      if (pos == NULL) return -1;
+      i = static_cast<int>(pos - subject.start());
+    } else {
+      if (subject[i] != pattern_first_char) continue;
+    }
     int j = 1;
     do {
       if (pattern[j] != subject[i+j]) {
@@ -2356,39 +2664,84 @@
 }
 
 
-// Dispatch to different algorithms.
-template <typename schar, typename pchar>
-static int StringMatchStrategy(Vector<const schar> sub,
-                               Vector<const pchar> pat,
-                               int start_index) {
-  ASSERT(pat.length() > 1);
+// Strategy for searching for a string in another string.
+enum StringSearchStrategy { SEARCH_FAIL, SEARCH_SHORT, SEARCH_LONG };
 
+
+template <typename pchar>
+static inline StringSearchStrategy InitializeStringSearch(
+    Vector<const pchar> pat, bool ascii_subject) {
+  ASSERT(pat.length() > 1);
   // We have an ASCII haystack and a non-ASCII needle. Check if there
   // really is a non-ASCII character in the needle and bail out if there
   // is.
-  if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
+  if (ascii_subject && sizeof(pchar) > 1) {
     for (int i = 0; i < pat.length(); i++) {
       uc16 c = pat[i];
       if (c > String::kMaxAsciiCharCode) {
-        return -1;
+        return SEARCH_FAIL;
       }
     }
   }
   if (pat.length() < kBMMinPatternLength) {
-    // We don't believe fancy searching can ever be more efficient.
-    // The max shift of Boyer-Moore on a pattern of this length does
-    // not compensate for the overhead.
-    return SimpleIndexOf(sub, pat, start_index);
+    return SEARCH_SHORT;
   }
+  algorithm = SIMPLE_SEARCH;
+  return SEARCH_LONG;
+}
+
+
+// Dispatch long needle searches to different algorithms.
+template <typename schar, typename pchar>
+static int ComplexIndexOf(Vector<const schar> sub,
+                          Vector<const pchar> pat,
+                          int start_index) {
+  ASSERT(pat.length() >= kBMMinPatternLength);
   // Try algorithms in order of increasing setup cost and expected performance.
   bool complete;
-  int idx = SimpleIndexOf(sub, pat, start_index, &complete);
-  if (complete) return idx;
-  idx = BoyerMooreHorspool(sub, pat, idx, &complete);
-  if (complete) return idx;
-  return BoyerMooreIndexOf(sub, pat, idx);
+  int idx = start_index;
+  switch (algorithm) {
+    case SIMPLE_SEARCH:
+      idx = SimpleIndexOf(sub, pat, idx, &complete);
+      if (complete) return idx;
+      BoyerMoorePopulateBadCharTable(pat);
+      algorithm = BOYER_MOORE_HORSPOOL;
+      // FALLTHROUGH.
+    case BOYER_MOORE_HORSPOOL:
+      idx = BoyerMooreHorspool(sub, pat, idx, &complete);
+      if (complete) return idx;
+      // Build the Good Suffix table and continue searching.
+      BoyerMoorePopulateGoodSuffixTable(pat);
+      algorithm = BOYER_MOORE;
+      // FALLTHROUGH.
+    case BOYER_MOORE:
+      return BoyerMooreIndexOf(sub, pat, idx);
+  }
+  UNREACHABLE();
+  return -1;
 }
 
+
+// Dispatch to different search strategies for a single search.
+// If searching multiple times on the same needle, the search
+// strategy should only be computed once and then dispatch to different
+// loops.
+template <typename schar, typename pchar>
+static int StringSearch(Vector<const schar> sub,
+                        Vector<const pchar> pat,
+                        int start_index) {
+  bool ascii_subject = (sizeof(schar) == 1);
+  StringSearchStrategy strategy = InitializeStringSearch(pat, ascii_subject);
+  switch (strategy) {
+    case SEARCH_FAIL: return -1;
+    case SEARCH_SHORT: return SimpleIndexOf(sub, pat, start_index);
+    case SEARCH_LONG: return ComplexIndexOf(sub, pat, start_index);
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
 // Perform string match of pattern on subject, starting at start index.
 // Caller must ensure that 0 <= start_index <= sub->length(),
 // and should check that pat->length() + start_index <= sub->length()
@@ -2407,6 +2760,7 @@
   if (!sub->IsFlat()) {
     FlattenString(sub);
   }
+
   // Searching for one specific character is common.  For one
   // character patterns linear search is necessary, so any smart
   // algorithm is unnecessary overhead.
@@ -2440,15 +2794,15 @@
   if (pat->IsAsciiRepresentation()) {
     Vector<const char> pat_vector = pat->ToAsciiVector();
     if (sub->IsAsciiRepresentation()) {
-      return StringMatchStrategy(sub->ToAsciiVector(), pat_vector, start_index);
+      return StringSearch(sub->ToAsciiVector(), pat_vector, start_index);
     }
-    return StringMatchStrategy(sub->ToUC16Vector(), pat_vector, start_index);
+    return StringSearch(sub->ToUC16Vector(), pat_vector, start_index);
   }
   Vector<const uc16> pat_vector = pat->ToUC16Vector();
   if (sub->IsAsciiRepresentation()) {
-    return StringMatchStrategy(sub->ToAsciiVector(), pat_vector, start_index);
+    return StringSearch(sub->ToAsciiVector(), pat_vector, start_index);
   }
-  return StringMatchStrategy(sub->ToUC16Vector(), pat_vector, start_index);
+  return StringSearch(sub->ToUC16Vector(), pat_vector, start_index);
 }
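
The search now keeps per-needle state and escalates lazily: the naive scan runs until its badness budget is spent, then the Boyer-Moore-Horspool bad-character table is built, and only if that also proves slow is the good-suffix table added for full Boyer-Moore. A self-contained Horspool sketch showing the bad-character skip the middle stage relies on (simplified: full byte alphabet, no equivalence classes or kBMMaxShift cap):

#include <cassert>
#include <cstring>

// Boyer-Moore-Horspool: on a mismatch, shift by the distance from the last
// occurrence of the text character aligned with the pattern's final char.
int HorspoolIndexOf(const char* subject, int n, const char* pattern, int m) {
  assert(m >= 1 && m <= n);
  int bad_char_shift[256];
  for (int i = 0; i < 256; i++) bad_char_shift[i] = m;
  // Last occurrence wins; the final pattern character is excluded so a
  // mismatch there still yields a positive shift.
  for (int i = 0; i < m - 1; i++) {
    bad_char_shift[static_cast<unsigned char>(pattern[i])] = m - 1 - i;
  }
  int idx = 0;
  while (idx <= n - m) {
    int j = m - 1;
    while (j >= 0 && subject[idx + j] == pattern[j]) j--;
    if (j < 0) return idx;  // Full match.
    idx += bad_char_shift[static_cast<unsigned char>(subject[idx + m - 1])];
  }
  return -1;
}

int main() {
  const char* text = "here is a simple example";
  int n = static_cast<int>(strlen(text));
  assert(HorspoolIndexOf(text, n, "example", 7) == 17);
  assert(HorspoolIndexOf(text, n, "absent", 6) == -1);
  return 0;
}
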
 
 
@@ -2608,8 +2962,8 @@
   int d = str1->Get(0) - str2->Get(0);
   if (d != 0) return Smi::FromInt(d);
 
-  str1->TryFlattenIfNotFlat();
-  str2->TryFlattenIfNotFlat();
+  str1->TryFlatten();
+  str2->TryFlatten();
 
   static StringInputBuffer buf1;
   static StringInputBuffer buf2;
@@ -2705,6 +3059,476 @@
 }
 
 
+// The match plus up to two smis for each adjacent slice of a very long string.
+const int kMaxBuilderEntriesPerRegExpMatch = 5;
+
+
+static void SetLastMatchInfoNoCaptures(Handle<String> subject,
+                                       Handle<JSArray> last_match_info,
+                                       int match_start,
+                                       int match_end) {
+  // Fill last_match_info with a single capture.
+  last_match_info->EnsureSize(2 + RegExpImpl::kLastMatchOverhead);
+  AssertNoAllocation no_gc;
+  FixedArray* elements = FixedArray::cast(last_match_info->elements());
+  RegExpImpl::SetLastCaptureCount(elements, 2);
+  RegExpImpl::SetLastInput(elements, *subject);
+  RegExpImpl::SetLastSubject(elements, *subject);
+  RegExpImpl::SetCapture(elements, 0, match_start);
+  RegExpImpl::SetCapture(elements, 1, match_end);
+}
+
+
+template <typename schar>
+static bool SearchCharMultiple(Vector<schar> subject,
+                               String* pattern,
+                               schar pattern_char,
+                               FixedArrayBuilder* builder,
+                               int* match_pos) {
+  // Position of last match.
+  int pos = *match_pos;
+  int subject_length = subject.length();
+  while (pos < subject_length) {
+    int match_end = pos + 1;
+    if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
+      *match_pos = pos;
+      return false;
+    }
+    int new_pos = SingleCharIndexOf(subject, pattern_char, match_end);
+    if (new_pos >= 0) {
+      // Match has been found.
+      if (new_pos > match_end) {
+        ReplacementStringBuilder::AddSubjectSlice(builder, match_end, new_pos);
+      }
+      pos = new_pos;
+      builder->Add(pattern);
+    } else {
+      break;
+    }
+  }
+  if (pos + 1 < subject_length) {
+    ReplacementStringBuilder::AddSubjectSlice(builder, pos + 1, subject_length);
+  }
+  *match_pos = pos;
+  return true;
+}
+
+
+static bool SearchCharMultiple(Handle<String> subject,
+                               Handle<String> pattern,
+                               Handle<JSArray> last_match_info,
+                               FixedArrayBuilder* builder) {
+  ASSERT(subject->IsFlat());
+  ASSERT_EQ(1, pattern->length());
+  uc16 pattern_char = pattern->Get(0);
+  // Treating position before first as initial "previous match position".
+  int match_pos = -1;
+
+  for (;;) {  // Break when search complete.
+    builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+    AssertNoAllocation no_gc;
+    if (subject->IsAsciiRepresentation()) {
+      if (pattern_char > String::kMaxAsciiCharCode) {
+        break;
+      }
+      Vector<const char> subject_vector = subject->ToAsciiVector();
+      char pattern_ascii_char = static_cast<char>(pattern_char);
+      bool complete = SearchCharMultiple<const char>(subject_vector,
+                                                     *pattern,
+                                                     pattern_ascii_char,
+                                                     builder,
+                                                     &match_pos);
+      if (complete) break;
+    } else {
+      Vector<const uc16> subject_vector = subject->ToUC16Vector();
+      bool complete = SearchCharMultiple<const uc16>(subject_vector,
+                                                     *pattern,
+                                                     pattern_char,
+                                                     builder,
+                                                     &match_pos);
+      if (complete) break;
+    }
+  }
+
+  if (match_pos >= 0) {
+    SetLastMatchInfoNoCaptures(subject,
+                               last_match_info,
+                               match_pos,
+                               match_pos + 1);
+    return true;
+  }
+  return false;  // No matches at all.
+}
+
+
+template <typename schar, typename pchar>
+static bool SearchStringMultiple(Vector<schar> subject,
+                                 String* pattern,
+                                 Vector<pchar> pattern_string,
+                                 FixedArrayBuilder* builder,
+                                 int* match_pos) {
+  int pos = *match_pos;
+  int subject_length = subject.length();
+  int pattern_length = pattern_string.length();
+  int max_search_start = subject_length - pattern_length;
+  bool is_ascii = (sizeof(schar) == 1);
+  StringSearchStrategy strategy =
+      InitializeStringSearch(pattern_string, is_ascii);
+  switch (strategy) {
+    case SEARCH_FAIL: break;
+    case SEARCH_SHORT:
+      while (pos <= max_search_start) {
+        if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
+          *match_pos = pos;
+          return false;
+        }
+        // Position of end of previous match.
+        int match_end = pos + pattern_length;
+        int new_pos = SimpleIndexOf(subject, pattern_string, match_end);
+        if (new_pos >= 0) {
+          // A match.
+          if (new_pos > match_end) {
+            ReplacementStringBuilder::AddSubjectSlice(builder,
+                                                      match_end,
+                                                      new_pos);
+          }
+          pos = new_pos;
+          builder->Add(pattern);
+        } else {
+          break;
+        }
+      }
+      break;
+    case SEARCH_LONG:
+      while (pos <= max_search_start) {
+        if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
+          *match_pos = pos;
+          return false;
+        }
+        int match_end = pos + pattern_length;
+        int new_pos = ComplexIndexOf(subject, pattern_string, match_end);
+        if (new_pos >= 0) {
+          // A match has been found.
+          if (new_pos > match_end) {
+            ReplacementStringBuilder::AddSubjectSlice(builder,
+                                                      match_end,
+                                                      new_pos);
+          }
+          pos = new_pos;
+          builder->Add(pattern);
+        } else {
+          break;
+        }
+      }
+      break;
+  }
+  if (pos < max_search_start) {
+    ReplacementStringBuilder::AddSubjectSlice(builder,
+                                              pos + pattern_length,
+                                              subject_length);
+  }
+  *match_pos = pos;
+  return true;
+}
+
+
+static bool SearchStringMultiple(Handle<String> subject,
+                                 Handle<String> pattern,
+                                 Handle<JSArray> last_match_info,
+                                 FixedArrayBuilder* builder) {
+  ASSERT(subject->IsFlat());
+  ASSERT(pattern->IsFlat());
+  ASSERT(pattern->length() > 1);
+
+  // Treat it as if a previous match ended just before the first character,
+  // i.e. at position 0 (match_pos + pattern->length() == 0).
+  int match_pos = -pattern->length();
+
+  for (;;) {  // Break when search complete.
+    builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+    AssertNoAllocation no_gc;
+    if (subject->IsAsciiRepresentation()) {
+      Vector<const char> subject_vector = subject->ToAsciiVector();
+      if (pattern->IsAsciiRepresentation()) {
+        if (SearchStringMultiple(subject_vector,
+                                 *pattern,
+                                 pattern->ToAsciiVector(),
+                                 builder,
+                                 &match_pos)) break;
+      } else {
+        if (SearchStringMultiple(subject_vector,
+                                 *pattern,
+                                 pattern->ToUC16Vector(),
+                                 builder,
+                                 &match_pos)) break;
+      }
+    } else {
+      Vector<const uc16> subject_vector = subject->ToUC16Vector();
+      if (pattern->IsAsciiRepresentation()) {
+        if (SearchStringMultiple(subject_vector,
+                                 *pattern,
+                                 pattern->ToAsciiVector(),
+                                 builder,
+                                 &match_pos)) break;
+      } else {
+        if (SearchStringMultiple(subject_vector,
+                                 *pattern,
+                                 pattern->ToUC16Vector(),
+                                 builder,
+                                 &match_pos)) break;
+      }
+    }
+  }
+
+  if (match_pos >= 0) {
+    SetLastMatchInfoNoCaptures(subject,
+                               last_match_info,
+                               match_pos,
+                               match_pos + pattern->length());
+    return true;
+  }
+  return false;  // No matches at all.
+}
+
+
+static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
+    Handle<String> subject,
+    Handle<JSRegExp> regexp,
+    Handle<JSArray> last_match_array,
+    FixedArrayBuilder* builder) {
+  ASSERT(subject->IsFlat());
+  int match_start = -1;
+  int match_end = 0;
+  int pos = 0;
+  int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
+  if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
+
+  OffsetsVector registers(required_registers);
+  Vector<int> register_vector(registers.vector(), registers.length());
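+  // On success, register_vector[0] and register_vector[1] hold the start and
+  // end offsets of the match.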
+  int subject_length = subject->length();
+
+  for (;;) {  // Break on failure, return on exception.
+    RegExpImpl::IrregexpResult result =
+        RegExpImpl::IrregexpExecOnce(regexp,
+                                     subject,
+                                     pos,
+                                     register_vector);
+    if (result == RegExpImpl::RE_SUCCESS) {
+      match_start = register_vector[0];
+      builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+      if (match_end < match_start) {
+        ReplacementStringBuilder::AddSubjectSlice(builder,
+                                                  match_end,
+                                                  match_start);
+      }
+      match_end = register_vector[1];
+      HandleScope loop_scope;
+      builder->Add(*Factory::NewSubString(subject, match_start, match_end));
+      if (match_start != match_end) {
+        pos = match_end;
+      } else {
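+        // Empty match: advance by one character to avoid matching at the
+        // same position forever.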
+        pos = match_end + 1;
+        if (pos > subject_length) break;
+      }
+    } else if (result == RegExpImpl::RE_FAILURE) {
+      break;
+    } else {
+      ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
+      return result;
+    }
+  }
+
+  if (match_start >= 0) {
+    if (match_end < subject_length) {
+      ReplacementStringBuilder::AddSubjectSlice(builder,
+                                                match_end,
+                                                subject_length);
+    }
+    SetLastMatchInfoNoCaptures(subject,
+                               last_match_array,
+                               match_start,
+                               match_end);
+    return RegExpImpl::RE_SUCCESS;
+  } else {
+    return RegExpImpl::RE_FAILURE;  // No matches at all.
+  }
+}
+
+
+static RegExpImpl::IrregexpResult SearchRegExpMultiple(
+    Handle<String> subject,
+    Handle<JSRegExp> regexp,
+    Handle<JSArray> last_match_array,
+    FixedArrayBuilder* builder) {
+
+  ASSERT(subject->IsFlat());
+  int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
+  if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
+
+  OffsetsVector registers(required_registers);
+  Vector<int> register_vector(registers.vector(), registers.length());
+
+  RegExpImpl::IrregexpResult result =
+      RegExpImpl::IrregexpExecOnce(regexp,
+                                   subject,
+                                   0,
+                                   register_vector);
+
+  int capture_count = regexp->CaptureCount();
+  int subject_length = subject->length();
+
+  // Position to search from.
+  int pos = 0;
+  // End of previous match. Differs from pos if match was empty.
+  int match_end = 0;
+  if (result == RegExpImpl::RE_SUCCESS) {
+    // Need to keep a copy of the previous match for creating last_match_info
+    // at the end, so we have two vectors that we swap between.
+    OffsetsVector registers2(required_registers);
+    Vector<int> prev_register_vector(registers2.vector(), registers2.length());
+
+    do {
+      int match_start = register_vector[0];
+      builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
+      if (match_end < match_start) {
+        ReplacementStringBuilder::AddSubjectSlice(builder,
+                                                  match_end,
+                                                  match_start);
+      }
+      match_end = register_vector[1];
+
+      {
+        // Avoid accumulating new handles inside the loop.
+        HandleScope temp_scope;
+        // The arguments array passed to the replace function holds the match,
+        // the captures, the match index and the subject, i.e.,
+        // 3 + capture count elements in total.
+        Handle<FixedArray> elements = Factory::NewFixedArray(3 + capture_count);
+        elements->set(0, *Factory::NewSubString(subject,
+                                                match_start,
+                                                match_end));
+        for (int i = 1; i <= capture_count; i++) {
+          int start = register_vector[i * 2];
+          if (start >= 0) {
+            int end = register_vector[i * 2 + 1];
+            ASSERT(start <= end);
+            Handle<String> substring = Factory::NewSubString(subject,
+                                                             start,
+                                                             end);
+            elements->set(i, *substring);
+          } else {
+            ASSERT(register_vector[i * 2 + 1] < 0);
+            elements->set(i, Heap::undefined_value());
+          }
+        }
+        elements->set(capture_count + 1, Smi::FromInt(match_start));
+        elements->set(capture_count + 2, *subject);
+        builder->Add(*Factory::NewJSArrayWithElements(elements));
+      }
+      // Swap register vectors, so the last successful match is in
+      // prev_register_vector.
+      Vector<int> tmp = prev_register_vector;
+      prev_register_vector = register_vector;
+      register_vector = tmp;
+
+      if (match_end > match_start) {
+        pos = match_end;
+      } else {
+        pos = match_end + 1;
+        if (pos > subject_length) {
+          break;
+        }
+      }
+
+      result = RegExpImpl::IrregexpExecOnce(regexp,
+                                            subject,
+                                            pos,
+                                            register_vector);
+    } while (result == RegExpImpl::RE_SUCCESS);
+
+    if (result != RegExpImpl::RE_EXCEPTION) {
+      // Finished matching, with at least one match.
+      if (match_end < subject_length) {
+        ReplacementStringBuilder::AddSubjectSlice(builder,
+                                                  match_end,
+                                                  subject_length);
+      }
+
+      int last_match_capture_count = (capture_count + 1) * 2;
+      int last_match_array_size =
+          last_match_capture_count + RegExpImpl::kLastMatchOverhead;
+      last_match_array->EnsureSize(last_match_array_size);
+      AssertNoAllocation no_gc;
+      FixedArray* elements = FixedArray::cast(last_match_array->elements());
+      RegExpImpl::SetLastCaptureCount(elements, last_match_capture_count);
+      RegExpImpl::SetLastSubject(elements, *subject);
+      RegExpImpl::SetLastInput(elements, *subject);
+      for (int i = 0; i < last_match_capture_count; i++) {
+        RegExpImpl::SetCapture(elements, i, prev_register_vector[i]);
+      }
+      return RegExpImpl::RE_SUCCESS;
+    }
+  }
+  // No matches at all, return failure or exception result directly.
+  return result;
+}
+
+
+static Object* Runtime_RegExpExecMultiple(Arguments args) {
+  ASSERT(args.length() == 4);
+  HandleScope handles;
+
+  CONVERT_ARG_CHECKED(String, subject, 1);
+  if (!subject->IsFlat()) { FlattenString(subject); }
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_CHECKED(JSArray, last_match_info, 2);
+  CONVERT_ARG_CHECKED(JSArray, result_array, 3);
+
+  ASSERT(last_match_info->HasFastElements());
+  ASSERT(regexp->GetFlags().is_global());
+  Handle<FixedArray> result_elements;
+  if (result_array->HasFastElements()) {
+    result_elements =
+        Handle<FixedArray>(FixedArray::cast(result_array->elements()));
+  } else {
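+    // Start small; FixedArrayBuilder grows the backing store on demand.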
+    result_elements = Factory::NewFixedArrayWithHoles(16);
+  }
+  FixedArrayBuilder builder(result_elements);
+
+  if (regexp->TypeTag() == JSRegExp::ATOM) {
+    Handle<String> pattern(
+        String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)));
+    int pattern_length = pattern->length();
+    if (pattern_length == 1) {
+      if (SearchCharMultiple(subject, pattern, last_match_info, &builder)) {
+        return *builder.ToJSArray(result_array);
+      }
+      return Heap::null_value();
+    }
+
+    if (!pattern->IsFlat()) FlattenString(pattern);
+    if (SearchStringMultiple(subject, pattern, last_match_info, &builder)) {
+      return *builder.ToJSArray(result_array);
+    }
+    return Heap::null_value();
+  }
+
+  ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
+
+  RegExpImpl::IrregexpResult result;
+  if (regexp->CaptureCount() == 0) {
+    result = SearchRegExpNoCaptureMultiple(subject,
+                                           regexp,
+                                           last_match_info,
+                                           &builder);
+  } else {
+    result = SearchRegExpMultiple(subject, regexp, last_match_info, &builder);
+  }
+  if (result == RegExpImpl::RE_SUCCESS) return *builder.ToJSArray(result_array);
+  if (result == RegExpImpl::RE_FAILURE) return Heap::null_value();
+  ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
+  return Failure::Exception();
+}
+
+
 static Object* Runtime_NumberToRadixString(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -2818,7 +3642,7 @@
 // string->Get(index).
 static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
   if (index < static_cast<uint32_t>(string->length())) {
-    string->TryFlattenIfNotFlat();
+    string->TryFlatten();
     return LookupSingleCharacterStringFromCode(
         string->Get(index));
   }
@@ -2846,6 +3670,11 @@
     return prototype->GetElement(index);
   }
 
+  return GetElement(object, index);
+}
+
+
+Object* Runtime::GetElement(Handle<Object> object, uint32_t index) {
   return object->GetElement(index);
 }
 
@@ -3072,7 +3901,7 @@
       result = SetElement(js_object, index, value);
     } else {
       Handle<String> key_string = Handle<String>::cast(key);
-      key_string->TryFlattenIfNotFlat();
+      key_string->TryFlatten();
       result = SetProperty(js_object, key_string, value, attr);
     }
     if (result.is_null()) return Failure::Exception();
@@ -3121,7 +3950,7 @@
       return js_object->SetElement(index, *value);
     } else {
       Handle<String> key_string = Handle<String>::cast(key);
-      key_string->TryFlattenIfNotFlat();
+      key_string->TryFlatten();
       return js_object->IgnoreAttributesAndSetLocalProperty(*key_string,
                                                             *value,
                                                             attr);
@@ -3173,7 +4002,7 @@
     key_string = Handle<String>::cast(converted);
   }
 
-  key_string->TryFlattenIfNotFlat();
+  key_string->TryFlatten();
   return js_object->DeleteProperty(*key_string, JSObject::FORCE_DELETION);
 }
 
@@ -3396,7 +4225,7 @@
   int length = LocalPrototypeChainLength(*obj);
 
   // Find the number of local properties for each of the objects.
-  int* local_property_count = NewArray<int>(length);
+  ScopedVector<int> local_property_count(length);
   int total_property_count = 0;
   Handle<JSObject> jsproto = obj;
   for (int i = 0; i < length; i++) {
@@ -3449,7 +4278,6 @@
     }
   }
 
-  DeleteArray(local_property_count);
   return *Factory::NewJSArrayWithElements(names);
 }
 
@@ -3665,11 +4493,66 @@
 }
 
 
+static bool AreDigits(const char* s, int from, int to) {
+  for (int i = from; i < to; i++) {
+    if (s[i] < '0' || s[i] > '9') return false;
+  }
+
+  return true;
+}
+
+
+static int ParseDecimalInteger(const char* s, int from, int to) {
+  ASSERT(to - from < 10);  // Overflow is not possible.
+  ASSERT(from < to);
+  int d = s[from] - '0';
+
+  for (int i = from + 1; i < to; i++) {
+    d = 10 * d + (s[i] - '0');
+  }
+
+  return d;
+}
+
+
 static Object* Runtime_StringToNumber(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(String, subject, args[0]);
-  subject->TryFlattenIfNotFlat();
+  subject->TryFlatten();
+
+  // Fast case: short integer or some sort of junk value.
+  int len = subject->length();
+  if (subject->IsSeqAsciiString()) {
+    if (len == 0) return Smi::FromInt(0);
+
+    char const* data = SeqAsciiString::cast(subject)->GetChars();
+    bool minus = (data[0] == '-');
+    int start_pos = (minus ? 1 : 0);
+
+    if (start_pos == len) {
+      return Heap::nan_value();
+    } else if (data[start_pos] > '9') {
+      // Fast check for a junk value. A valid string may start with a
+      // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit
+      // or the 'I' character ('Infinity'). All of these have character codes
+      // not greater than '9', except for 'I'.
+      if (data[start_pos] != 'I') {
+        return Heap::nan_value();
+      }
+    } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
+      // The maximal/minimal smi has 10 digits. If the string has fewer
+      // digits we know it will fit into the smi data type.
+      int d = ParseDecimalInteger(data, start_pos, len);
+      if (minus) {
+        if (d == 0) return Heap::minus_zero_value();
+        d = -d;
+      }
+      return Smi::FromInt(d);
+    }
+  }
+
+  // Slower case.
   return Heap::NumberFromDouble(StringToDouble(subject, ALLOW_HEX));
 }
 
@@ -3751,7 +4634,7 @@
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(String, source, args[0]);
 
-  source->TryFlattenIfNotFlat();
+  source->TryFlatten();
 
   int escaped_length = 0;
   int length = source->length();
@@ -3864,7 +4747,7 @@
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(String, source, args[0]);
 
-  source->TryFlattenIfNotFlat();
+  source->TryFlatten();
 
   bool ascii = true;
   int length = source->length();
@@ -3904,51 +4787,11 @@
   CONVERT_CHECKED(String, s, args[0]);
   CONVERT_SMI_CHECKED(radix, args[1]);
 
-  s->TryFlattenIfNotFlat();
+  s->TryFlatten();
 
-  int len = s->length();
-  int i;
-
-  // Skip leading white space.
-  for (i = 0; i < len && Scanner::kIsWhiteSpace.get(s->Get(i)); i++) ;
-  if (i == len) return Heap::nan_value();
-
-  // Compute the sign (default to +).
-  int sign = 1;
-  if (s->Get(i) == '-') {
-    sign = -1;
-    i++;
-  } else if (s->Get(i) == '+') {
-    i++;
-  }
-
-  // Compute the radix if 0.
-  if (radix == 0) {
-    radix = 10;
-    if (i < len && s->Get(i) == '0') {
-      radix = 8;
-      if (i + 1 < len) {
-        int c = s->Get(i + 1);
-        if (c == 'x' || c == 'X') {
-          radix = 16;
-          i += 2;
-        }
-      }
-    }
-  } else if (radix == 16) {
-    // Allow 0x or 0X prefix if radix is 16.
-    if (i + 1 < len && s->Get(i) == '0') {
-      int c = s->Get(i + 1);
-      if (c == 'x' || c == 'X') i += 2;
-    }
-  }
-
-  RUNTIME_ASSERT(2 <= radix && radix <= 36);
-  double value;
-  int end_index = StringToInt(s, i, radix, &value);
-  if (end_index != i) {
-    return Heap::NumberFromDouble(sign * value);
-  }
+  RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
+  double value = StringToInt(s, radix);
+  return Heap::NumberFromDouble(value);
-  return Heap::nan_value();
 }
 
@@ -4068,18 +4911,83 @@
 }
 
 
-template <class Converter>
-static Object* ConvertCase(Arguments args,
-                           unibrow::Mapping<Converter, 128>* mapping) {
+static inline SeqAsciiString* TryGetSeqAsciiString(String* s) {
+  if (!s->IsFlat() || !s->IsAsciiRepresentation()) return NULL;
+  if (s->IsConsString()) {
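+    // A flattened cons string has an empty second part, so its first part
+    // holds the entire string.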
+    ASSERT(ConsString::cast(s)->second()->length() == 0);
+    return SeqAsciiString::cast(ConsString::cast(s)->first());
+  }
+  return SeqAsciiString::cast(s);
+}
+
+
+namespace {
+
+struct ToLowerTraits {
+  typedef unibrow::ToLowercase UnibrowConverter;
+
+  static bool ConvertAscii(char* dst, char* src, int length) {
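+    // ASCII upper and lower case letters differ only by 0x20 ('a' - 'A').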
+    bool changed = false;
+    for (int i = 0; i < length; ++i) {
+      char c = src[i];
+      if ('A' <= c && c <= 'Z') {
+        c += ('a' - 'A');
+        changed = true;
+      }
+      dst[i] = c;
+    }
+    return changed;
+  }
+};
+
+
+struct ToUpperTraits {
+  typedef unibrow::ToUppercase UnibrowConverter;
+
+  static bool ConvertAscii(char* dst, char* src, int length) {
+    bool changed = false;
+    for (int i = 0; i < length; ++i) {
+      char c = src[i];
+      if ('a' <= c && c <= 'z') {
+        c -= ('a' - 'A');
+        changed = true;
+      }
+      dst[i] = c;
+    }
+    return changed;
+  }
+};
+
+}  // namespace
+
+
+template <typename ConvertTraits>
+static Object* ConvertCase(
+    Arguments args,
+    unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
   NoHandleAllocation ha;
-
   CONVERT_CHECKED(String, s, args[0]);
-  s->TryFlattenIfNotFlat();
+  s->TryFlatten();
 
-  int input_string_length = s->length();
+  const int length = s->length();
   // Assume that the string is not empty; we need this assumption later
-  if (input_string_length == 0) return s;
-  int length = input_string_length;
+  if (length == 0) return s;
+
+  // Simpler handling of ascii strings.
+  //
+  // NOTE: This assumes that the upper/lower case of an ascii
+  // character is also ascii.  This is currently the case, but it
+  // might break in the future if we implement more context and locale
+  // dependent upper/lower conversions.
+  SeqAsciiString* seq_ascii = TryGetSeqAsciiString(s);
+  if (seq_ascii != NULL) {
+    Object* o = Heap::AllocateRawAsciiString(length);
+    if (o->IsFailure()) return o;
+    SeqAsciiString* result = SeqAsciiString::cast(o);
+    bool has_changed_character = ConvertTraits::ConvertAscii(
+        result->GetChars(), seq_ascii->GetChars(), length);
+    return has_changed_character ? result : s;
+  }
 
   Object* answer = ConvertCaseHelper(s, length, length, mapping);
   if (answer->IsSmi()) {
@@ -4091,18 +4999,20 @@
 
 
 static Object* Runtime_StringToLowerCase(Arguments args) {
-  return ConvertCase<unibrow::ToLowercase>(args, &to_lower_mapping);
+  return ConvertCase<ToLowerTraits>(args, &to_lower_mapping);
 }
 
 
 static Object* Runtime_StringToUpperCase(Arguments args) {
-  return ConvertCase<unibrow::ToUppercase>(args, &to_upper_mapping);
+  return ConvertCase<ToUpperTraits>(args, &to_upper_mapping);
 }
 
+
 static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
   return unibrow::WhiteSpace::Is(c) || c == 0x200b;
 }
 
+
 static Object* Runtime_StringTrim(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
@@ -4111,7 +5021,7 @@
   CONVERT_BOOLEAN_CHECKED(trimLeft, args[1]);
   CONVERT_BOOLEAN_CHECKED(trimRight, args[2]);
 
-  s->TryFlattenIfNotFlat();
+  s->TryFlatten();
   int length = s->length();
 
   int left = 0;
@@ -4130,6 +5040,245 @@
   return s->SubString(left, right);
 }
 
+
+template <typename schar, typename pchar>
+void FindStringIndices(Vector<const schar> subject,
+                       Vector<const pchar> pattern,
+                       ZoneList<int>* indices,
+                       unsigned int limit) {
+  ASSERT(limit > 0);
+  // Collect indices of pattern in subject. Stop after finding at most
+  // limit values. (The caller appends the end-of-string index if needed.)
+  StringSearchStrategy strategy =
+      InitializeStringSearch(pattern, sizeof(schar) == 1);
+  switch (strategy) {
+    case SEARCH_FAIL: return;
+    case SEARCH_SHORT: {
+      int pattern_length = pattern.length();
+      int index = 0;
+      while (limit > 0) {
+        index = SimpleIndexOf(subject, pattern, index);
+        if (index < 0) return;
+        indices->Add(index);
+        index += pattern_length;
+        limit--;
+      }
+      return;
+    }
+    case SEARCH_LONG: {
+      int pattern_length = pattern.length();
+      int index = 0;
+      while (limit > 0) {
+        index = ComplexIndexOf(subject, pattern, index);
+        if (index < 0) return;
+        indices->Add(index);
+        index += pattern_length;
+        limit--;
+      }
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
+template <typename schar>
+inline void FindCharIndices(Vector<const schar> subject,
+                            const schar pattern_char,
+                            ZoneList<int>* indices,
+                            unsigned int limit) {
+  // Collect indices of pattern_char in subject. Stop after finding at most
+  // limit values. (The caller appends the end-of-string index if needed.)
+  int index = 0;
+  while (limit > 0) {
+    index = SingleCharIndexOf(subject, pattern_char, index);
+    if (index < 0) return;
+    indices->Add(index);
+    index++;
+    limit--;
+  }
+}
+
+
+static Object* Runtime_StringSplit(Arguments args) {
+  ASSERT(args.length() == 3);
+  HandleScope handle_scope;
+  CONVERT_ARG_CHECKED(String, subject, 0);
+  CONVERT_ARG_CHECKED(String, pattern, 1);
+  CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
+
+  int subject_length = subject->length();
+  int pattern_length = pattern->length();
+  RUNTIME_ASSERT(pattern_length > 0);
+
+  // The limit can be very large (0xffffffffu), but since the pattern
+  // isn't empty, we can never create more parts than ~half the length
+  // of the subject.
+
+  if (!subject->IsFlat()) FlattenString(subject);
+
+  static const int kMaxInitialListCapacity = 16;
+
+  ZoneScope scope(DELETE_ON_EXIT);
+
+  // Find (up to limit) indices of separator and end-of-string in subject
+  int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
+  ZoneList<int> indices(initial_capacity);
+  if (pattern_length == 1) {
+    // Special case, go directly to fast single-character split.
+    AssertNoAllocation nogc;
+    uc16 pattern_char = pattern->Get(0);
+    if (subject->IsTwoByteRepresentation()) {
+      FindCharIndices(subject->ToUC16Vector(), pattern_char,
+                      &indices,
+                      limit);
+    } else if (pattern_char <= String::kMaxAsciiCharCode) {
+      FindCharIndices(subject->ToAsciiVector(),
+                      static_cast<char>(pattern_char),
+                      &indices,
+                      limit);
+    }
+  } else {
+    if (!pattern->IsFlat()) FlattenString(pattern);
+    AssertNoAllocation nogc;
+    if (subject->IsAsciiRepresentation()) {
+      Vector<const char> subject_vector = subject->ToAsciiVector();
+      if (pattern->IsAsciiRepresentation()) {
+        FindStringIndices(subject_vector,
+                          pattern->ToAsciiVector(),
+                          &indices,
+                          limit);
+      } else {
+        FindStringIndices(subject_vector,
+                          pattern->ToUC16Vector(),
+                          &indices,
+                          limit);
+      }
+    } else {
+      Vector<const uc16> subject_vector = subject->ToUC16Vector();
+      if (pattern->IsAsciiRepresentation()) {
+        FindStringIndices(subject_vector,
+                          pattern->ToAsciiVector(),
+                          &indices,
+                          limit);
+      } else {
+        FindStringIndices(subject_vector,
+                          pattern->ToUC16Vector(),
+                          &indices,
+                          limit);
+      }
+    }
+  }
+  if (static_cast<uint32_t>(indices.length()) < limit) {
+    indices.Add(subject_length);
+  }
+  // The list of indices now contains the end index of each part to create.
+
+  // Create JSArray of substrings separated by separator.
+  int part_count = indices.length();
+
+  Handle<JSArray> result = Factory::NewJSArray(part_count);
+  result->set_length(Smi::FromInt(part_count));
+
+  ASSERT(result->HasFastElements());
+
+  if (part_count == 1 && indices.at(0) == subject_length) {
+    FixedArray::cast(result->elements())->set(0, *subject);
+    return *result;
+  }
+
+  Handle<FixedArray> elements(FixedArray::cast(result->elements()));
+  int part_start = 0;
+  for (int i = 0; i < part_count; i++) {
+    HandleScope local_loop_handle;
+    int part_end = indices.at(i);
+    Handle<String> substring =
+        Factory::NewSubString(subject, part_start, part_end);
+    elements->set(i, *substring);
+    part_start = part_end + pattern_length;
+  }
+
+  return *result;
+}
+
+
+// Copies ascii characters to the given fixed array looking up
+// one-char strings in the cache. Gives up on the first char that is
+// not in the cache and fills the remainder with smi zeros. Returns
+// the length of the successfully copied prefix.
+static int CopyCachedAsciiCharsToArray(const char* chars,
+                                       FixedArray* elements,
+                                       int length) {
+  AssertNoAllocation nogc;
+  FixedArray* ascii_cache = Heap::single_character_string_cache();
+  Object* undefined = Heap::undefined_value();
+  int i;
+  for (i = 0; i < length; ++i) {
+    Object* value = ascii_cache->get(chars[i]);
+    if (value == undefined) break;
+    ASSERT(!Heap::InNewSpace(value));
+    elements->set(i, value, SKIP_WRITE_BARRIER);
+  }
+  if (i < length) {
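+    // Smi zero has an all-zero bit pattern, so memset can fill the remainder
+    // with valid smi zeros (checked by the ASSERT below).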
+    ASSERT(Smi::FromInt(0) == 0);
+    memset(elements->data_start() + i, 0, kPointerSize * (length - i));
+  }
+#ifdef DEBUG
+  for (int j = 0; j < length; ++j) {
+    Object* element = elements->get(j);
+    ASSERT(element == Smi::FromInt(0) ||
+           (element->IsString() && String::cast(element)->LooksValid()));
+  }
+#endif
+  return i;
+}
+
+
+// Converts a String to JSArray.
+// For example, "foo" => ["f", "o", "o"].
+static Object* Runtime_StringToArray(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(String, s, 0);
+
+  s->TryFlatten();
+  const int length = s->length();
+
+  Handle<FixedArray> elements;
+  if (s->IsFlat() && s->IsAsciiRepresentation()) {
+    Object* obj = Heap::AllocateUninitializedFixedArray(length);
+    if (obj->IsFailure()) return obj;
+    elements = Handle<FixedArray>(FixedArray::cast(obj));
+
+    Vector<const char> chars = s->ToAsciiVector();
+    // Note, this will initialize all elements (not only the prefix)
+    // to prevent GC from seeing partially initialized array.
+    int num_copied_from_cache = CopyCachedAsciiCharsToArray(chars.start(),
+                                                            *elements,
+                                                            length);
+
+    for (int i = num_copied_from_cache; i < length; ++i) {
+      elements->set(i, *LookupSingleCharacterStringFromCode(chars[i]));
+    }
+  } else {
+    elements = Factory::NewFixedArray(length);
+    for (int i = 0; i < length; ++i) {
+      elements->set(i, *LookupSingleCharacterStringFromCode(s->Get(i)));
+    }
+  }
+
+#ifdef DEBUG
+  for (int i = 0; i < length; ++i) {
+    ASSERT(String::cast(elements->get(i))->length() == 1);
+  }
+#endif
+
+  return *Factory::NewJSArrayWithElements(elements);
+}
+
+
 bool Runtime::IsUpperCaseChar(uint16_t ch) {
   unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
   int char_length = to_upper_mapping.get(ch, 0, chars);
@@ -4148,13 +5297,27 @@
 }
 
 
+static Object* Runtime_NumberToStringSkipCache(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  Object* number = args[0];
+  RUNTIME_ASSERT(number->IsNumber());
+
+  return Heap::NumberToString(number, false);
+}
+
+
 static Object* Runtime_NumberToInteger(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  Object* obj = args[0];
-  if (obj->IsSmi()) return obj;
-  CONVERT_DOUBLE_CHECKED(number, obj);
+  CONVERT_DOUBLE_CHECKED(number, args[0]);
+
+  // We do not include 0 so that we don't have to handle the +0 / -0 cases.
+  if (number > 0 && number <= Smi::kMaxValue) {
+    return Smi::FromInt(static_cast<int>(number));
+  }
   return Heap::NumberFromDouble(DoubleToInteger(number));
 }
 
@@ -4163,9 +5326,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  Object* obj = args[0];
-  if (obj->IsSmi() && Smi::cast(obj)->value() >= 0) return obj;
-  CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, obj);
+  CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
   return Heap::NumberFromUint32(number);
 }
 
@@ -4174,9 +5335,12 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  Object* obj = args[0];
-  if (obj->IsSmi()) return obj;
-  CONVERT_DOUBLE_CHECKED(number, obj);
+  CONVERT_DOUBLE_CHECKED(number, args[0]);
+
+  // We do not include 0 so that we don't have to handle the +0 / -0 cases.
+  if (number > 0 && number <= Smi::kMaxValue) {
+    return Smi::FromInt(static_cast<int>(number));
+  }
   return Heap::NumberFromInt32(DoubleToInt32(number));
 }
 
@@ -4247,7 +5411,7 @@
 
   CONVERT_DOUBLE_CHECKED(x, args[0]);
   CONVERT_DOUBLE_CHECKED(y, args[1]);
-  return Heap::NewNumberFromDouble(x / y);
+  return Heap::NumberFromDouble(x / y);
 }
 
 
@@ -4259,8 +5423,8 @@
   CONVERT_DOUBLE_CHECKED(y, args[1]);
 
   x = modulo(x, y);
-  // NewNumberFromDouble may return a Smi instead of a Number object
-  return Heap::NewNumberFromDouble(x);
+  // NumberFromDouble may return a Smi instead of a Number object
+  return Heap::NumberFromDouble(x);
 }
 
 
@@ -4274,7 +5438,7 @@
 }
 
 
-template<typename sinkchar>
+template <typename sinkchar>
 static inline void StringBuilderConcatHelper(String* special,
                                              sinkchar* sink,
                                              FixedArray* fixed_array,
@@ -4345,33 +5509,41 @@
 
   bool ascii = special->IsAsciiRepresentation();
   int position = 0;
-  int increment = 0;
   for (int i = 0; i < array_length; i++) {
+    int increment = 0;
     Object* elt = fixed_array->get(i);
     if (elt->IsSmi()) {
       // Smi encoding of position and length.
-      int len = Smi::cast(elt)->value();
-      if (len > 0) {
+      int smi_value = Smi::cast(elt)->value();
+      int pos;
+      int len;
+      if (smi_value > 0) {
         // Position and length encoded in one smi.
-        int pos = len >> 11;
-        len &= 0x7ff;
-        if (pos + len > special_length) {
-          return Top::Throw(Heap::illegal_argument_symbol());
-        }
-        increment = len;
+        pos = StringBuilderSubstringPosition::decode(smi_value);
+        len = StringBuilderSubstringLength::decode(smi_value);
       } else {
         // Position and length encoded in two smis.
-        increment = (-len);
-        // Get the position and check that it is also a smi.
+        len = -smi_value;
+        // Get the position and check that it is a positive smi.
         i++;
         if (i >= array_length) {
           return Top::Throw(Heap::illegal_argument_symbol());
         }
-        Object* pos = fixed_array->get(i);
-        if (!pos->IsSmi()) {
+        Object* next_smi = fixed_array->get(i);
+        if (!next_smi->IsSmi()) {
+          return Top::Throw(Heap::illegal_argument_symbol());
+        }
+        pos = Smi::cast(next_smi)->value();
+        if (pos < 0) {
           return Top::Throw(Heap::illegal_argument_symbol());
         }
       }
+      ASSERT(pos >= 0);
+      ASSERT(len >= 0);
+      if (pos > special_length || len > special_length - pos) {
+        return Top::Throw(Heap::illegal_argument_symbol());
+      }
+      increment = len;
     } else if (elt->IsString()) {
       String* element = String::cast(elt);
       int element_length = element->length();
@@ -4593,6 +5765,66 @@
 }
 
 
+static Object* StringInputBufferCompare(String* x, String* y) {
+  static StringInputBuffer bufx;
+  static StringInputBuffer bufy;
+  bufx.Reset(x);
+  bufy.Reset(y);
+  while (bufx.has_more() && bufy.has_more()) {
+    int d = bufx.GetNext() - bufy.GetNext();
+    if (d < 0) return Smi::FromInt(LESS);
+    else if (d > 0) return Smi::FromInt(GREATER);
+  }
+
+  // x is a (non-trivial) prefix of y:
+  if (bufy.has_more()) return Smi::FromInt(LESS);
+  // y is a prefix of x:
+  return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL);
+}
+
+
+static Object* FlatStringCompare(String* x, String* y) {
+  ASSERT(x->IsFlat());
+  ASSERT(y->IsFlat());
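+  // Compare the common prefix first; if it is identical, the shorter string
+  // is LESS, the longer one GREATER, and equal lengths compare EQUAL.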
+  Object* equal_prefix_result = Smi::FromInt(EQUAL);
+  int prefix_length = x->length();
+  if (y->length() < prefix_length) {
+    prefix_length = y->length();
+    equal_prefix_result = Smi::FromInt(GREATER);
+  } else if (y->length() > prefix_length) {
+    equal_prefix_result = Smi::FromInt(LESS);
+  }
+  int r;
+  if (x->IsAsciiRepresentation()) {
+    Vector<const char> x_chars = x->ToAsciiVector();
+    if (y->IsAsciiRepresentation()) {
+      Vector<const char> y_chars = y->ToAsciiVector();
+      r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+    } else {
+      Vector<const uc16> y_chars = y->ToUC16Vector();
+      r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+    }
+  } else {
+    Vector<const uc16> x_chars = x->ToUC16Vector();
+    if (y->IsAsciiRepresentation()) {
+      Vector<const char> y_chars = y->ToAsciiVector();
+      r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+    } else {
+      Vector<const uc16> y_chars = y->ToUC16Vector();
+      r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+    }
+  }
+  Object* result;
+  if (r == 0) {
+    result = equal_prefix_result;
+  } else {
+    result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
+  }
+  ASSERT(result == StringInputBufferCompare(x, y));
+  return result;
+}
+
+
 static Object* Runtime_StringCompare(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -4615,33 +5847,13 @@
   if (d < 0) return Smi::FromInt(LESS);
   else if (d > 0) return Smi::FromInt(GREATER);
 
-  x->TryFlattenIfNotFlat();
-  y->TryFlattenIfNotFlat();
+  Object* obj = Heap::PrepareForCompare(x);
+  if (obj->IsFailure()) return obj;
+  obj = Heap::PrepareForCompare(y);
+  if (obj->IsFailure()) return obj;
 
-  static StringInputBuffer bufx;
-  static StringInputBuffer bufy;
-  bufx.Reset(x);
-  bufy.Reset(y);
-  while (bufx.has_more() && bufy.has_more()) {
-    int d = bufx.GetNext() - bufy.GetNext();
-    if (d < 0) return Smi::FromInt(LESS);
-    else if (d > 0) return Smi::FromInt(GREATER);
-  }
-
-  // x is (non-trivial) prefix of y:
-  if (bufy.has_more()) return Smi::FromInt(LESS);
-  // y is prefix of x:
-  return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL);
-}
-
-
-static Object* Runtime_Math_abs(Arguments args) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 1);
-  Counters::math_abs.Increment();
-
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  return Heap::AllocateHeapNumber(fabs(x));
+  return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
+                                      : StringInputBufferCompare(x, y);
 }
 
 
@@ -4818,17 +6030,56 @@
   }
 }
 
+// Fast version of Math.pow if we know that y is not an integer and y is not
+// -0.5 or 0.5. Used as the slow case from generated code.
+static Object* Runtime_Math_pow_cfunction(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_DOUBLE_CHECKED(x, args[0]);
+  CONVERT_DOUBLE_CHECKED(y, args[1]);
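+  // Per ECMA-262: x to the power 0 is 1 (even for NaN x); a NaN exponent, or
+  // an exponent of +/-Infinity with base +/-1, yields NaN.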
+  if (y == 0) {
+    return Smi::FromInt(1);
+  } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+    return Heap::nan_value();
+  } else {
+    return Heap::AllocateHeapNumber(pow(x, y));
+  }
+}
 
-static Object* Runtime_Math_round(Arguments args) {
+
+static Object* Runtime_RoundNumber(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   Counters::math_round.Increment();
 
-  CONVERT_DOUBLE_CHECKED(x, args[0]);
-  if (signbit(x) && x >= -0.5) return Heap::minus_zero_value();
-  double integer = ceil(x);
-  if (integer - x > 0.5) { integer -= 1.0; }
-  return Heap::NumberFromDouble(integer);
+  if (!args[0]->IsHeapNumber()) {
+    // Must be a smi. Return the argument unchanged for all other types,
+    // to keep the fuzz-natives test happy.
+    return args[0];
+  }
+
+  HeapNumber* number = reinterpret_cast<HeapNumber*>(args[0]);
+
+  double value = number->value();
+  int exponent = number->get_exponent();
+  int sign = number->get_sign();
+
+  // We compare with kSmiValueSize - 3 because (2^30 - 0.1) has exponent 29,
+  // yet rounds to 2^30, which is not a smi.
+  if (!sign && exponent <= kSmiValueSize - 3) {
+    return Smi::FromInt(static_cast<int>(value + 0.5));
+  }
+
+  // If the magnitude is big enough, there is no room for a fraction part (a
+  // double has 52 mantissa bits), so the value is already an integer. Trying
+  // to add 0.5 to such a number would add 1.0 instead.
+  if (exponent >= 52) {
+    return number;
+  }
+
+  if (sign && value >= -0.5) return Heap::minus_zero_value();
+
+  // Do not call NumberFromDouble() to avoid extra checks.
+  return Heap::AllocateHeapNumber(floor(value + 0.5));
 }
 
 
@@ -4862,6 +6113,368 @@
 }
 
 
+static int MakeDay(int year, int month, int day) {
+  static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
+                                       181, 212, 243, 273, 304, 334};
+  static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
+                                            182, 213, 244, 274, 305, 335};
+
+  year += month / 12;
+  month %= 12;
+  if (month < 0) {
+    year--;
+    month += 12;
+  }
+
+  ASSERT(month >= 0);
+  ASSERT(month < 12);
+
+  // year_delta is an arbitrary number such that:
+  // a) year_delta = -1 (mod 400)
+  // b) year + year_delta > 0 for years in the range defined by
+  //    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
+  //    Jan 1 1970. This is required so that we don't run into integer
+  //    division of negative numbers.
+  // c) there shouldn't be an overflow for 32-bit integers in the following
+  //    operations.
+  static const int year_delta = 399999;
+  static const int base_day = 365 * (1970 + year_delta) +
+                              (1970 + year_delta) / 4 -
+                              (1970 + year_delta) / 100 +
+                              (1970 + year_delta) / 400;
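+  // With these constants, MakeDay(1970, 0, 1) evaluates to 0, the epoch day.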
+
+  int year1 = year + year_delta;
+  int day_from_year = 365 * year1 +
+                      year1 / 4 -
+                      year1 / 100 +
+                      year1 / 400 -
+                      base_day;
+
+  if (year % 4 || (year % 100 == 0 && year % 400 != 0)) {
+    return day_from_year + day_from_month[month] + day - 1;
+  }
+
+  return day_from_year + day_from_month_leap[month] + day - 1;
+}
+
+
+static Object* Runtime_DateMakeDay(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  CONVERT_SMI_CHECKED(year, args[0]);
+  CONVERT_SMI_CHECKED(month, args[1]);
+  CONVERT_SMI_CHECKED(date, args[2]);
+
+  return Smi::FromInt(MakeDay(year, month, date));
+}
+
+
+static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
+static const int kDaysIn4Years = 4 * 365 + 1;
+static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
+static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
+static const int kDays1970to2000 = 30 * 365 + 7;
+static const int kDaysOffset = 1000 * kDaysIn400Years + 5 * kDaysIn400Years -
+                               kDays1970to2000;
+static const int kYearsOffset = 400000;
+
+static const char kDayInYear[] = {
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
+
+static const char kMonthInYear[] = {
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+      0, 0, 0, 0, 0, 0,
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+      1, 1, 1,
+      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+      2, 2, 2, 2, 2, 2,
+      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+      3, 3, 3, 3, 3,
+      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+      4, 4, 4, 4, 4, 4,
+      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 5, 5, 5, 5,
+      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+      6, 6, 6, 6, 6, 6,
+      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+      7, 7, 7, 7, 7, 7,
+      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+      8, 8, 8, 8, 8,
+      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+      9, 9, 9, 9, 9, 9,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+      0, 0, 0, 0, 0, 0,
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+      1, 1, 1,
+      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+      2, 2, 2, 2, 2, 2,
+      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+      3, 3, 3, 3, 3,
+      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+      4, 4, 4, 4, 4, 4,
+      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 5, 5, 5, 5,
+      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+      6, 6, 6, 6, 6, 6,
+      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+      7, 7, 7, 7, 7, 7,
+      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+      8, 8, 8, 8, 8,
+      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+      9, 9, 9, 9, 9, 9,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+      0, 0, 0, 0, 0, 0,
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+      1, 1, 1, 1,
+      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+      2, 2, 2, 2, 2, 2,
+      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+      3, 3, 3, 3, 3,
+      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+      4, 4, 4, 4, 4, 4,
+      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 5, 5, 5, 5,
+      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+      6, 6, 6, 6, 6, 6,
+      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+      7, 7, 7, 7, 7, 7,
+      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+      8, 8, 8, 8, 8,
+      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+      9, 9, 9, 9, 9, 9,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+      0, 0, 0, 0, 0, 0,
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+      1, 1, 1,
+      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+      2, 2, 2, 2, 2, 2,
+      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+      3, 3, 3, 3, 3,
+      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+      4, 4, 4, 4, 4, 4,
+      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 5, 5, 5, 5,
+      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+      6, 6, 6, 6, 6, 6,
+      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+      7, 7, 7, 7, 7, 7,
+      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+      8, 8, 8, 8, 8,
+      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+      9, 9, 9, 9, 9, 9,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11};
+
+
+// This function works for dates from 1970 to 2099.
+static inline void DateYMDFromTimeAfter1970(int date,
+                                            int& year, int& month, int& day) {
+#ifdef DEBUG
+  int save_date = date;  // Needed for the ASSERT at the end.
+#endif
+
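+  // 1970 begins a 4-year block whose leap year (1972) comes third, matching
+  // the layout of kDayInYear/kMonthInYear; the +2 rounds the division so
+  // that it advances exactly at year boundaries.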
+  year = 1970 + (4 * date + 2) / kDaysIn4Years;
+  date %= kDaysIn4Years;
+
+  month = kMonthInYear[date];
+  day = kDayInYear[date];
+
+  ASSERT(MakeDay(year, month, day) == save_date);
+}
+
+
+static inline void DateYMDFromTimeSlow(int date,
+                                       int& year, int& month, int& day) {
+#ifdef DEBUG
+  int save_date = date;  // Needed for the ASSERTs below.
+#endif
+
+  date += kDaysOffset;
+  year = 400 * (date / kDaysIn400Years) - kYearsOffset;
+  date %= kDaysIn400Years;
+
+  ASSERT(MakeDay(year, 0, 1) + date == save_date);
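+  // The date-- / date++ steps below compensate for the single extra leap day
+  // at the 400-year and 4-year cycle boundaries (and the missing one at
+  // century boundaries), so each division counts whole sub-cycles.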
+
+  date--;
+  int yd1 = date / kDaysIn100Years;
+  date %= kDaysIn100Years;
+  year += 100 * yd1;
+
+  date++;
+  int yd2 = date / kDaysIn4Years;
+  date %= kDaysIn4Years;
+  year += 4 * yd2;
+
+  date--;
+  int yd3 = date / 365;
+  date %= 365;
+  year += yd3;
+
+  bool is_leap = (!yd1 || yd2) && !yd3;
+
+  ASSERT(date >= -1);
+  ASSERT(is_leap || (date >= 0));
+  ASSERT((date < 365) || (is_leap && (date < 366)));
+  ASSERT(is_leap == ((year % 4 == 0) && (year % 100 || (year % 400 == 0))));
+  ASSERT(is_leap || ((MakeDay(year, 0, 1) + date) == save_date));
+  ASSERT(!is_leap || ((MakeDay(year, 0, 1) + date + 1) == save_date));
+
+  if (is_leap) {
+    day = kDayInYear[2 * 365 + 1 + date];
+    month = kMonthInYear[2 * 365 + 1 + date];
+  } else {
+    day = kDayInYear[date];
+    month = kMonthInYear[date];
+  }
+
+  ASSERT(MakeDay(year, month, day) == save_date);
+}
+
+
+static inline void DateYMDFromTime(int date,
+                                   int& year, int& month, int& day) {
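+  // 32 * kDaysIn4Years days is 128 years (through 2097), safely before 2100,
+  // where the century rule would invalidate the simple 4-year tables.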
+  if (date >= 0 && date < 32 * kDaysIn4Years) {
+    DateYMDFromTimeAfter1970(date, year, month, day);
+  } else {
+    DateYMDFromTimeSlow(date, year, month, day);
+  }
+}
+
+
+static Object* Runtime_DateYMDFromTime(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_CHECKED(t, args[0]);
+  CONVERT_CHECKED(JSArray, res_array, args[1]);
+
+  int year, month, day;
+  DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
+
+  res_array->SetElement(0, Smi::FromInt(year));
+  res_array->SetElement(1, Smi::FromInt(month));
+  res_array->SetElement(2, Smi::FromInt(day));
+
+  return Heap::undefined_value();
+}
+
+
 static Object* Runtime_NewArgumentsFast(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
@@ -4897,13 +6510,13 @@
   HandleScope scope;
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(Context, context, 0);
-  CONVERT_ARG_CHECKED(JSFunction, boilerplate, 1);
+  CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1);
 
   PretenureFlag pretenure = (context->global_context() == *context)
       ? TENURED       // Allocate global closures in old space.
       : NOT_TENURED;  // Allocate local closures in new space.
   Handle<JSFunction> result =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context, pretenure);
+      Factory::NewFunctionFromSharedFunctionInfo(shared, context, pretenure);
   return *result;
 }
 
@@ -4941,6 +6554,16 @@
   }
 
   Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
+
+  // If the function should not have a prototype, construction is not
+  // allowed. In this case generated code bails out here, since the function
+  // has no initial_map.
+  if (!function->should_have_prototype()) {
+    Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
+    Handle<Object> type_error =
+        Factory::NewTypeError("not_constructor", arguments);
+    return Top::Throw(*type_error);
+  }
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Handle stepping into constructors if step into is active.
   if (Debug::StepInActive()) {
@@ -5463,6 +7086,7 @@
   }
   args[0]->Print();
   if (args[0]->IsHeapObject()) {
+    PrintF("\n");
     HeapObject::cast(args[0])->map()->Print();
   }
 #else
@@ -5554,21 +7178,6 @@
 }
 
 
-static Object* Runtime_NumberIsFinite(Arguments args) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 1);
-
-  CONVERT_DOUBLE_CHECKED(value, args[0]);
-  Object* result;
-  if (isnan(value) || (fpclassify(value) == FP_INFINITE)) {
-    result = Heap::false_value();
-  } else {
-    result = Heap::true_value();
-  }
-  return result;
-}
-
-
 static Object* Runtime_GlobalReceiver(Arguments args) {
   ASSERT(args.length() == 1);
   Object* global = args[0];
@@ -5587,13 +7196,13 @@
   Handle<Context> context(Top::context()->global_context());
   Compiler::ValidationState validate = (is_json->IsTrue())
     ? Compiler::VALIDATE_JSON : Compiler::DONT_VALIDATE_JSON;
-  Handle<JSFunction> boilerplate = Compiler::CompileEval(source,
-                                                         context,
-                                                         true,
-                                                         validate);
-  if (boilerplate.is_null()) return Failure::Exception();
+  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
+                                                            context,
+                                                            true,
+                                                            validate);
+  if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
+      Factory::NewFunctionFromSharedFunctionInfo(shared, context, NOT_TENURED);
   return *fun;
 }
 
@@ -5666,14 +7275,14 @@
   // Deal with a normal eval call with a string argument. Compile it
   // and return the compiled function bound in the local context.
   Handle<String> source = args.at<String>(1);
-  Handle<JSFunction> boilerplate = Compiler::CompileEval(
+  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
       source,
       Handle<Context>(Top::context()),
       Top::context()->IsGlobalContext(),
       Compiler::DONT_VALIDATE_JSON);
-  if (boilerplate.is_null()) return MakePair(Failure::Exception(), NULL);
-  callee = Factory::NewFunctionFromBoilerplate(
-      boilerplate,
+  if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
+  callee = Factory::NewFunctionFromSharedFunctionInfo(
+      shared,
       Handle<Context>(Top::context()),
       NOT_TENURED);
   return MakePair(*callee, args[2]);
@@ -6163,6 +7772,32 @@
 }
 
 
+static Object* Runtime_SwapElements(Arguments args) {
+  HandleScope handle_scope;
+
+  ASSERT_EQ(3, args.length());
+
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  Handle<Object> key1 = args.at<Object>(1);
+  Handle<Object> key2 = args.at<Object>(2);
+
+  uint32_t index1, index2;
+  if (!Array::IndexFromObject(*key1, &index1)
+      || !Array::IndexFromObject(*key2, &index2)) {
+    return Top::ThrowIllegalOperation();
+  }
+
+  Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
+  Handle<Object> tmp1 = GetElement(jsobject, index1);
+  Handle<Object> tmp2 = GetElement(jsobject, index2);
+
+  SetElement(jsobject, index1, tmp2);
+  SetElement(jsobject, index2, tmp1);
+
+  return Heap::undefined_value();
+}
+
+
 // Returns an array that tells you where in the [0, length) interval an array
 // might have elements.  Can either return keys or intervals.  Keys can have
 // gaps in (undefined).  Intervals can also span over some undefined keys.
@@ -7655,14 +9290,14 @@
   Handle<String> function_source =
       Factory::NewStringFromAscii(Vector<const char>(source_str,
                                                      source_str_length));
-  Handle<JSFunction> boilerplate =
+  Handle<SharedFunctionInfo> shared =
       Compiler::CompileEval(function_source,
                             context,
                             context->IsGlobalContext(),
                             Compiler::DONT_VALIDATE_JSON);
-  if (boilerplate.is_null()) return Failure::Exception();
+  if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromSharedFunctionInfo(shared, context);
 
   // Invoke the result of the compilation to get the evaluation function.
   bool has_pending_exception;
@@ -7723,15 +9358,15 @@
   Handle<Context> context = Top::global_context();
 
   // Compile the source to be evaluated.
-  Handle<JSFunction> boilerplate =
-      Handle<JSFunction>(Compiler::CompileEval(source,
-                                               context,
-                                               true,
-                                               Compiler::DONT_VALIDATE_JSON));
-  if (boilerplate.is_null()) return Failure::Exception();
+  Handle<SharedFunctionInfo> shared =
+      Compiler::CompileEval(source,
+                            context,
+                            true,
+                            Compiler::DONT_VALIDATE_JSON);
+  if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
-      Handle<JSFunction>(Factory::NewFunctionFromBoilerplate(boilerplate,
-                                                             context));
+      Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,
+                                                                    context));
 
   // Invoke the result of the compilation to get the evaluation function.
   bool has_pending_exception;
@@ -8011,6 +9646,261 @@
   return f->shared()->inferred_name();
 }
 
+
+static int FindSharedFunctionInfosForScript(Script* script,
+                                            FixedArray* buffer) {
+  AssertNoAllocation no_allocations;
+
+  int counter = 0;
+  int buffer_size = buffer->length();
+  HeapIterator iterator;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+    ASSERT(obj != NULL);
+    if (!obj->IsSharedFunctionInfo()) {
+      continue;
+    }
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+    if (shared->script() != script) {
+      continue;
+    }
+    if (counter < buffer_size) {
+      buffer->set(counter, shared);
+    }
+    counter++;
+  }
+  return counter;
+}
+
+// For a given script, finds all SharedFunctionInfos in the heap that point
+// to this script. Returns a JSArray of SharedFunctionInfos wrapped
+// in OpaqueReferences.
+static Object* Runtime_LiveEditFindSharedFunctionInfosForScript(
+    Arguments args) {
+  ASSERT(args.length() == 1);
+  HandleScope scope;
+  CONVERT_CHECKED(JSValue, script_value, args[0]);
+
+  Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
+
+  const int kBufferSize = 32;
+
+  Handle<FixedArray> array;
+  array = Factory::NewFixedArray(kBufferSize);
+  int number = FindSharedFunctionInfosForScript(*script, *array);
+  if (number > kBufferSize) {
+    array = Factory::NewFixedArray(number);
+    FindSharedFunctionInfosForScript(*script, *array);
+  }
+
+  Handle<JSArray> result = Factory::NewJSArrayWithElements(array);
+  result->set_length(Smi::FromInt(number));
+
+  LiveEdit::WrapSharedFunctionInfos(result);
+
+  return *result;
+}
+
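
The helper above deliberately runs in two passes: a counting pass with a small fixed buffer, and a second pass with an exactly sized buffer only if the first one overflows, so no allocation happens while the heap is being iterated. An illustrative model of that count-then-retry pattern, with a plain array standing in for the heap walk (names are assumed, not V8 API):

#include <stdio.h>

// Writes at most buffer_size matches into buffer, but always returns the
// total number of matches found.
static int CollectMatches(const int* heap, int heap_size, int key,
                          int* buffer, int buffer_size) {
  int counter = 0;
  for (int i = 0; i < heap_size; i++) {
    if (heap[i] != key) continue;
    if (counter < buffer_size) buffer[counter] = heap[i];
    counter++;  // Keep counting even after the buffer is full.
  }
  return counter;
}

int main() {
  int heap[] = { 7, 1, 7, 3, 7 };
  int small[2];
  int found = CollectMatches(heap, 5, 7, small, 2);
  if (found > 2) {
    // First pass overflowed: allocate an exact-size buffer and rescan.
    int* exact = new int[found];
    CollectMatches(heap, 5, 7, exact, found);
    delete[] exact;
  }
  printf("matches: %d\n", found);
  return 0;
}
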
+// For a given script, calculates compilation information about all its
+// functions. The script source is explicitly specified by the second argument.
+// The source of the actual script is not used; however, it is important that
+// all generated code keeps references to this particular instance of script.
+// Returns a JSArray of compilation infos. The array is ordered so that
+// each function with all its descendants is always stored in a contiguous
+// range, with the function itself going first. The root function is a script
+// function.
+static Object* Runtime_LiveEditGatherCompileInfo(Arguments args) {
+  ASSERT(args.length() == 2);
+  HandleScope scope;
+  CONVERT_CHECKED(JSValue, script, args[0]);
+  CONVERT_ARG_CHECKED(String, source, 1);
+  Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+  JSArray* result = LiveEdit::GatherCompileInfo(script_handle, source);
+
+  if (Top::has_pending_exception()) {
+    return Failure::Exception();
+  }
+
+  return result;
+}
+
+// Changes the source of the script to new_source.
+// If old_script_name is provided (i.e. is a String), also creates a copy of
+// the script with its original source and sends a notification to the debugger.
+static Object* Runtime_LiveEditReplaceScript(Arguments args) {
+  ASSERT(args.length() == 3);
+  HandleScope scope;
+  CONVERT_CHECKED(JSValue, original_script_value, args[0]);
+  CONVERT_ARG_CHECKED(String, new_source, 1);
+  Handle<Object> old_script_name(args[2]);
+
+  CONVERT_CHECKED(Script, original_script_pointer,
+                  original_script_value->value());
+  Handle<Script> original_script(original_script_pointer);
+
+  Object* old_script = LiveEdit::ChangeScriptSource(original_script,
+                                                    new_source,
+                                                    old_script_name);
+
+  if (old_script->IsScript()) {
+    Handle<Script> script_handle(Script::cast(old_script));
+    return *(GetScriptWrapper(script_handle));
+  } else {
+    return Heap::null_value();
+  }
+}
+
+// Replaces code of SharedFunctionInfo with a new one.
+static Object* Runtime_LiveEditReplaceFunctionCode(Arguments args) {
+  ASSERT(args.length() == 2);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0);
+  CONVERT_ARG_CHECKED(JSArray, shared_info, 1);
+
+  return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
+}
+
+// Connects SharedFunctionInfo to another script.
+static Object* Runtime_LiveEditFunctionSetScript(Arguments args) {
+  ASSERT(args.length() == 2);
+  HandleScope scope;
+  Handle<Object> function_object(args[0]);
+  Handle<Object> script_object(args[1]);
+
+  if (function_object->IsJSValue()) {
+    Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
+    if (script_object->IsJSValue()) {
+      CONVERT_CHECKED(Script, script, JSValue::cast(*script_object)->value());
+      script_object = Handle<Object>(script);
+    }
+
+    LiveEdit::SetFunctionScript(function_wrapper, script_object);
+  } else {
+    // Just ignore this. We may not have a SharedFunctionInfo for some
+    // functions, and this is where that case is filtered out.
+  }
+
+  return Heap::undefined_value();
+}
+
+
+// In the code of a parent function, replaces the embedded object referring to
+// the original function with one referring to its substitution.
+static Object* Runtime_LiveEditReplaceRefToNestedFunction(Arguments args) {
+  ASSERT(args.length() == 3);
+  HandleScope scope;
+
+  CONVERT_ARG_CHECKED(JSValue, parent_wrapper, 0);
+  CONVERT_ARG_CHECKED(JSValue, orig_wrapper, 1);
+  CONVERT_ARG_CHECKED(JSValue, subst_wrapper, 2);
+
+  LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
+                                       subst_wrapper);
+
+  return Heap::undefined_value();
+}
+
+
+// Updates the positions of a shared function info (first parameter) according
+// to a script source change. The text change is described by the second
+// parameter as an array of groups of 3 numbers:
+// (change_begin, change_end, change_end_new_position).
+// Each group describes a change in the text; groups are sorted by change_begin.
+static Object* Runtime_LiveEditPatchFunctionPositions(Arguments args) {
+  ASSERT(args.length() == 2);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
+  CONVERT_ARG_CHECKED(JSArray, position_change_array, 1);
+
+  return LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
+}
+
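
A sketch, under the assumed semantics above, of how an old source position can be mapped through such a change list; positions falling inside a changed region would need finer-grained handling than shown here (TranslatePosition is illustrative, not part of the patch):

static int TranslatePosition(int pos, const int* changes, int change_count) {
  int result = pos;
  for (int i = 0; i < change_count; i++) {
    int begin = changes[3 * i];
    int end = changes[3 * i + 1];
    int new_end = changes[3 * i + 2];
    if (pos < begin) break;  // Sorted by change_begin: nothing later applies.
    if (pos >= end) {
      // new_end is a position in the new text, so it already reflects all
      // earlier changes; the latest applicable change wins outright.
      result = new_end + (pos - end);
    }
  }
  return result;
}
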
+
+// For an array of SharedFunctionInfos (each wrapped in a JSValue),
+// checks that none of them have activations on any thread's stack.
+// Returns an array of the same length with corresponding results of
+// the LiveEdit::FunctionPatchabilityStatus type.
+static Object* Runtime_LiveEditCheckAndDropActivations(Arguments args) {
+  ASSERT(args.length() == 2);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
+  CONVERT_BOOLEAN_CHECKED(do_drop, args[1]);
+
+  return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
+}
+
+// Compares two strings line-by-line and returns the diff as a JSArray of
+// triplets (pos1, pos1_end, pos2_end) describing the list of diff chunks.
+static Object* Runtime_LiveEditCompareStringsLinewise(Arguments args) {
+  ASSERT(args.length() == 2);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(String, s1, 0);
+  CONVERT_ARG_CHECKED(String, s2, 1);
+
+  return *LiveEdit::CompareStringsLinewise(s1, s2);
+}
+
+
+// A testing entry. Returns statement position which is the closest to
+// source_position.
+static Object* Runtime_GetFunctionCodePositionFromSource(Arguments args) {
+  ASSERT(args.length() == 2);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
+
+  Handle<Code> code(function->code());
+
+  RelocIterator it(*code, 1 << RelocInfo::STATEMENT_POSITION);
+  int closest_pc = 0;
+  int distance = kMaxInt;
+  while (!it.done()) {
+    int statement_position = static_cast<int>(it.rinfo()->data());
+    // Check if this statement position is closer than what was previously
+    // found.
+    if (source_position <= statement_position &&
+        statement_position - source_position < distance) {
+      closest_pc =
+          static_cast<int>(it.rinfo()->pc() - code->instruction_start());
+      distance = statement_position - source_position;
+      // An exact match; we cannot get any closer.
+      if (distance == 0) break;
+    }
+    it.next();
+  }
+
+  return Smi::FromInt(closest_pc);
+}
+
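
The scan above is a linear pass over the relocation info, keeping the closest statement position at or after the requested one. The same logic over a plain array of (statement_position, pc_offset) pairs, as a self-contained sketch (PosEntry and ClosestPc are assumed names):

#include <limits.h>

struct PosEntry { int statement_position; int pc_offset; };

static int ClosestPc(const PosEntry* entries, int count, int source_position) {
  int closest_pc = 0;
  int distance = INT_MAX;
  for (int i = 0; i < count; i++) {
    int d = entries[i].statement_position - source_position;
    if (d >= 0 && d < distance) {  // At or after the requested position.
      closest_pc = entries[i].pc_offset;
      distance = d;
      if (distance == 0) break;    // Exact match: cannot get closer.
    }
  }
  return closest_pc;
}
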
+
+// Calls the specified function with or without entering the debugger.
+// This is used in unit tests to run code as if the debugger were entered, or
+// simply to have a stack with a C++ frame in the middle.
+static Object* Runtime_ExecuteInDebugContext(Arguments args) {
+  ASSERT(args.length() == 2);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_BOOLEAN_CHECKED(without_debugger, args[1]);
+
+  Handle<Object> result;
+  bool pending_exception;
+  {
+    if (without_debugger) {
+      result = Execution::Call(function, Top::global(), 0, NULL,
+                               &pending_exception);
+    } else {
+      EnterDebugger enter_debugger;
+      result = Execution::Call(function, Top::global(), 0, NULL,
+                               &pending_exception);
+    }
+  }
+  if (!pending_exception) {
+    return *result;
+  } else {
+    return Failure::Exception();
+  }
+}
+
+
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
@@ -8197,6 +10087,91 @@
 }
 
 
+static Object* CacheMiss(FixedArray* cache_obj, int index, Object* key_obj) {
+  ASSERT(index % 2 == 0);  // index of the key
+  ASSERT(index >= JSFunctionResultCache::kEntriesIndex);
+  ASSERT(index < cache_obj->length());
+
+  HandleScope scope;
+
+  Handle<FixedArray> cache(cache_obj);
+  Handle<Object> key(key_obj);
+  Handle<JSFunction> factory(JSFunction::cast(
+        cache->get(JSFunctionResultCache::kFactoryIndex)));
+  // TODO(antonm): consider passing a receiver when constructing a cache.
+  Handle<Object> receiver(Top::global_context()->global());
+
+  Handle<Object> value;
+  {
+    // This handle is neither shared nor used later, so it is safe.
+    Object** argv[] = { key.location() };
+    bool pending_exception = false;
+    value = Execution::Call(factory,
+                            receiver,
+                            1,
+                            argv,
+                            &pending_exception);
+    if (pending_exception) return Failure::Exception();
+  }
+
+  cache->set(index, *key);
+  cache->set(index + 1, *value);
+  cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(index));
+
+  return *value;
+}
+
+
+static Object* Runtime_GetFromCache(Arguments args) {
+  // This is only called from codegen, so the checks can be more lax.
+  CONVERT_CHECKED(FixedArray, cache, args[0]);
+  Object* key = args[1];
+
+  const int finger_index =
+      Smi::cast(cache->get(JSFunctionResultCache::kFingerIndex))->value();
+
+  Object* o = cache->get(finger_index);
+  if (o == key) {
+    // The fastest case: hit the same place again.
+    return cache->get(finger_index + 1);
+  }
+
+  for (int i = finger_index - 2;
+       i >= JSFunctionResultCache::kEntriesIndex;
+       i -= 2) {
+    o = cache->get(i);
+    if (o == key) {
+      cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(i));
+      return cache->get(i + 1);
+    }
+  }
+
+  const int size =
+      Smi::cast(cache->get(JSFunctionResultCache::kCacheSizeIndex))->value();
+  ASSERT(size <= cache->length());
+
+  for (int i = size - 2; i > finger_index; i -= 2) {
+    o = cache->get(i);
+    if (o == key) {
+      cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(i));
+      return cache->get(i + 1);
+    }
+  }
+
+  // Cache miss.  If we have spare room, put the new data into it; otherwise
+  // evict the entry just past the finger, which must be the least recently
+  // used one.
+  if (size < cache->length()) {
+    cache->set(JSFunctionResultCache::kCacheSizeIndex, Smi::FromInt(size + 2));
+    return CacheMiss(cache, size, key);
+  } else {
+    int target_index = finger_index + JSFunctionResultCache::kEntrySize;
+    if (target_index == cache->length()) {
+      target_index = JSFunctionResultCache::kEntriesIndex;
+    }
+    return CacheMiss(cache, target_index, key);
+  }
+}
+
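
Taken together, CacheMiss and Runtime_GetFromCache implement a flat key/value array with a "finger" that remembers the last hit: lookups scan from the finger towards the start, then from the end down to the finger, and a miss either fills spare room or evicts the entry just past the finger. A standalone model of that structure, assuming integer keys and a squaring "factory" purely for illustration:

#include <vector>

class FingerCacheModel {
 public:
  explicit FingerCacheModel(int capacity_pairs)
      : slots_(capacity_pairs * 2, -1), finger_(0), size_(0) {}

  int Get(int key) {  // Assumes key != -1; -1 marks an empty slot.
    // Fastest case: hit the same entry as last time.
    if (size_ > 0 && slots_[finger_] == key) return slots_[finger_ + 1];
    // Scan from the finger towards the start, then from the end down to it.
    for (int i = finger_ - 2; i >= 0; i -= 2) {
      if (slots_[i] == key) { finger_ = i; return slots_[i + 1]; }
    }
    for (int i = size_ - 2; i > finger_; i -= 2) {
      if (slots_[i] == key) { finger_ = i; return slots_[i + 1]; }
    }
    // Miss: use spare room first, otherwise evict the entry past the finger.
    int index;
    if (size_ < static_cast<int>(slots_.size())) {
      index = size_;
      size_ += 2;
    } else {
      index = finger_ + 2;
      if (index == static_cast<int>(slots_.size())) index = 0;
    }
    slots_[index] = key;
    slots_[index + 1] = key * key;  // Stand-in for calling the factory.
    finger_ = index;
    return slots_[index + 1];
  }

 private:
  std::vector<int> slots_;  // [key0, value0, key1, value1, ...]
  int finger_;              // Index of the key slot hit most recently.
  int size_;                // Number of slots in use.
};
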
 #ifdef DEBUG
 // ListNatives is ONLY used by the fuzz-natives.js in debug mode
 // Exclude the code in release mode.
@@ -8205,18 +10180,28 @@
   HandleScope scope;
   Handle<JSArray> result = Factory::NewJSArray(0);
   int index = 0;
+  bool inline_runtime_functions = false;
 #define ADD_ENTRY(Name, argc, ressize)                                       \
   {                                                                          \
     HandleScope inner;                                                       \
-    Handle<String> name =                                                    \
-      Factory::NewStringFromAscii(                                           \
-          Vector<const char>(#Name, StrLength(#Name)));       \
+    Handle<String> name;                                                     \
+    /* Inline runtime functions have an underscore in front of the name. */  \
+    if (inline_runtime_functions) {                                          \
+      name = Factory::NewStringFromAscii(                                    \
+          Vector<const char>("_" #Name, StrLength("_" #Name)));              \
+    } else {                                                                 \
+      name = Factory::NewStringFromAscii(                                    \
+          Vector<const char>(#Name, StrLength(#Name)));                      \
+    }                                                                        \
     Handle<JSArray> pair = Factory::NewJSArray(0);                           \
     SetElement(pair, 0, name);                                               \
     SetElement(pair, 1, Handle<Smi>(Smi::FromInt(argc)));                    \
     SetElement(result, index++, pair);                                       \
   }
+  inline_runtime_functions = false;
   RUNTIME_FUNCTION_LIST(ADD_ENTRY)
+  inline_runtime_functions = true;
+  INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
 #undef ADD_ENTRY
   return *result;
 }
diff --git a/src/runtime.h b/src/runtime.h
index e2e5c22..a7f0bf3 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -60,6 +60,9 @@
   F(GetArgumentsProperty, 1, 1) \
   F(ToFastProperties, 1, 1) \
   F(ToSlowProperties, 1, 1) \
+  F(FinishArrayPrototypeSetup, 1, 1) \
+  F(SpecialArrayFunctions, 1, 1) \
+  F(GetGlobalReceiver, 0, 1) \
   \
   F(IsInPrototypeChain, 2, 1) \
   F(SetHiddenPrototype, 2, 1) \
@@ -91,11 +94,13 @@
   F(StringParseFloat, 1, 1) \
   F(StringToLowerCase, 1, 1) \
   F(StringToUpperCase, 1, 1) \
+  F(StringSplit, 3, 1) \
   F(CharFromCode, 1, 1) \
   F(URIEscape, 1, 1) \
   F(URIUnescape, 1, 1) \
   \
   F(NumberToString, 1, 1) \
+  F(NumberToStringSkipCache, 1, 1) \
   F(NumberToInteger, 1, 1) \
   F(NumberToJSUint32, 1, 1) \
   F(NumberToJSInt32, 1, 1) \
@@ -131,7 +136,6 @@
   F(StringCompare, 2, 1) \
   \
   /* Math */ \
-  F(Math_abs, 1, 1) \
   F(Math_acos, 1, 1) \
   F(Math_asin, 1, 1) \
   F(Math_atan, 1, 1) \
@@ -142,7 +146,8 @@
   F(Math_floor, 1, 1) \
   F(Math_log, 1, 1) \
   F(Math_pow, 2, 1) \
-  F(Math_round, 1, 1) \
+  F(Math_pow_cfunction, 2, 1) \
+  F(RoundNumber, 1, 1) \
   F(Math_sin, 1, 1) \
   F(Math_sqrt, 1, 1) \
   F(Math_tan, 1, 1) \
@@ -150,6 +155,9 @@
   /* Regular expressions */ \
   F(RegExpCompile, 3, 1) \
   F(RegExpExec, 4, 1) \
+  F(RegExpExecMultiple, 4, 1) \
+  F(RegExpInitializeObject, 5, 1) \
+  F(RegExpConstructResult, 3, 1) \
   \
   /* Strings */ \
   F(StringCharCodeAt, 2, 1) \
@@ -161,6 +169,7 @@
   F(StringReplaceRegExpWithString, 4, 1) \
   F(StringMatch, 3, 1) \
   F(StringTrim, 3, 1) \
+  F(StringToArray, 1, 1) \
   \
   /* Numbers */ \
   F(NumberToRadixString, 2, 1) \
@@ -175,6 +184,7 @@
   F(FunctionSetPrototype, 2, 1) \
   F(FunctionGetName, 1, 1) \
   F(FunctionSetName, 2, 1) \
+  F(FunctionRemovePrototype, 1, 1) \
   F(FunctionGetSourceCode, 1, 1) \
   F(FunctionGetScript, 1, 1) \
   F(FunctionGetScriptSourcePosition, 1, 1) \
@@ -200,9 +210,10 @@
   F(DateLocalTimezone, 1, 1) \
   F(DateLocalTimeOffset, 0, 1) \
   F(DateDaylightSavingsOffset, 1, 1) \
+  F(DateMakeDay, 3, 1) \
+  F(DateYMDFromTime, 2, 1) \
   \
   /* Numbers */ \
-  F(NumberIsFinite, 1, 1) \
   \
   /* Globals */ \
   F(CompileString, 2, 1) \
@@ -222,6 +233,7 @@
   F(GetArrayKeys, 2, 1) \
   F(MoveArrayContents, 2, 1) \
   F(EstimateNumberOfElements, 1, 1) \
+  F(SwapElements, 3, 1) \
   \
   /* Getters and Setters */ \
   F(DefineAccessor, -1 /* 4 or 5 */, 1) \
@@ -230,11 +242,10 @@
   /* Literals */ \
   F(MaterializeRegExpLiteral, 4, 1)\
   F(CreateArrayLiteralBoilerplate, 3, 1) \
-  F(CreateObjectLiteralBoilerplate, 3, 1) \
   F(CloneLiteralBoilerplate, 1, 1) \
   F(CloneShallowLiteralBoilerplate, 1, 1) \
-  F(CreateObjectLiteral, 3, 1) \
-  F(CreateObjectLiteralShallow, 3, 1) \
+  F(CreateObjectLiteral, 4, 1) \
+  F(CreateObjectLiteralShallow, 4, 1) \
   F(CreateArrayLiteral, 3, 1) \
   F(CreateArrayLiteralShallow, 3, 1) \
   \
@@ -279,6 +290,8 @@
   F(LocalKeys, 1, 1) \
   /* Handle scopes */ \
   F(DeleteHandleScopeExtensions, 0, 1) \
+  /* Cache support */ \
+  F(GetFromCache, 2, 1) \
   \
   /* Pseudo functions - handled as macros by parser */ \
   F(IS_VAR, 1, 1)
@@ -321,7 +334,18 @@
   F(SystemBreak, 0, 1) \
   F(DebugDisassembleFunction, 1, 1) \
   F(DebugDisassembleConstructor, 1, 1) \
-  F(FunctionGetInferredName, 1, 1)
+  F(FunctionGetInferredName, 1, 1) \
+  F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
+  F(LiveEditGatherCompileInfo, 2, 1) \
+  F(LiveEditReplaceScript, 3, 1) \
+  F(LiveEditReplaceFunctionCode, 2, 1) \
+  F(LiveEditFunctionSetScript, 2, 1) \
+  F(LiveEditReplaceRefToNestedFunction, 3, 1) \
+  F(LiveEditPatchFunctionPositions, 2, 1) \
+  F(LiveEditCheckAndDropActivations, 2, 1) \
+  F(LiveEditCompareStringsLinewise, 2, 1) \
+  F(GetFunctionCodePositionFromSource, 2, 1) \
+  F(ExecuteInDebugContext, 2, 1)
 #else
 #define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
 #endif
@@ -400,6 +424,7 @@
   // Support getting the characters in a string using [] notation as
   // in Firefox/SpiderMonkey, Safari and Opera.
   static Object* GetElementOrCharAt(Handle<Object> object, uint32_t index);
+  static Object* GetElement(Handle<Object> object, uint32_t index);
 
   static Object* SetObjectProperty(Handle<Object> object,
                                    Handle<Object> key,
diff --git a/src/runtime.js b/src/runtime.js
index e9d9848..be93c4f 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -471,17 +471,6 @@
 }
 
 
-// Specialized version of String.charAt. It assumes string as
-// the receiver type and that the index is a number.
-function STRING_CHAR_AT(pos) {
-  var char_code = %_FastCharCodeAt(this, pos);
-  if (!%_IsSmi(char_code)) {
-    return %StringCharAt(this, pos);
-  }
-  return %CharFromCode(char_code);
-}
-
-
 /* -------------------------------------
    - - -   C o n v e r s i o n s   - - -
    -------------------------------------
@@ -530,7 +519,7 @@
 }
 
 function NonStringToString(x) {
-  if (IS_NUMBER(x)) return %NumberToString(x);
+  if (IS_NUMBER(x)) return %_NumberToString(x);
   if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
   if (IS_UNDEFINED(x)) return 'undefined';
   return (IS_NULL(x)) ? 'null' : %ToString(%DefaultString(x));
@@ -577,11 +566,11 @@
   if (IS_NUMBER(x)) {
     if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
     // x is +0 and y is -0 or vice versa
-    if (x === 0 && y === 0 && !%_IsSmi(x) && !%_IsSmi(y) && 
+    if (x === 0 && y === 0 && !%_IsSmi(x) && !%_IsSmi(y) &&
         ((1 / x < 0 && 1 / y > 0) || (1 / x > 0 && 1 / y < 0))) {
       return false;
     }
-    return x == y;    
+    return x == y;
   }
   if (IS_STRING(x)) return %StringEquals(x, y);
   if (IS_BOOLEAN(x))return %NumberEquals(%ToNumber(x),%ToNumber(y));
diff --git a/src/scanner.cc b/src/scanner.cc
index cf7e49f..8943119 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "handles.h"
 #include "scanner.h"
 
 namespace v8 {
@@ -86,12 +87,7 @@
 
 
 UTF16Buffer::UTF16Buffer()
-    : pos_(0), size_(0) { }
-
-
-Handle<String> UTF16Buffer::SubString(int start, int end) {
-  return internal::SubString(data_, start, end);
-}
+    : pos_(0), end_(Scanner::kNoEndPosition) { }
 
 
 // CharacterStreamUTF16Buffer
@@ -100,10 +96,14 @@
 
 
 void CharacterStreamUTF16Buffer::Initialize(Handle<String> data,
-                                            unibrow::CharacterStream* input) {
-  data_ = data;
-  pos_ = 0;
+                                            unibrow::CharacterStream* input,
+                                            int start_position,
+                                            int end_position) {
   stream_ = input;
+  if (start_position > 0) {
+    SeekForward(start_position);
+  }
+  end_ = end_position != Scanner::kNoEndPosition ? end_position : kMaxInt;
 }
 
 
@@ -115,6 +115,8 @@
 
 
 uc32 CharacterStreamUTF16Buffer::Advance() {
+  ASSERT(end_ != Scanner::kNoEndPosition);
+  ASSERT(end_ >= 0);
   // NOTE: It is of importance to Persian / Farsi resources that we do
   // *not* strip format control characters in the scanner; see
   //
@@ -126,7 +128,7 @@
   if (!pushback_buffer()->is_empty()) {
     pos_++;
     return last_ = pushback_buffer()->RemoveLast();
-  } else if (stream_->has_more()) {
+  } else if (stream_->has_more() && pos_ < end_) {
     pos_++;
     uc32 next = stream_->GetNext();
     return last_ = next;
@@ -146,25 +148,32 @@
 }
 
 
-// TwoByteStringUTF16Buffer
-TwoByteStringUTF16Buffer::TwoByteStringUTF16Buffer()
+// ExternalStringUTF16Buffer
+template <typename StringType, typename CharType>
+ExternalStringUTF16Buffer<StringType, CharType>::ExternalStringUTF16Buffer()
     : raw_data_(NULL) { }
 
 
-void TwoByteStringUTF16Buffer::Initialize(
-     Handle<ExternalTwoByteString> data) {
+template <typename StringType, typename CharType>
+void ExternalStringUTF16Buffer<StringType, CharType>::Initialize(
+     Handle<StringType> data,
+     int start_position,
+     int end_position) {
   ASSERT(!data.is_null());
-
-  data_ = data;
-  pos_ = 0;
-
   raw_data_ = data->resource()->data();
-  size_ = data->length();
+
+  ASSERT(end_position <= data->length());
+  if (start_position > 0) {
+    SeekForward(start_position);
+  }
+  end_ =
+      end_position != Scanner::kNoEndPosition ? end_position : data->length();
 }
 
 
-uc32 TwoByteStringUTF16Buffer::Advance() {
-  if (pos_ < size_) {
+template <typename StringType, typename CharType>
+uc32 ExternalStringUTF16Buffer<StringType, CharType>::Advance() {
+  if (pos_ < end_) {
     return raw_data_[pos_++];
   } else {
     // note: currently the following increment is necessary to avoid a
@@ -175,14 +184,16 @@
 }
 
 
-void TwoByteStringUTF16Buffer::PushBack(uc32 ch) {
+template <typename StringType, typename CharType>
+void ExternalStringUTF16Buffer<StringType, CharType>::PushBack(uc32 ch) {
   pos_--;
   ASSERT(pos_ >= Scanner::kCharacterLookaheadBufferSize);
   ASSERT(raw_data_[pos_ - Scanner::kCharacterLookaheadBufferSize] == ch);
 }
 
 
-void TwoByteStringUTF16Buffer::SeekForward(int pos) {
+template <typename StringType, typename CharType>
+void ExternalStringUTF16Buffer<StringType, CharType>::SeekForward(int pos) {
   pos_ = pos;
 }
 
@@ -327,21 +338,56 @@
     : stack_overflow_(false), is_pre_parsing_(pre == PREPARSE) { }
 
 
+void Scanner::Initialize(Handle<String> source,
+                         ParserLanguage language) {
+  safe_string_input_buffer_.Reset(source.location());
+  Init(source, &safe_string_input_buffer_, 0, source->length(), language);
+}
+
+
+void Scanner::Initialize(Handle<String> source,
+                         unibrow::CharacterStream* stream,
+                         ParserLanguage language) {
+  Init(source, stream, 0, kNoEndPosition, language);
+}
+
+
+void Scanner::Initialize(Handle<String> source,
+                         int start_position,
+                         int end_position,
+                         ParserLanguage language) {
+  safe_string_input_buffer_.Reset(source.location());
+  Init(source, &safe_string_input_buffer_,
+       start_position, end_position, language);
+}
+
+
 void Scanner::Init(Handle<String> source,
                    unibrow::CharacterStream* stream,
-                   int position,
+                   int start_position,
+                   int end_position,
                    ParserLanguage language) {
   // Initialize the source buffer.
   if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
     two_byte_string_buffer_.Initialize(
-        Handle<ExternalTwoByteString>::cast(source));
+        Handle<ExternalTwoByteString>::cast(source),
+        start_position,
+        end_position);
     source_ = &two_byte_string_buffer_;
+  } else if (!source.is_null() && StringShape(*source).IsExternalAscii()) {
+    ascii_string_buffer_.Initialize(
+        Handle<ExternalAsciiString>::cast(source),
+        start_position,
+        end_position);
+    source_ = &ascii_string_buffer_;
   } else {
-    char_stream_buffer_.Initialize(source, stream);
+    char_stream_buffer_.Initialize(source,
+                                   stream,
+                                   start_position,
+                                   end_position);
     source_ = &char_stream_buffer_;
   }
 
-  position_ = position;
   is_parsing_json_ = (language == JSON);
 
   // Set c0_ (one character ahead)
@@ -358,11 +404,6 @@
 }
 
 
-Handle<String> Scanner::SubString(int start, int end) {
-  return source_->SubString(start - position_, end - position_);
-}
-
-
 Token::Value Scanner::Next() {
   // BUG 1215673: Find a thread safe way to set a stack limit in
   // pre-parse mode. Otherwise, we cannot safely pre-parse from other
diff --git a/src/scanner.h b/src/scanner.h
index f0035c0..d5efdff 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -84,32 +84,34 @@
 };
 
 
+// Interface through which the scanner reads characters from the input source.
 class UTF16Buffer {
  public:
   UTF16Buffer();
   virtual ~UTF16Buffer() {}
 
   virtual void PushBack(uc32 ch) = 0;
-  // returns a value < 0 when the buffer end is reached
+  // Returns a value < 0 when the buffer end is reached.
   virtual uc32 Advance() = 0;
   virtual void SeekForward(int pos) = 0;
 
   int pos() const { return pos_; }
-  int size() const { return size_; }
-  Handle<String> SubString(int start, int end);
 
  protected:
-  Handle<String> data_;
-  int pos_;
-  int size_;
+  int pos_;  // Current position in the buffer.
+  int end_;  // Position where scanning should stop (EOF).
 };
 
 
+// UTF16 buffer to read characters from a character stream.
 class CharacterStreamUTF16Buffer: public UTF16Buffer {
  public:
   CharacterStreamUTF16Buffer();
   virtual ~CharacterStreamUTF16Buffer() {}
-  void Initialize(Handle<String> data, unibrow::CharacterStream* stream);
+  void Initialize(Handle<String> data,
+                  unibrow::CharacterStream* stream,
+                  int start_position,
+                  int end_position);
   virtual void PushBack(uc32 ch);
   virtual uc32 Advance();
   virtual void SeekForward(int pos);
@@ -123,17 +125,21 @@
 };
 
 
-class TwoByteStringUTF16Buffer: public UTF16Buffer {
+// UTF16 buffer to read characters from an external string.
+template <typename StringType, typename CharType>
+class ExternalStringUTF16Buffer: public UTF16Buffer {
  public:
-  TwoByteStringUTF16Buffer();
-  virtual ~TwoByteStringUTF16Buffer() {}
-  void Initialize(Handle<ExternalTwoByteString> data);
+  ExternalStringUTF16Buffer();
+  virtual ~ExternalStringUTF16Buffer() {}
+  void Initialize(Handle<StringType> data,
+                  int start_position,
+                  int end_position);
   virtual void PushBack(uc32 ch);
   virtual uc32 Advance();
   virtual void SeekForward(int pos);
 
  private:
-  const uint16_t* raw_data_;
+  const CharType* raw_data_;  // Pointer to the actual array of characters.
 };
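
The point of the template is that one buffer implementation is stamped out for both external-string flavours, differing only in the character type it reads. A minimal model of the same trick (ExternalBufferModel and the typedefs are illustrative only):

#include <assert.h>

template <typename CharType>
class ExternalBufferModel {
 public:
  ExternalBufferModel(const CharType* data, int end)
      : raw_data_(data), pos_(0), end_(end) {}

  // Returns the next character, or a value < 0 once end_ is reached.
  int Advance() { return pos_ < end_ ? raw_data_[pos_++] : -1; }

  void PushBack(int ch) {
    pos_--;
    assert(pos_ >= 0 && raw_data_[pos_] == ch);
  }

 private:
  const CharType* raw_data_;  // Points at the external string's characters.
  int pos_;                   // Current position within the string.
  int end_;                   // Exclusive end of the scanned range.
};

// The two instantiations mirror the ones above:
typedef ExternalBufferModel<char> AsciiBufferModel;
typedef ExternalBufferModel<unsigned short> TwoByteBufferModel;
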
 
 
@@ -263,11 +269,15 @@
   // Construction
   explicit Scanner(ParserMode parse_mode);
 
-  // Initialize the Scanner to scan source:
-  void Init(Handle<String> source,
-            unibrow::CharacterStream* stream,
-            int position,
-            ParserLanguage language);
+  // Initialize the Scanner to scan source.
+  void Initialize(Handle<String> source,
+                  ParserLanguage language);
+  void Initialize(Handle<String> source,
+                  unibrow::CharacterStream* stream,
+                  ParserLanguage language);
+  void Initialize(Handle<String> source,
+                  int start_position, int end_position,
+                  ParserLanguage language);
 
   // Returns the next token.
   Token::Value Next();
@@ -335,7 +345,6 @@
   // tokens, which is what it is used for.
   void SeekForward(int pos);
 
-  Handle<String> SubString(int start_pos, int end_pos);
   bool stack_overflow() { return stack_overflow_; }
 
   static StaticResource<Utf8Decoder>* utf8_decoder() { return &utf8_decoder_; }
@@ -350,14 +359,28 @@
   static unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
 
   static const int kCharacterLookaheadBufferSize = 1;
+  static const int kNoEndPosition = 1;
 
  private:
-  CharacterStreamUTF16Buffer char_stream_buffer_;
-  TwoByteStringUTF16Buffer two_byte_string_buffer_;
+  void Init(Handle<String> source,
+            unibrow::CharacterStream* stream,
+            int start_position, int end_position,
+            ParserLanguage language);
 
-  // Source.
+
+  // Different UTF16 buffers used to pull characters from. Based on the input,
+  // one of these will be initialized as the actual data source.
+  CharacterStreamUTF16Buffer char_stream_buffer_;
+  ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
+      two_byte_string_buffer_;
+  ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
+
+  // Source. Will point to one of the buffers declared above.
   UTF16Buffer* source_;
-  int position_;
+
+  // Used to convert the source string into a character stream when a stream
+  // is not passed to the scanner.
+  SafeStringInputBuffer safe_string_input_buffer_;
 
   // Buffer to hold literal values (identifiers, strings, numbers)
   // using 0-terminated UTF-8 encoding.
@@ -460,7 +483,7 @@
 
   // Return the current source position.
   int source_pos() {
-    return source_->pos() - kCharacterLookaheadBufferSize + position_;
+    return source_->pos() - kCharacterLookaheadBufferSize;
   }
 
   // Decodes a unicode escape-sequence which is part of an identifier.
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index de1841b..8b7e2ad 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -82,7 +82,7 @@
   List<Variable*, Allocator> heap_locals(locals.length());
   for (int i = 0; i < locals.length(); i++) {
     Variable* var = locals[i];
-    if (var->var_uses()->is_used()) {
+    if (var->is_used()) {
       Slot* slot = var->slot();
       if (slot != NULL) {
         switch (slot->type()) {
@@ -130,7 +130,7 @@
   if (scope->is_function_scope()) {
     Variable* var = scope->function();
     if (var != NULL &&
-        var->var_uses()->is_used() &&
+        var->is_used() &&
         var->slot()->type() == Slot::CONTEXT) {
       function_name_ = var->name();
       // Note that we must not find the function name in the context slot
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 28d169a..927ac66 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -29,6 +29,7 @@
 #define V8_SCOPEINFO_H_
 
 #include "variables.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/scopes.cc b/src/scopes.cc
index 701e5e3..b55e5d5 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -309,7 +309,7 @@
   // which is the current user of this function).
   for (int i = 0; i < temps_.length(); i++) {
     Variable* var = temps_[i];
-    if (var->var_uses()->is_used()) {
+    if (var->is_used()) {
       locals->Add(var);
     }
   }
@@ -317,7 +317,7 @@
        p != NULL;
        p = variables_.Next(p)) {
     Variable* var = reinterpret_cast<Variable*>(p->value);
-    if (var->var_uses()->is_used()) {
+    if (var->is_used()) {
       locals->Add(var);
     }
   }
@@ -418,17 +418,16 @@
 
 
 static void PrintVar(PrettyPrinter* printer, int indent, Variable* var) {
-  if (var->var_uses()->is_used() || var->rewrite() != NULL) {
+  if (var->is_used() || var->rewrite() != NULL) {
     Indent(indent, Variable::Mode2String(var->mode()));
     PrintF(" ");
     PrintName(var->name());
     PrintF(";  // ");
-    if (var->rewrite() != NULL) PrintF("%s, ", printer->Print(var->rewrite()));
-    if (var->is_accessed_from_inner_scope()) PrintF("inner scope access, ");
-    PrintF("var ");
-    var->var_uses()->Print();
-    PrintF(", obj ");
-    var->obj_uses()->Print();
+    if (var->rewrite() != NULL) {
+      PrintF("%s, ", printer->Print(var->rewrite()));
+      if (var->is_accessed_from_inner_scope()) PrintF(", ");
+    }
+    if (var->is_accessed_from_inner_scope()) PrintF("inner scope access");
     PrintF("\n");
   }
 }
@@ -738,10 +737,10 @@
       (var->is_accessed_from_inner_scope_ ||
        scope_calls_eval_ || inner_scope_calls_eval_ ||
        scope_contains_with_)) {
-    var->var_uses()->RecordAccess(1);
+    var->set_is_used(true);
   }
   // Global variables do not need to be allocated.
-  return !var->is_global() && var->var_uses()->is_used();
+  return !var->is_global() && var->is_used();
 }
 
 
@@ -847,7 +846,7 @@
                        new Literal(Handle<Object>(Smi::FromInt(i))),
                        RelocInfo::kNoPosition,
                        Property::SYNTHETIC);
-        arguments_shadow->var_uses()->RecordUses(var->var_uses());
+        if (var->is_used()) arguments_shadow->set_is_used(true);
       }
     }
 
diff --git a/src/scopes.h b/src/scopes.h
index 9b506d9..c2354b2 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -277,7 +277,6 @@
   // The number of contexts between this and scope; zero if this == scope.
   int ContextChainLength(Scope* scope);
 
-
   // ---------------------------------------------------------------------------
   // Debugging.
 
diff --git a/src/serialize.cc b/src/serialize.cc
index 110e461..6841267 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -329,10 +329,15 @@
       RUNTIME_ENTRY,
       1,
       "Runtime::PerformGC");
-  Add(ExternalReference::random_positive_smi_function().address(),
+  Add(ExternalReference::fill_heap_number_with_random_function().address(),
       RUNTIME_ENTRY,
       2,
-      "V8::RandomPositiveSmi");
+      "V8::FillHeapNumberWithRandom");
+
+  Add(ExternalReference::random_uint32_function().address(),
+      RUNTIME_ENTRY,
+      3,
+      "V8::Random");
 
   // Miscellaneous
   Add(ExternalReference::the_hole_value_location().address(),
@@ -409,36 +414,44 @@
       UNCLASSIFIED,
       19,
       "compare_doubles");
-#ifdef V8_NATIVE_REGEXP
-  Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
+  Add(ExternalReference::compile_array_pop_call().address(),
       UNCLASSIFIED,
       20,
+      "compile_array_pop");
+  Add(ExternalReference::compile_array_push_call().address(),
+      UNCLASSIFIED,
+      21,
+      "compile_array_push");
+#ifndef V8_INTERPRETED_REGEXP
+  Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
+      UNCLASSIFIED,
+      22,
       "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
   Add(ExternalReference::re_check_stack_guard_state().address(),
       UNCLASSIFIED,
-      21,
+      23,
       "RegExpMacroAssembler*::CheckStackGuardState()");
   Add(ExternalReference::re_grow_stack().address(),
       UNCLASSIFIED,
-      22,
+      24,
       "NativeRegExpMacroAssembler::GrowStack()");
   Add(ExternalReference::re_word_character_map().address(),
       UNCLASSIFIED,
-      23,
+      25,
       "NativeRegExpMacroAssembler::word_character_map");
-#endif
+#endif  // V8_INTERPRETED_REGEXP
   // Keyed lookup cache.
   Add(ExternalReference::keyed_lookup_cache_keys().address(),
       UNCLASSIFIED,
-      24,
+      26,
       "KeyedLookupCache::keys()");
   Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
       UNCLASSIFIED,
-      25,
+      27,
       "KeyedLookupCache::field_offsets()");
   Add(ExternalReference::transcendental_cache_array_address().address(),
       UNCLASSIFIED,
-      26,
+      28,
       "TranscendentalCache::caches()");
 }
 
@@ -477,7 +490,7 @@
 
 void ExternalReferenceEncoder::Put(Address key, int index) {
   HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
-  entry->value = reinterpret_cast<void *>(index);
+  entry->value = reinterpret_cast<void*>(index);
 }
 
 
@@ -823,6 +836,9 @@
       case START_NEW_PAGE_SERIALIZATION: {
         int space = source_->Get();
         pages_[space].Add(last_object_address_);
+        if (space == CODE_SPACE) {
+          CPU::FlushICache(last_object_address_, Page::kPageSize);
+        }
         break;
       }
       case NATIVES_STRING_RESOURCE: {
@@ -974,7 +990,7 @@
 // the startup snapshot that correspond to the elements of this cache array.  On
 // deserialization we therefore need to visit the cache array.  This fills it up
 // with pointers to deserialized objects.
-void SerializerDeserializer::Iterate(ObjectVisitor *visitor) {
+void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
   visitor->VisitPointers(
       &partial_snapshot_cache_[0],
       &partial_snapshot_cache_[partial_snapshot_cache_length_]);
diff --git a/src/serialize.h b/src/serialize.h
index ab2ae9f..279bc58 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -503,7 +503,8 @@
     // unique ID, and deserializing several partial snapshots containing script
     // would cause dupes.
     ASSERT(!o->IsScript());
-    return o->IsString() || o->IsSharedFunctionInfo() || o->IsHeapNumber();
+    return o->IsString() || o->IsSharedFunctionInfo() ||
+           o->IsHeapNumber() || o->IsCode();
   }
 
  private:
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 72f8305..66894c4 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -145,6 +145,40 @@
 }
 
 
+bool Page::GetPageFlag(PageFlag flag) {
+  return (flags & flag) != 0;
+}
+
+
+void Page::SetPageFlag(PageFlag flag, bool value) {
+  if (value) {
+    flags |= flag;
+  } else {
+    flags &= ~flag;
+  }
+}
+
+
+bool Page::WasInUseBeforeMC() {
+  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
+}
+
+
+void Page::SetWasInUseBeforeMC(bool was_in_use) {
+  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
+}
+
+
+bool Page::IsLargeObjectPage() {
+  return !GetPageFlag(IS_NORMAL_PAGE);
+}
+
+
+void Page::SetIsLargeObjectPage(bool is_large_object_page) {
+  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
+}
+
+
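
These accessors replace the old is_normal_page word with a general flag word; note that IS_NORMAL_PAGE is stored inverted relative to IsLargeObjectPage(), so that large object pages keep a cleared low bit. A self-contained model of the encoding (PageFlagsModel is illustrative only):

struct PageFlagsModel {
  enum PageFlag {
    IS_NORMAL_PAGE = 1 << 0,
    WAS_IN_USE_BEFORE_MC = 1 << 1
  };

  int flags;

  PageFlagsModel() : flags(0) {}

  bool GetPageFlag(PageFlag flag) { return (flags & flag) != 0; }
  void SetPageFlag(PageFlag flag, bool value) {
    if (value) {
      flags |= flag;
    } else {
      flags &= ~flag;
    }
  }
  bool IsLargeObjectPage() { return !GetPageFlag(IS_NORMAL_PAGE); }
  void SetIsLargeObjectPage(bool large) { SetPageFlag(IS_NORMAL_PAGE, !large); }
};
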
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 
diff --git a/src/spaces.cc b/src/spaces.cc
index 2c495d8..6b6d926 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -524,7 +524,7 @@
   for (int i = 0; i < pages_in_chunk; i++) {
     Page* p = Page::FromAddress(page_addr);
     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-    p->is_normal_page = 1;
+    p->SetIsLargeObjectPage(false);
     page_addr += Page::kPageSize;
   }
 
@@ -568,6 +568,15 @@
 }
 
 
+void MemoryAllocator::FreeAllPages(PagedSpace* space) {
+  for (int i = 0, length = chunks_.length(); i < length; i++) {
+    if (chunks_[i].owner() == space) {
+      DeleteChunk(i);
+    }
+  }
+}
+
+
 void MemoryAllocator::DeleteChunk(int chunk_id) {
   ASSERT(IsValidChunk(chunk_id));
 
@@ -622,6 +631,74 @@
 #endif
 
 
+void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
+                                                 Page** first_page,
+                                                 Page** last_page,
+                                                 Page** last_page_in_use) {
+  Page* first = NULL;
+  Page* last = NULL;
+
+  for (int i = 0, length = chunks_.length(); i < length; i++) {
+    ChunkInfo& chunk = chunks_[i];
+
+    if (chunk.owner() == space) {
+      if (first == NULL) {
+        Address low = RoundUp(chunk.address(), Page::kPageSize);
+        first = Page::FromAddress(low);
+      }
+      last = RelinkPagesInChunk(i,
+                                chunk.address(),
+                                chunk.size(),
+                                last,
+                                last_page_in_use);
+    }
+  }
+
+  if (first_page != NULL) {
+    *first_page = first;
+  }
+
+  if (last_page != NULL) {
+    *last_page = last;
+  }
+}
+
+
+Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
+                                          Address chunk_start,
+                                          size_t chunk_size,
+                                          Page* prev,
+                                          Page** last_page_in_use) {
+  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
+  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
+
+  if (prev->is_valid()) {
+    SetNextPage(prev, Page::FromAddress(page_addr));
+  }
+
+  for (int i = 0; i < pages_in_chunk; i++) {
+    Page* p = Page::FromAddress(page_addr);
+    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+    page_addr += Page::kPageSize;
+
+    if (p->WasInUseBeforeMC()) {
+      *last_page_in_use = p;
+    }
+  }
+
+  // Set the next page of the last page to 0.
+  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+  last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+  if (last_page->WasInUseBeforeMC()) {
+    *last_page_in_use = last_page;
+  }
+
+  return last_page;
+}
+
+
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
@@ -677,6 +754,8 @@
   // Use first_page_ for allocation.
   SetAllocationInfo(&allocation_info_, first_page_);
 
+  page_list_is_chunk_ordered_ = true;
+
   return true;
 }
 
@@ -687,9 +766,8 @@
 
 
 void PagedSpace::TearDown() {
-  first_page_ = MemoryAllocator::FreePages(first_page_);
-  ASSERT(!first_page_->is_valid());
-
+  MemoryAllocator::FreeAllPages(this);
+  first_page_ = NULL;
   accounting_stats_.Clear();
 }
 
@@ -874,6 +952,12 @@
 
 
 void PagedSpace::Shrink() {
+  if (!page_list_is_chunk_ordered_) {
+    // We can't shrink space if pages is not chunk-ordered
+    // (see comment for class MemoryAllocator for definition).
+    return;
+  }
+
   // Release half of free pages.
   Page* top_page = AllocationTopPage();
   ASSERT(top_page->is_valid());
@@ -955,7 +1039,7 @@
         // The next page will be above the allocation top.
         above_allocation_top = true;
       } else {
-        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
+        ASSERT(top == PageAllocationLimit(current_page));
       }
 
       // It should be packed with objects from the bottom to the top.
@@ -1363,7 +1447,7 @@
 
 
 static void ReportCodeKindStatistics() {
-  const char* table[Code::NUMBER_OF_KINDS];
+  const char* table[Code::NUMBER_OF_KINDS] = { NULL };
 
 #define CASE(name)                            \
   case Code::name: table[Code::name] = #name; \
@@ -1379,6 +1463,7 @@
       CASE(STORE_IC);
       CASE(KEYED_STORE_IC);
       CASE(CALL_IC);
+      CASE(BINARY_OP_IC);
     }
   }
 
@@ -1413,7 +1498,7 @@
   PrintF("\n  Object Histogram:\n");
   for (int i = 0; i <= LAST_TYPE; i++) {
     if (heap_histograms[i].number() > 0) {
-      PrintF("    %-33s%10d (%10d bytes)\n",
+      PrintF("    %-34s%10d (%10d bytes)\n",
              heap_histograms[i].name(),
              heap_histograms[i].number(),
              heap_histograms[i].bytes());
@@ -1430,7 +1515,7 @@
   STRING_TYPE_LIST(INCREMENT)
 #undef INCREMENT
   if (string_number > 0) {
-    PrintF("    %-33s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
+    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
            string_bytes);
   }
 
@@ -1499,7 +1584,7 @@
     PrintF("\n  Object Histogram:\n");
     for (int i = 0; i <= LAST_TYPE; i++) {
       if (allocated_histogram_[i].number() > 0) {
-        PrintF("    %-33s%10d (%10d bytes)\n",
+        PrintF("    %-34s%10d (%10d bytes)\n",
                allocated_histogram_[i].name(),
                allocated_histogram_[i].number(),
                allocated_histogram_[i].bytes());
@@ -1781,6 +1866,9 @@
 // OldSpace implementation
 
 void OldSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  PagedSpace::PrepareForMarkCompact(will_compact);
+
   if (will_compact) {
     // Reset relocation info.  During a compacting collection, everything in
     // the space is considered 'available' and we will rediscover live data
@@ -1851,6 +1939,112 @@
 }
 
 
+void PagedSpace::FreePages(Page* prev, Page* last) {
+  if (last == AllocationTopPage()) {
+    // Pages are already at the end of used pages.
+    return;
+  }
+
+  Page* first = NULL;
+
+  // Remove pages from the list.
+  if (prev == NULL) {
+    first = first_page_;
+    first_page_ = last->next_page();
+  } else {
+    first = prev->next_page();
+    MemoryAllocator::SetNextPage(prev, last->next_page());
+  }
+
+  // Attach it after the last page.
+  MemoryAllocator::SetNextPage(last_page_, first);
+  last_page_ = last;
+  MemoryAllocator::SetNextPage(last, NULL);
+
+  // Clean them up.
+  do {
+    first->ClearRSet();
+    first = first->next_page();
+  } while (first != NULL);
+
+  // Order of pages in this space might no longer be consistent with
+  // order of pages in chunks.
+  page_list_is_chunk_ordered_ = false;
+}
+
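
FreePages is a splice: the freed range is detached from its place in the page list and re-attached after the current tail, which is exactly what breaks the chunk ordering noted at the end. The pointer surgery in isolation, on a bare singly-linked list (Node and PageListModel are assumed names):

#include <stddef.h>

struct Node { Node* next; };

struct PageListModel {
  Node* first;
  Node* last;

  // Moves the range (prev, last_moved] to the end of the list; a NULL prev
  // means the range starts at the head.
  void MoveRangeToEnd(Node* prev, Node* last_moved) {
    if (last_moved == last) return;  // Range is already at the end.
    Node* first_moved;
    if (prev == NULL) {
      first_moved = first;
      first = last_moved->next;
    } else {
      first_moved = prev->next;
      prev->next = last_moved->next;
    }
    last->next = first_moved;        // Attach the range after the old tail.
    last = last_moved;
    last_moved->next = NULL;
  }
};
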
+
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+  if (will_compact) {
+    // The mark-compact collector relies on the WAS_IN_USE_BEFORE_MC page flag
+    // to skip unused pages. Update the flag value for all pages in the space.
+    PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
+    Page* last_in_use = AllocationTopPage();
+    bool in_use = true;
+
+    while (all_pages_iterator.has_next()) {
+      Page* p = all_pages_iterator.next();
+      p->SetWasInUseBeforeMC(in_use);
+      if (p == last_in_use) {
+        // We passed the page containing the allocation top. All subsequent
+        // pages are not used.
+        in_use = false;
+      }
+    }
+
+    if (!page_list_is_chunk_ordered_) {
+      Page* new_last_in_use = Page::FromAddress(NULL);
+      MemoryAllocator::RelinkPageListInChunkOrder(this,
+                                                  &first_page_,
+                                                  &last_page_,
+                                                  &new_last_in_use);
+      ASSERT(new_last_in_use->is_valid());
+
+      if (new_last_in_use != last_in_use) {
+        // The current allocation top points to a page which is now in the
+        // middle of the page list. We should move the allocation top forward
+        // to the new last used page so that the various object iterators
+        // continue to work properly.
+
+        int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
+                                             last_in_use->AllocationTop());
+
+        if (size_in_bytes > 0) {
+          // There is still some space left on this page. Create a fake
+          // object which will occupy all free space on this page.
+          // Otherwise iterators would not be able to scan this page
+          // correctly.
+
+          Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
+                                     size_in_bytes);
+        }
+
+        // The new last-in-use page was in the middle of the list before
+        // sorting, so it is full.
+        SetTop(new_last_in_use->AllocationTop());
+
+        ASSERT(AllocationTopPage() == new_last_in_use);
+        ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+      }
+
+      PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+      while (pages_in_use_iterator.has_next()) {
+        Page* p = pages_in_use_iterator.next();
+        if (!p->WasInUseBeforeMC()) {
+          // Empty page is in the middle of a sequence of used pages.
+          // Create a fake object which will occupy all free space on this page.
+          // Otherwise iterators would not be able to scan this page correctly.
+          int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
+                                               p->ObjectAreaStart());
+
+          Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
+        }
+      }
+
+      page_list_is_chunk_ordered_ = true;
+    }
+  }
+}
+
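
The filler objects created above exist so that object iterators keep working: a page walk advances by each object's size, so any free gap must be covered by a dummy object carrying a valid size field. A toy model of such a walk (ObjHeaderModel and IteratePage are assumed names):

#include <stdio.h>

struct ObjHeaderModel { int size_in_bytes; bool is_filler; };

static void IteratePage(char* start, char* end) {
  char* p = start;
  while (p < end) {
    ObjHeaderModel* obj = reinterpret_cast<ObjHeaderModel*>(p);
    if (!obj->is_filler) {
      printf("live object of %d bytes\n", obj->size_in_bytes);
    }
    p += obj->size_in_bytes;  // A gap without a header would derail the walk.
  }
}
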
+
 bool PagedSpace::ReserveSpace(int bytes) {
   Address limit = allocation_info_.limit;
   Address top = allocation_info_.top;
@@ -2262,6 +2456,9 @@
 // FixedSpace implementation
 
 void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  PagedSpace::PrepareForMarkCompact(will_compact);
+
   if (will_compact) {
     // Reset relocation info.
     MCResetRelocationInfo();
@@ -2359,7 +2556,7 @@
 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
                                            int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
-  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
   accounting_stats_.WasteBytes(page_extra_);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
@@ -2604,7 +2801,7 @@
   // large object page.  If the chunk_size happened to be written there, its
   // low order bit should already be clear.
   ASSERT((chunk_size & 0x1) == 0);
-  page->is_normal_page &= ~0x1;
+  page->SetIsLargeObjectPage(true);
   page->ClearRSet();
   int extra_bytes = requested_size - object_size;
   if (extra_bytes > 0) {
@@ -2748,6 +2945,9 @@
 
 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
+  if (Heap::new_space()->Contains(address)) {
+    return false;
+  }
   Page* page = Page::FromAddress(address);
 
   SLOW_ASSERT(!page->IsLargeObjectPage()
diff --git a/src/spaces.h b/src/spaces.h
index 850a723..df42d51 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -167,8 +167,17 @@
     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
   }
 
+  // True if this page was in use before current compaction started.
+  // Result is valid only for pages owned by paged spaces and
+  // only after PagedSpace::PrepareForMarkCompact was called.
+  inline bool WasInUseBeforeMC();
+
+  inline void SetWasInUseBeforeMC(bool was_in_use);
+
   // True if this page is a large object page.
-  bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
+  inline bool IsLargeObjectPage();
+
+  inline void SetIsLargeObjectPage(bool is_large_object_page);
 
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
@@ -244,6 +253,14 @@
   // Maximum object size that fits in a page.
   static const int kMaxHeapObjectSize = kObjectAreaSize;
 
+  enum PageFlag {
+    IS_NORMAL_PAGE = 1 << 0,
+    WAS_IN_USE_BEFORE_MC = 1 << 1
+  };
+
+  inline bool GetPageFlag(PageFlag flag);
+  inline void SetPageFlag(PageFlag flag, bool value);
+
   //---------------------------------------------------------------------------
   // Page header description.
   //
@@ -262,7 +279,8 @@
   // second word *may* (if the page start and large object chunk start are
   // the same) contain the large object chunk size.  In either case, the
   // low-order bit for large object pages will be cleared.
-  int is_normal_page;
+  // For normal pages this word is used to store various page flags.
+  int flags;
 
   // The following fields may overlap with remembered set, they can only
   // be used in the mark-compact collector when remembered set is not
@@ -301,6 +319,12 @@
 
   virtual int Size() = 0;
 
+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect the space by marking it read-only/writable.
+  virtual void Protect() = 0;
+  virtual void Unprotect() = 0;
+#endif
+
 #ifdef DEBUG
   virtual void Print() = 0;
 #endif
@@ -401,6 +425,13 @@
 //
 // The memory allocator also allocates chunks for the large object space, but
 // they are managed by the space itself.  The new space does not expand.
+//
+// The fact that pages for paged spaces are allocated and deallocated in chunks
+// induces a constraint on the order of pages in a linked lists. We say that
+// pages are linked in the chunk-order if and only if every two consecutive
+// pages from the same chunk are consecutive in the linked list.
+//
+
 
 class MemoryAllocator : public AllStatic {
  public:
@@ -460,13 +491,18 @@
   static Page* AllocatePages(int requested_pages, int* allocated_pages,
                              PagedSpace* owner);
 
-  // Frees pages from a given page and after. If 'p' is the first page
-  // of a chunk, pages from 'p' are freed and this function returns an
-  // invalid page pointer. Otherwise, the function searches a page
-  // after 'p' that is the first page of a chunk. Pages after the
-  // found page are freed and the function returns 'p'.
+  // Frees pages from a given page and after. Requires pages to be
+  // linked in chunk-order (see comment for class).
+  // If 'p' is the first page of a chunk, pages from 'p' are freed
+  // and this function returns an invalid page pointer.
+  // Otherwise, the function searches a page after 'p' that is
+  // the first page of a chunk. Pages after the found page
+  // are freed and the function returns 'p'.
   static Page* FreePages(Page* p);
 
+  // Frees all pages owned by given space.
+  static void FreeAllPages(PagedSpace* space);
+
   // Allocates and frees raw memory of certain size.
   // These are just thin wrappers around OS::Allocate and OS::Free,
   // but keep track of allocated bytes as part of heap.
@@ -505,6 +541,15 @@
   static Page* FindFirstPageInSameChunk(Page* p);
   static Page* FindLastPageInSameChunk(Page* p);
 
+  // Relinks list of pages owned by space to make it chunk-ordered.
+  // Returns new first and last pages of space.
+  // Also returns the last page in the relinked list that has the
+  // WasInUseBeforeMC flag set.
+  static void RelinkPageListInChunkOrder(PagedSpace* space,
+                                         Page** first_page,
+                                         Page** last_page,
+                                         Page** last_page_in_use);
+
 #ifdef ENABLE_HEAP_PROTECTION
   // Protect/unprotect a block of memory by marking it read-only/writable.
   static inline void Protect(Address start, size_t size);
@@ -593,6 +638,12 @@
   // used as a marking stack and its page headers are destroyed.
   static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                       PagedSpace* owner);
+
+  static Page* RelinkPagesInChunk(int chunk_id,
+                                  Address chunk_start,
+                                  size_t chunk_size,
+                                  Page* prev,
+                                  Page** last_page_in_use);
 };
 
 
@@ -874,9 +925,16 @@
   void ClearRSet();
 
   // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact) = 0;
+  virtual void PrepareForMarkCompact(bool will_compact);
 
-  virtual Address PageAllocationTop(Page* page) = 0;
+  // The top of allocation in a page in this space. Undefined if page is unused.
+  Address PageAllocationTop(Page* page) {
+    return page == TopPageOf(allocation_info_) ? top()
+        : PageAllocationLimit(page);
+  }
+
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) = 0;
 
   // Current capacity without growing (Size() + Available() + Waste()).
   int Capacity() { return accounting_stats_.Capacity(); }
@@ -914,6 +972,16 @@
   // Used by ReserveSpace.
   virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
 
+  // Free all pages in range from prev (exclusive) to last (inclusive).
+  // Freed pages are moved to the end of page list.
+  void FreePages(Page* prev, Page* last);
+
+  // Set space allocation info.
+  void SetTop(Address top) {
+    allocation_info_.top = top;
+    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
+  }
+
   // ---------------------------------------------------------------------------
   // Mark-compact collection support functions
 
@@ -962,6 +1030,9 @@
   static void ResetCodeStatistics();
 #endif
 
+  // Returns the page of the allocation pointer.
+  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+
  protected:
   // Maximum capacity of this space.
   int max_capacity_;
@@ -976,6 +1047,10 @@
   // Expand and Shrink.
   Page* last_page_;
 
+  // True if pages owned by this space are linked in chunk-order.
+  // See comment for class MemoryAllocator for definition of chunk-order.
+  bool page_list_is_chunk_ordered_;
+
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
@@ -1037,8 +1112,6 @@
   void DoPrintRSet(const char* space_name);
 #endif
  private:
-  // Returns the page of the allocation pointer.
-  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
 
   // Returns a pointer to the page of the relocation pointer.
   Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
@@ -1169,6 +1242,12 @@
   bool Commit();
   bool Uncommit();
 
+#ifdef ENABLE_HEAP_PROTECTION
+  // Protect/unprotect the space by marking it read-only/writable.
+  virtual void Protect() {}
+  virtual void Unprotect() {}
+#endif
+
 #ifdef DEBUG
   virtual void Print();
   virtual void Verify();
@@ -1652,17 +1731,22 @@
   // pointer).
   int AvailableFree() { return free_list_.available(); }
 
-  // The top of allocation in a page in this space. Undefined if page is unused.
-  virtual Address PageAllocationTop(Page* page) {
-    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd();
   }
 
   // Give a block of memory to the space's free list.  It might be added to
   // the free list or accounted as waste.
-  void Free(Address start, int size_in_bytes) {
-    int wasted_bytes = free_list_.Free(start, size_in_bytes);
+  // If add_to_freelist is false, only the accounting stats are updated; no
+  // attempt is made to add the area to the free list.
+  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
     accounting_stats_.DeallocateBytes(size_in_bytes);
-    accounting_stats_.WasteBytes(wasted_bytes);
+
+    if (add_to_freelist) {
+      int wasted_bytes = free_list_.Free(start, size_in_bytes);
+      accounting_stats_.WasteBytes(wasted_bytes);
+    }
   }
 
   // Prepare for full garbage collection.  Resets the relocation pointer and
@@ -1715,17 +1799,20 @@
     page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
   }
 
-  // The top of allocation in a page in this space. Undefined if page is unused.
-  virtual Address PageAllocationTop(Page* page) {
-    return page == TopPageOf(allocation_info_) ? top()
-        : page->ObjectAreaEnd() - page_extra_;
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd() - page_extra_;
   }
 
   int object_size_in_bytes() { return object_size_in_bytes_; }
 
   // Give a fixed sized block of memory to the space's free list.
-  void Free(Address start) {
-    free_list_.Free(start);
+  // If add_to_freelist is false, only the accounting stats are updated; no
+  // attempt is made to add the area to the free list.
+  void Free(Address start, bool add_to_freelist) {
+    if (add_to_freelist) {
+      free_list_.Free(start);
+    }
     accounting_stats_.DeallocateBytes(object_size_in_bytes_);
   }
 
diff --git a/src/splay-tree-inl.h b/src/splay-tree-inl.h
new file mode 100644
index 0000000..9c2287e
--- /dev/null
+++ b/src/splay-tree-inl.h
@@ -0,0 +1,310 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SPLAY_TREE_INL_H_
+#define V8_SPLAY_TREE_INL_H_
+
+#include "splay-tree.h"
+
+namespace v8 {
+namespace internal {
+
+
+template<typename Config, class Allocator>
+SplayTree<Config, Allocator>::~SplayTree() {
+  NodeDeleter deleter;
+  ForEachNode(&deleter);
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
+  if (is_empty()) {
+    // If the tree is empty, insert the new node.
+    root_ = new Node(key, Config::kNoValue);
+  } else {
+    // Splay on the key to move the last node on the search path
+    // for the key to the root of the tree.
+    Splay(key);
+    // Ignore repeated insertions with the same key.
+    int cmp = Config::Compare(key, root_->key_);
+    if (cmp == 0) {
+      locator->bind(root_);
+      return false;
+    }
+    // Insert the new node.
+    Node* node = new Node(key, Config::kNoValue);
+    InsertInternal(cmp, node);
+  }
+  locator->bind(root_);
+  return true;
+}
+
+
+template<typename Config, class Allocator>
+void SplayTree<Config, Allocator>::InsertInternal(int cmp, Node* node) {
+  if (cmp > 0) {
+    node->left_ = root_;
+    node->right_ = root_->right_;
+    root_->right_ = NULL;
+  } else {
+    node->right_ = root_;
+    node->left_ = root_->left_;
+    root_->left_ = NULL;
+  }
+  root_ = node;
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::FindInternal(const Key& key) {
+  if (is_empty())
+    return false;
+  Splay(key);
+  return Config::Compare(key, root_->key_) == 0;
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
+  if (FindInternal(key)) {
+    locator->bind(root_);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::FindGreatestLessThan(const Key& key,
+                                                        Locator* locator) {
+  if (is_empty())
+    return false;
+  // Splay on the key to move the node with the given key or the last
+  // node on the search path to the top of the tree.
+  Splay(key);
+  // Now the result is either the root node or the greatest node in
+  // the left subtree.
+  int cmp = Config::Compare(root_->key_, key);
+  if (cmp <= 0) {
+    locator->bind(root_);
+    return true;
+  } else {
+    Node* temp = root_;
+    root_ = root_->left_;
+    bool result = FindGreatest(locator);
+    root_ = temp;
+    return result;
+  }
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::FindLeastGreaterThan(const Key& key,
+                                                        Locator* locator) {
+  if (is_empty())
+    return false;
+  // Splay on the key to move the node with the given key or the last
+  // node on the search path to the top of the tree.
+  Splay(key);
+  // Now the result is either the root node or the least node in
+  // the right subtree.
+  int cmp = Config::Compare(root_->key_, key);
+  if (cmp >= 0) {
+    locator->bind(root_);
+    return true;
+  } else {
+    Node* temp = root_;
+    root_ = root_->right_;
+    bool result = FindLeast(locator);
+    root_ = temp;
+    return result;
+  }
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::FindGreatest(Locator* locator) {
+  if (is_empty())
+    return false;
+  Node* current = root_;
+  while (current->right_ != NULL)
+    current = current->right_;
+  locator->bind(current);
+  return true;
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::FindLeast(Locator* locator) {
+  if (is_empty())
+    return false;
+  Node* current = root_;
+  while (current->left_ != NULL)
+    current = current->left_;
+  locator->bind(current);
+  return true;
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::Move(const Key& old_key,
+                                        const Key& new_key) {
+  if (!FindInternal(old_key))
+    return false;
+  Node* node_to_move = root_;
+  RemoveRootNode(old_key);
+  Splay(new_key);
+  int cmp = Config::Compare(new_key, root_->key_);
+  if (cmp == 0) {
+    // A node with the target key already exists.
+    delete node_to_move;
+    return false;
+  }
+  node_to_move->key_ = new_key;
+  InsertInternal(cmp, node_to_move);
+  return true;
+}
+
+
+template<typename Config, class Allocator>
+bool SplayTree<Config, Allocator>::Remove(const Key& key) {
+  if (!FindInternal(key))
+    return false;
+  Node* node_to_remove = root_;
+  RemoveRootNode(key);
+  delete node_to_remove;
+  return true;
+}
+
+
+template<typename Config, class Allocator>
+void SplayTree<Config, Allocator>::RemoveRootNode(const Key& key) {
+  if (root_->left_ == NULL) {
+    // No left child, so the new tree is just the right child.
+    root_ = root_->right_;
+  } else {
+    // Left child exists.
+    Node* right = root_->right_;
+    // Make the original left child the new root.
+    root_ = root_->left_;
+    // Splay to make sure that the new root has an empty right child.
+    Splay(key);
+    // Insert the original right child as the right child of the new
+    // root.
+    root_->right_ = right;
+  }
+}
+
+
+template<typename Config, class Allocator>
+void SplayTree<Config, Allocator>::Splay(const Key& key) {
+  if (is_empty())
+    return;
+  Node dummy_node(Config::kNoKey, Config::kNoValue);
+  // Create a dummy node.  The use of the dummy node is a bit
+  // counter-intuitive: The right child of the dummy node will hold
+  // the L tree of the algorithm.  The left child of the dummy node
+  // will hold the R tree of the algorithm.  Using a dummy node, left
+  // and right will always be nodes and we avoid special cases.
+  Node* dummy = &dummy_node;
+  Node* left = dummy;
+  Node* right = dummy;
+  Node* current = root_;
+  while (true) {
+    int cmp = Config::Compare(key, current->key_);
+    if (cmp < 0) {
+      if (current->left_ == NULL)
+        break;
+      if (Config::Compare(key, current->left_->key_) < 0) {
+        // Rotate right.
+        Node* temp = current->left_;
+        current->left_ = temp->right_;
+        temp->right_ = current;
+        current = temp;
+        if (current->left_ == NULL)
+          break;
+      }
+      // Link right.
+      right->left_ = current;
+      right = current;
+      current = current->left_;
+    } else if (cmp > 0) {
+      if (current->right_ == NULL)
+        break;
+      if (Config::Compare(key, current->right_->key_) > 0) {
+        // Rotate left.
+        Node* temp = current->right_;
+        current->right_ = temp->left_;
+        temp->left_ = current;
+        current = temp;
+        if (current->right_ == NULL)
+          break;
+      }
+      // Link left.
+      left->right_ = current;
+      left = current;
+      current = current->right_;
+    } else {
+      break;
+    }
+  }
+  // Assemble.
+  left->right_ = current->left_;
+  right->left_ = current->right_;
+  current->left_ = dummy->right_;
+  current->right_ = dummy->left_;
+  root_ = current;
+}
+
+
+template <typename Config, class Allocator> template <class Callback>
+void SplayTree<Config, Allocator>::ForEach(Callback* callback) {
+  NodeToPairAdaptor<Callback> callback_adaptor(callback);
+  ForEachNode(&callback_adaptor);
+}
+
+
+template <typename Config, class Allocator> template <class Callback>
+void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
+  // Pre-allocate some space for tiny trees.
+  List<Node*, Allocator> nodes_to_visit(10);
+  if (root_ != NULL) nodes_to_visit.Add(root_);
+  int pos = 0;
+  while (pos < nodes_to_visit.length()) {
+    Node* node = nodes_to_visit[pos++];
+    if (node->left() != NULL) nodes_to_visit.Add(node->left());
+    if (node->right() != NULL) nodes_to_visit.Add(node->right());
+    callback->Call(node);
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_SPLAY_TREE_INL_H_
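Note that ForEachNode above is iterative, presumably because a splay tree
can degenerate into a long chain and a recursive traversal could overflow
the C++ stack; the explicit worklist bounds stack usage regardless of tree
shape. A standalone sketch of the same pattern, using a hypothetical
minimal node type and std::vector in place of V8's List:

  #include <cstddef>
  #include <vector>

  struct Node {
    Node* left;
    Node* right;
  };

  // Visit every node without recursion, mirroring SplayTree::ForEachNode:
  // seed the worklist with the root, then index through it while appending
  // children, until the worklist is drained.
  template <class Callback>
  void ForEachNode(Node* root, Callback callback) {
    std::vector<Node*> nodes_to_visit;
    if (root != nullptr) nodes_to_visit.push_back(root);
    for (std::size_t pos = 0; pos < nodes_to_visit.size(); ++pos) {
      Node* node = nodes_to_visit[pos];
      if (node->left != nullptr) nodes_to_visit.push_back(node->left);
      if (node->right != nullptr) nodes_to_visit.push_back(node->right);
      callback(node);
    }
  }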
diff --git a/src/splay-tree.h b/src/splay-tree.h
new file mode 100644
index 0000000..c265276
--- /dev/null
+++ b/src/splay-tree.h
@@ -0,0 +1,203 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SPLAY_TREE_H_
+#define V8_SPLAY_TREE_H_
+
+namespace v8 {
+namespace internal {
+
+
+// A splay tree.  The config type parameter encapsulates the different
+// configurations of a concrete splay tree:
+//
+//   typedef Key: the key type
+//   typedef Value: the value type
+//   static const kNoKey: the dummy key used when no key is set
+//   static const kNoValue: the dummy value used to initialize nodes
+//   int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
+//
+// The tree is also parameterized by an allocation policy
+// (Allocator). The policy is used for allocating lists in the C free
+// store or the zone; see zone.h.
+
+// Forward defined as
+// template <typename Config, class Allocator = FreeStoreAllocationPolicy>
+//     class SplayTree;
+template <typename Config, class Allocator>
+class SplayTree {
+ public:
+  typedef typename Config::Key Key;
+  typedef typename Config::Value Value;
+
+  class Locator;
+
+  SplayTree() : root_(NULL) { }
+  ~SplayTree();
+
+  INLINE(void* operator new(size_t size)) {
+    return Allocator::New(static_cast<int>(size));
+  }
+  INLINE(void operator delete(void* p, size_t)) { return Allocator::Delete(p); }
+
+  // Inserts the given key in this tree with the given value.  Returns
+  // true if a node was inserted, otherwise false.  In either case the
+  // locator is bound to the node and provides access to the mapping.
+  bool Insert(const Key& key, Locator* locator);
+
+  // Looks up the key in this tree and returns true if it was found,
+  // otherwise false.  If the node is found the locator is enabled and
+  // provides access to the mapping for the key.
+  bool Find(const Key& key, Locator* locator);
+
+  // Finds the mapping with the greatest key less than or equal to the
+  // given key.
+  bool FindGreatestLessThan(const Key& key, Locator* locator);
+
+  // Find the mapping with the greatest key in this tree.
+  bool FindGreatest(Locator* locator);
+
+  // Finds the mapping with the least key greater than or equal to the
+  // given key.
+  bool FindLeastGreaterThan(const Key& key, Locator* locator);
+
+  // Find the mapping with the least key in this tree.
+  bool FindLeast(Locator* locator);
+
+  // Move the node from one key to another.
+  bool Move(const Key& old_key, const Key& new_key);
+
+  // Remove the node with the given key from the tree.
+  bool Remove(const Key& key);
+
+  bool is_empty() { return root_ == NULL; }
+
+  // Perform the splay operation for the given key. Moves the node with
+  // the given key to the top of the tree.  If no node has the given
+  // key, the last node on the search path is moved to the top of the
+  // tree.
+  void Splay(const Key& key);
+
+  class Node {
+   public:
+    Node(const Key& key, const Value& value)
+        : key_(key),
+          value_(value),
+          left_(NULL),
+          right_(NULL) { }
+
+    INLINE(void* operator new(size_t size)) {
+      return Allocator::New(static_cast<int>(size));
+    }
+    INLINE(void operator delete(void* p, size_t)) {
+      return Allocator::Delete(p);
+    }
+
+    Key key() { return key_; }
+    Value value() { return value_; }
+    Node* left() { return left_; }
+    Node* right() { return right_; }
+   private:
+
+    friend class SplayTree;
+    friend class Locator;
+    Key key_;
+    Value value_;
+    Node* left_;
+    Node* right_;
+  };
+
+  // A locator provides access to a node in the tree without actually
+  // exposing the node.
+  class Locator BASE_EMBEDDED {
+   public:
+    explicit Locator(Node* node) : node_(node) { }
+    Locator() : node_(NULL) { }
+    const Key& key() { return node_->key_; }
+    Value& value() { return node_->value_; }
+    void set_value(const Value& value) { node_->value_ = value; }
+    inline void bind(Node* node) { node_ = node; }
+   private:
+    Node* node_;
+  };
+
+  template <class Callback>
+  void ForEach(Callback* callback);
+
+ protected:
+
+  // Resets tree root. Existing nodes become unreachable.
+  void ResetRoot() { root_ = NULL; }
+
+ private:
+  // Search for a node with a given key. If found, root_ points
+  // to the node.
+  bool FindInternal(const Key& key);
+
+  // Inserts a node assuming that root_ is already set up.
+  void InsertInternal(int cmp, Node* node);
+
+  // Removes root_ node.
+  void RemoveRootNode(const Key& key);
+
+  template<class Callback>
+  class NodeToPairAdaptor BASE_EMBEDDED {
+   public:
+    explicit NodeToPairAdaptor(Callback* callback)
+        : callback_(callback) { }
+    void Call(Node* node) {
+      callback_->Call(node->key(), node->value());
+    }
+
+   private:
+    Callback* callback_;
+
+    DISALLOW_COPY_AND_ASSIGN(NodeToPairAdaptor);
+  };
+
+  class NodeDeleter BASE_EMBEDDED {
+   public:
+    NodeDeleter() { }
+    void Call(Node* node) { delete node; }
+
+   private:
+
+    DISALLOW_COPY_AND_ASSIGN(NodeDeleter);
+  };
+
+  template <class Callback>
+  void ForEachNode(Callback* callback);
+
+  Node* root_;
+
+  DISALLOW_COPY_AND_ASSIGN(SplayTree);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_SPLAY_TREE_H_
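The Config contract documented at the top of splay-tree.h can be illustrated
with a hypothetical configuration (IntIntConfig below is invented for
illustration; V8's real configs are supplied by callers):

  // Hypothetical Config mapping int keys to int values, following the
  // contract documented in splay-tree.h.
  struct IntIntConfig {
    typedef int Key;
    typedef int Value;
    static const int kNoKey = -1;   // dummy key for the Splay() sentinel
    static const int kNoValue = 0;  // dummy value for new nodes
    static int Compare(const int& a, const int& b) {
      if (a < b) return -1;
      if (a > b) return 1;
      return 0;
    }
  };

  // Usage sketch (assuming the free-store allocation policy from zone.h):
  //   SplayTree<IntIntConfig, FreeStoreAllocationPolicy> tree;
  //   SplayTree<IntIntConfig, FreeStoreAllocationPolicy>::Locator loc;
  //   if (tree.Insert(42, &loc)) loc.set_value(7);   // new node
  //   if (tree.Find(42, &loc)) { /* loc.value() == 7 */ }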
diff --git a/src/string.js b/src/string.js
index a8fc8d4..9433249 100644
--- a/src/string.js
+++ b/src/string.js
@@ -69,7 +69,7 @@
     if (index >= subject.length || index < 0) return "";
     char_code = %StringCharCodeAt(subject, index);
   }
-  return %CharFromCode(char_code);
+  return %_CharFromCode(char_code);
 }
 
 
@@ -149,6 +149,16 @@
 }
 
 
+function CloneDenseArray(array) {
+  if (array === null) return null;
+  var clone = new $Array(array.length);
+  for (var i = 0; i < array.length; i++) {
+    clone[i] = array[i];
+  }
+  return clone;
+}
+
+
 // ECMA-262 section 15.5.4.9
 //
 // This function is implementation specific.  For now, we do not
@@ -164,13 +174,37 @@
 
 // ECMA-262 section 15.5.4.10
 function StringMatch(regexp) {
-  if (!IS_REGEXP(regexp)) regexp = new $RegExp(regexp);
   var subject = TO_STRING_INLINE(this);
+  if (IS_REGEXP(regexp)) {
+    if (!regexp.global) return regexp.exec(subject);
 
-  if (!regexp.global) return regexp.exec(subject);
-  %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
-  // lastMatchInfo is defined in regexp.js.
-  return %StringMatch(subject, regexp, lastMatchInfo);
+    var cache = regExpCache;
+    var saveAnswer = false;
+
+    if (%_ObjectEquals(cache.type, 'match') &&
+        %_ObjectEquals(cache.regExp, regexp) &&
+        %_ObjectEquals(cache.subject, subject)) {
+      if (cache.answerSaved) {
+        return CloneDenseArray(cache.answer);
+      } else {
+        saveAnswer = true;
+      }
+    }
+    %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
+    // lastMatchInfo is defined in regexp.js.
+    var result = %StringMatch(subject, regexp, lastMatchInfo);
+    cache.type = 'match';
+    cache.regExp = regexp;
+    cache.subject = subject;
+    if (saveAnswer) cache.answer = CloneDenseArray(result);
+    cache.answerSaved = saveAnswer;
+    return result;
+  }
+  // Non-regexp argument.
+  regexp = new $RegExp(regexp);
+  // Don't check regexp exec cache, since the regexp is new.
+  // TODO(lrn): Change this if we start caching regexps here.
+  return RegExpExecNoTests(regexp, subject, 0);
 }
 
 
@@ -184,7 +218,7 @@
     if (!%_IsSmi(char_code)) {
       char_code = %StringCharCodeAt(string, start);
     }
-    return %CharFromCode(char_code);
+    return %_CharFromCode(char_code);
   }
   return %_SubString(string, start, end);
 }
@@ -206,6 +240,7 @@
   if (IS_REGEXP(search)) {
     %_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
     if (IS_FUNCTION(replace)) {
+      regExpCache.type = 'none';
       return StringReplaceRegExpWithFunction(subject, search, replace);
     } else {
       return StringReplaceRegExp(subject, search, replace);
@@ -241,12 +276,25 @@
 
 // Helper function for regular expressions in String.prototype.replace.
 function StringReplaceRegExp(subject, regexp, replace) {
+  var cache = regExpCache;
+  if (%_ObjectEquals(cache.regExp, regexp) &&
+      %_ObjectEquals(cache.type, 'replace') &&
+      %_ObjectEquals(cache.replaceString, replace) &&
+      %_ObjectEquals(cache.subject, subject)) {
+    return cache.answer;
+  }
   replace = TO_STRING_INLINE(replace);
-  return %StringReplaceRegExpWithString(subject,
-                                        regexp,
-                                        replace,
-                                        lastMatchInfo);
-};
+  var answer = %StringReplaceRegExpWithString(subject,
+                                              regexp,
+                                              replace,
+                                              lastMatchInfo);
+  cache.subject = subject;
+  cache.regExp = regexp;
+  cache.replaceString = replace;
+  cache.answer = answer;
+  cache.type = 'replace';
+  return answer;
+}
 
 
 // Expand the $-expressions in the string and return a new string with
@@ -368,72 +416,95 @@
   builder.addSpecialSlice(start, end);
 };
 
+// TODO(lrn): This array will survive indefinitely if replace is never
+// called again. However, it will be empty, since the contents are cleared
+// in the finally block.
+var reusableReplaceArray = $Array(16);
 
 // Helper function for replacing regular expressions with the result of a
-// function application in String.prototype.replace.  The function application
-// must be interleaved with the regexp matching (contrary to ECMA-262
-// 15.5.4.11) to mimic SpiderMonkey and KJS behavior when the function uses
-// the static properties of the RegExp constructor.  Example:
-//     'abcd'.replace(/(.)/g, function() { return RegExp.$1; }
-// should be 'abcd' and not 'dddd' (or anything else).
+// function application in String.prototype.replace.
 function StringReplaceRegExpWithFunction(subject, regexp, replace) {
-  var matchInfo = DoRegExpExec(regexp, subject, 0);
-  if (IS_NULL(matchInfo)) return subject;
-
-  var result = new ReplaceResultBuilder(subject);
-  // There's at least one match.  If the regexp is global, we have to loop
-  // over all matches.  The loop is not in C++ code here like the one in
-  // RegExp.prototype.exec, because of the interleaved function application.
-  // Unfortunately, that means this code is nearly duplicated, here and in
-  // jsregexp.cc.
   if (regexp.global) {
-    var numberOfCaptures = NUMBER_OF_CAPTURES(matchInfo) >> 1;
-    var previous = 0;
-    do {
-      var startOfMatch = matchInfo[CAPTURE0];
-      result.addSpecialSlice(previous, startOfMatch);
-      previous = matchInfo[CAPTURE1];
-      if (numberOfCaptures == 1) {
-        var match = SubString(subject, startOfMatch, previous);
-        // Don't call directly to avoid exposing the built-in global object.
-        result.add(replace.call(null, match, startOfMatch, subject));
-      } else {
-        result.add(ApplyReplacementFunction(replace, matchInfo, subject));
-      }
-      // Can't use matchInfo any more from here, since the function could
-      // overwrite it.
-      // Continue with the next match.
-      // Increment previous if we matched an empty string, as per ECMA-262
-      // 15.5.4.10.
-      if (previous == startOfMatch) {
-        // Add the skipped character to the output, if any.
-        if (previous < subject.length) {
-          result.addSpecialSlice(previous, previous + 1);
-        }
-        previous++;
-      }
-
-      // Per ECMA-262 15.10.6.2, if the previous index is greater than the
-      // string length, there is no match
-      matchInfo = (previous > subject.length)
-          ? null
-          : DoRegExpExec(regexp, subject, previous);
-    } while (!IS_NULL(matchInfo));
-
-    // Tack on the final right substring after the last match, if necessary.
-    if (previous < subject.length) {
-      result.addSpecialSlice(previous, subject.length);
+    var resultArray = reusableReplaceArray;
+    if (resultArray) {
+      reusableReplaceArray = null;
+    } else {
+      // We are inside a nested replace (replace called from the replacement
+      // function of another replace), or an exception in a replacement
+      // function prevented the reusable array from being restored. Create a
+      // new array to use until the original is written back.
+      resultArray = $Array(16);
     }
+
+    var res = %RegExpExecMultiple(regexp,
+                                  subject,
+                                  lastMatchInfo,
+                                  resultArray);
+    regexp.lastIndex = 0;
+    if (IS_NULL(res)) {
+      // No matches at all.
+      return subject;
+    }
+    var len = res.length;
+    var i = 0;
+    if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
+      var match_start = 0;
+      var override = [null, 0, subject];
+      while (i < len) {
+        var elem = res[i];
+        if (%_IsSmi(elem)) {
+          if (elem > 0) {
+            match_start = (elem >> 11) + (elem & 0x7ff);
+          } else {
+            match_start = res[++i] - elem;
+          }
+        } else {
+          override[0] = elem;
+          override[1] = match_start;
+          lastMatchInfoOverride = override;
+          var func_result = replace.call(null, elem, match_start, subject);
+          if (!IS_STRING(func_result)) {
+            func_result = NonStringToString(func_result);
+          }
+          res[i] = func_result;
+          match_start += elem.length;
+        }
+        i++;
+      }
+    } else {
+      while (i < len) {
+        var elem = res[i];
+        if (!%_IsSmi(elem)) {
+          // elem must be an Array.
+          // Use the apply argument as backing for global RegExp properties.
+          lastMatchInfoOverride = elem;
+          var func_result = replace.apply(null, elem);
+          if (!IS_STRING(func_result)) {
+            func_result = NonStringToString(func_result);
+          }
+          res[i] = func_result;
+        }
+        i++;
+      }
+    }
+    var resultBuilder = new ReplaceResultBuilder(subject, res);
+    var result = resultBuilder.generate();
+    resultArray.length = 0;
+    reusableReplaceArray = resultArray;
+    return result;
   } else { // Not a global regexp, no need to loop.
+    var matchInfo = DoRegExpExec(regexp, subject, 0);
+    if (IS_NULL(matchInfo)) return subject;
+
+    var result = new ReplaceResultBuilder(subject);
     result.addSpecialSlice(0, matchInfo[CAPTURE0]);
     var endOfMatch = matchInfo[CAPTURE1];
     result.add(ApplyReplacementFunction(replace, matchInfo, subject));
     // Can't use matchInfo any more from here, since the function could
     // overwrite it.
     result.addSpecialSlice(endOfMatch, subject.length);
+    return result.generate();
   }
-
-  return result.generate();
 }
 
 
@@ -458,19 +529,22 @@
   return replace.apply(null, parameters);
 }
 
-
 // ECMA-262 section 15.5.4.12
 function StringSearch(re) {
-  var regexp = new $RegExp(re);
+  var regexp;
+  if (IS_STRING(re)) {
+    regexp = %_GetFromCache(STRING_TO_REGEXP_CACHE_ID, re);
+  } else if (IS_REGEXP(re)) {
+    regexp = re;
+  } else {
+    regexp = new $RegExp(re);
+  }
   var s = TO_STRING_INLINE(this);
-  var last_idx = regexp.lastIndex; // keep old lastIndex
-  regexp.lastIndex = 0;            // ignore re.global property
-  var result = regexp.exec(s);
-  regexp.lastIndex = last_idx;     // restore lastIndex
-  if (result == null)
-    return -1;
-  else
-    return result.index;
+  var match = DoRegExpExec(regexp, s, 0);
+  if (match) {
+    return match[CAPTURE0];
+  }
+  return -1;
 }
 
 
@@ -517,7 +591,7 @@
 
   // ECMA-262 says that if separator is undefined, the result should
   // be an array of size 1 containing the entire string.  SpiderMonkey
-  // and KJS have this behaviour only when no separator is given.  If
+  // and KJS have this behavior only when no separator is given.  If
   // undefined is explicitly given, they convert it to a string and
   // use that.  We do as SpiderMonkey and KJS.
   if (%_ArgumentsLength() === 0) {
@@ -530,32 +604,39 @@
     var separator_length = separator.length;
 
     // If the separator string is empty then return the elements in the subject.
-    if (separator_length === 0) {
-      var result = $Array(length);
-      for (var i = 0; i < length; i++) result[i] = subject[i];
-      return result;
-    }
+    if (separator_length === 0) return %StringToArray(subject);
 
-    var result = [];
-    var start_index = 0;
-    var index;
-    while (true) {
-      if (start_index + separator_length > length ||
-          (index = %StringIndexOf(subject, separator, start_index)) === -1) {
-        result.push(SubString(subject, start_index, length));
-        break;
-      }
-      if (result.push(SubString(subject, start_index, index)) === limit) break;
-      start_index = index + separator_length;
-    }
+    var result = %StringSplit(subject, separator, limit);
 
     return result;
   }
 
+  var cache = regExpCache;
+  var saveAnswer = false;
+
+  if (%_ObjectEquals(cache.type, 'split') &&
+      %_ObjectEquals(cache.regExp, separator) &&
+      %_ObjectEquals(cache.subject, subject)) {
+    if (cache.answerSaved) {
+      return CloneDenseArray(cache.answer);
+    } else {
+      saveAnswer = true;
+    }
+  }
+
+  cache.type = 'split';
+  cache.regExp = separator;
+  cache.subject = subject;
+
   %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
 
   if (length === 0) {
-    if (splitMatch(separator, subject, 0, 0) != null) return [];
+    cache.answerSaved = true;
+    if (splitMatch(separator, subject, 0, 0) != null) {
+      cache.answer = [];
+      return [];
+    }
+    cache.answer = [subject];
     return [subject];
   }
 
@@ -563,18 +644,19 @@
   var startIndex = 0;
   var result = [];
 
+  outer_loop:
   while (true) {
 
     if (startIndex === length) {
       result[result.length] = subject.slice(currentIndex, length);
-      return result;
+      break;
     }
 
     var matchInfo = splitMatch(separator, subject, currentIndex, startIndex);
 
     if (IS_NULL(matchInfo)) {
       result[result.length] = subject.slice(currentIndex, length);
-      return result;
+      break;
     }
 
     var endIndex = matchInfo[CAPTURE1];
@@ -586,7 +668,7 @@
     }
 
     result[result.length] = SubString(subject, currentIndex, matchInfo[CAPTURE0]);
-    if (result.length === limit) return result;
+    if (result.length === limit) break;
 
     var num_captures = NUMBER_OF_CAPTURES(matchInfo);
     for (var i = 2; i < num_captures; i += 2) {
@@ -597,11 +679,14 @@
       } else {
         result[result.length] = void 0;
       }
-      if (result.length === limit) return result;
+      if (result.length === limit) break outer_loop;
     }
 
     startIndex = currentIndex = endIndex;
   }
+  if (saveAnswer) cache.answer = CloneDenseArray(result);
+  cache.answerSaved = saveAnswer;
+  return result;
 }
 
 
@@ -723,16 +808,26 @@
   return %StringTrim(TO_STRING_INLINE(this), false, true);
 }
 
+var static_charcode_array = new $Array(4);
+
 // ECMA-262, section 15.5.3.2
 function StringFromCharCode(code) {
   var n = %_ArgumentsLength();
-  if (n == 1) return %CharFromCode(ToNumber(code) & 0xffff)
+  if (n == 1) {
+    if (!%_IsSmi(code)) code = ToNumber(code);
+    return %_CharFromCode(code & 0xffff);
+  }
 
   // NOTE: This is not super-efficient, but it is necessary because we
   // want to avoid converting to numbers from within the virtual
   // machine. Maybe we can find another way of doing this?
-  var codes = new $Array(n);
-  for (var i = 0; i < n; i++) codes[i] = ToNumber(%_Arguments(i));
+  var codes = static_charcode_array;
+  for (var i = 0; i < n; i++) {
+    var code = %_Arguments(i);
+    if (!%_IsSmi(code)) code = ToNumber(code);
+    codes[i] = code;
+  }
+  codes.length = n;
   return %StringFromCharCodeArray(codes);
 }
 
@@ -815,7 +910,11 @@
 
 // ReplaceResultBuilder support.
 function ReplaceResultBuilder(str) {
-  this.elements = new $Array();
+  if (%_ArgumentsLength() > 1) {
+    this.elements = %_Arguments(1);
+  } else {
+    this.elements = new $Array();
+  }
   this.special_string = str;
 }
 
@@ -831,10 +930,10 @@
 
 ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) {
   var len = end - start;
-  if (len == 0) return;
+  if (start < 0 || len <= 0) return;
   var elements = this.elements;
   if (start < 0x80000 && len < 0x800) {
-    elements[elements.length] = (start << 11) + len;
+    elements[elements.length] = (start << 11) | len;
   } else {
     // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
     // so -len is a smi.
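For reference, the slice encoding shared by addSpecialSlice above and the
decoder in StringReplaceRegExpWithFunction packs small slices into a single
smi: the low 11 bits hold the length, the higher bits the start offset;
slices that do not fit are stored as the pair (-len, start) instead. A
sketch of the packed form in C++ (helper names are invented):

  #include <cassert>
  #include <cstdint>

  // Pack a slice that fits: start < 0x80000 and 0 < len < 0x800, exactly
  // the guard used in addSpecialSlice.
  int32_t EncodeSlice(int32_t start, int32_t len) {
    assert(start >= 0 && start < 0x80000);
    assert(len > 0 && len < 0x800);
    return (start << 11) | len;
  }

  // Recover the end of the slice, matching the decoder's
  // `match_start = (elem >> 11) + (elem & 0x7ff)` in string.js.
  int32_t SliceEnd(int32_t packed) {
    int32_t start = packed >> 11;
    int32_t len = packed & 0x7ff;
    return start + len;
  }

This is also why the hunk changes `(start << 11) + len` to
`(start << 11) | len`: with len < 0x800 the two are equivalent (no carry
into the start bits), but the bitwise-or makes the field layout explicit.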
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 577c2d7..f353253 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -93,6 +93,38 @@
 }
 
 
+Object* StubCache::ComputeLoadNonexistent(String* name, JSObject* receiver) {
+  // If no global objects are present in the prototype chain, the load
+  // nonexistent IC stub can be shared for all names for a given map
+  // and we use the empty string for the map cache in that case.  If
+  // there are global objects involved, we need to check global
+  // property cells in the stub and therefore the stub will be
+  // specific to the name.
+  String* cache_name = Heap::empty_string();
+  if (receiver->IsGlobalObject()) cache_name = name;
+  JSObject* last = receiver;
+  while (last->GetPrototype() != Heap::null_value()) {
+    last = JSObject::cast(last->GetPrototype());
+    if (last->IsGlobalObject()) cache_name = name;
+  }
+  // Compile the stub that is either shared for all names or
+  // name specific if there are global objects involved.
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::LOAD_IC, NONEXISTENT);
+  Object* code = receiver->map()->FindInCodeCache(cache_name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    code = compiler.CompileLoadNonexistent(cache_name, receiver, last);
+    if (code->IsFailure()) return code;
+    PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
+    Object* result =
+        receiver->map()->UpdateCodeCache(cache_name, Code::cast(code));
+    if (result->IsFailure()) return result;
+  }
+  return Set(name, receiver->map(), Code::cast(code));
+}
+
+
 Object* StubCache::ComputeLoadField(String* name,
                                     JSObject* receiver,
                                     JSObject* holder,
@@ -103,7 +135,7 @@
     LoadStubCompiler compiler;
     code = compiler.CompileLoadField(receiver, holder, field_index, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -122,7 +154,7 @@
     LoadStubCompiler compiler;
     code = compiler.CompileLoadCallback(name, receiver, holder, callback);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -141,7 +173,7 @@
     LoadStubCompiler compiler;
     code = compiler.CompileLoadConstant(receiver, holder, value, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -158,7 +190,7 @@
     LoadStubCompiler compiler;
     code = compiler.CompileLoadInterceptor(receiver, holder, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -187,7 +219,7 @@
                                       name,
                                       is_dont_delete);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -205,7 +237,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadField(name, receiver, holder, field_index);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -224,7 +256,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadConstant(name, receiver, holder, value);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -242,7 +274,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadInterceptor(receiver, holder, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -261,7 +293,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadCallback(name, receiver, holder, callback);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -279,7 +311,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadArrayLength(name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -296,7 +328,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadStringLength(name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -313,7 +345,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadFunctionPrototype(name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -332,7 +364,7 @@
     StoreStubCompiler compiler;
     code = compiler.CompileStoreField(receiver, field_index, transition, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -349,7 +381,7 @@
     StoreStubCompiler compiler;
     code = compiler.CompileStoreGlobal(receiver, cell, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -367,7 +399,7 @@
     StoreStubCompiler compiler;
     code = compiler.CompileStoreCallback(receiver, callback, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -384,7 +416,7 @@
     StoreStubCompiler compiler;
     code = compiler.CompileStoreInterceptor(receiver, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -401,7 +433,8 @@
     KeyedStoreStubCompiler compiler;
     code = compiler.CompileStoreField(receiver, field_index, transition, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(
+        Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -435,14 +468,6 @@
                                     argc);
   Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    if (object->IsJSObject()) {
-      Object* opt =
-          Top::LookupSpecialFunction(JSObject::cast(object), holder, function);
-      if (opt->IsJSFunction()) {
-        check = StubCompiler::JSARRAY_HAS_FAST_ELEMENTS_CHECK;
-        function = JSFunction::cast(opt);
-      }
-    }
     // If the function hasn't been compiled yet, we cannot do it now
     // because it may cause GC. To avoid this issue, we return an
     // internal error which will make sure we do not update any
@@ -453,7 +478,7 @@
     code = compiler.CompileCallConstant(object, holder, function, name, check);
     if (code->IsFailure()) return code;
     ASSERT_EQ(flags, Code::cast(code)->flags());
-    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
     Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -490,7 +515,7 @@
                                      name);
     if (code->IsFailure()) return code;
     ASSERT_EQ(flags, Code::cast(code)->flags());
-    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
     Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -526,7 +551,7 @@
                                            name);
     if (code->IsFailure()) return code;
     ASSERT_EQ(flags, Code::cast(code)->flags());
-    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
     Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -564,7 +589,7 @@
     code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
     if (code->IsFailure()) return code;
     ASSERT_EQ(flags, Code::cast(code)->flags());
-    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -709,8 +734,8 @@
   if (result->IsCode()) {
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
-                        code, code->arguments_count()));
+    PROFILE(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+                            code, code->arguments_count()));
   }
   return result;
 }
@@ -790,6 +815,10 @@
   return *value;
 }
 
+
+static const int kAccessorInfoOffsetInInterceptorArgs = 2;
+
+
 /**
  * Attempts to load a property with an interceptor (which must be present),
  * but doesn't search the prototype chain.
@@ -798,11 +827,12 @@
  * provide any value for the given name.
  */
 Object* LoadPropertyWithInterceptorOnly(Arguments args) {
-  JSObject* receiver_handle = JSObject::cast(args[0]);
-  JSObject* holder_handle = JSObject::cast(args[1]);
-  Handle<String> name_handle = args.at<String>(2);
-  Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3);
-  Object* data_handle = args[4];
+  Handle<String> name_handle = args.at<String>(0);
+  Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
+  ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
+  ASSERT(args[2]->IsJSObject());  // Receiver.
+  ASSERT(args[3]->IsJSObject());  // Holder.
+  ASSERT(args.length() == 5);  // Last arg is data object.
 
   Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
   v8::NamedPropertyGetter getter =
@@ -811,8 +841,8 @@
 
   {
     // Use the interceptor getter.
-    CustomArguments args(data_handle, receiver_handle, holder_handle);
-    v8::AccessorInfo info(args.end());
+    v8::AccessorInfo info(args.arguments() -
+                          kAccessorInfoOffsetInInterceptorArgs);
     HandleScope scope;
     v8::Handle<v8::Value> r;
     {
@@ -850,11 +880,12 @@
 
 static Object* LoadWithInterceptor(Arguments* args,
                                    PropertyAttributes* attrs) {
-  Handle<JSObject> receiver_handle = args->at<JSObject>(0);
-  Handle<JSObject> holder_handle = args->at<JSObject>(1);
-  Handle<String> name_handle = args->at<String>(2);
-  Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(3);
-  Handle<Object> data_handle = args->at<Object>(4);
+  Handle<String> name_handle = args->at<String>(0);
+  Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1);
+  ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
+  Handle<JSObject> receiver_handle = args->at<JSObject>(2);
+  Handle<JSObject> holder_handle = args->at<JSObject>(3);
+  ASSERT(args->length() == 5);  // Last arg is data object.
 
   Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
   v8::NamedPropertyGetter getter =
@@ -863,8 +894,8 @@
 
   {
     // Use the interceptor getter.
-    CustomArguments args(*data_handle, *receiver_handle, *holder_handle);
-    v8::AccessorInfo info(args.end());
+    v8::AccessorInfo info(args->arguments() -
+                          kAccessorInfoOffsetInInterceptorArgs);
     HandleScope scope;
     v8::Handle<v8::Value> r;
     {
@@ -899,7 +930,7 @@
 
   // If the property is present, return it.
   if (attr != ABSENT) return result;
-  return ThrowReferenceError(String::cast(args[2]));
+  return ThrowReferenceError(String::cast(args[0]));
 }
 
 
@@ -941,8 +972,8 @@
     Counters::call_initialize_stubs.Increment();
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent(Logger::CALL_INITIALIZE_TAG,
-                        code, code->arguments_count()));
+    PROFILE(CodeCreateEvent(Logger::CALL_INITIALIZE_TAG,
+                            code, code->arguments_count()));
   }
   return result;
 }
@@ -959,8 +990,8 @@
     Counters::call_premonomorphic_stubs.Increment();
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent(Logger::CALL_PRE_MONOMORPHIC_TAG,
-                        code, code->arguments_count()));
+    PROFILE(CodeCreateEvent(Logger::CALL_PRE_MONOMORPHIC_TAG,
+                            code, code->arguments_count()));
   }
   return result;
 }
@@ -975,8 +1006,8 @@
     Counters::call_normal_stubs.Increment();
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent(Logger::CALL_NORMAL_TAG,
-                        code, code->arguments_count()));
+    PROFILE(CodeCreateEvent(Logger::CALL_NORMAL_TAG,
+                            code, code->arguments_count()));
   }
   return result;
 }
@@ -991,8 +1022,8 @@
     Counters::call_megamorphic_stubs.Increment();
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent(Logger::CALL_MEGAMORPHIC_TAG,
-                        code, code->arguments_count()));
+    PROFILE(CodeCreateEvent(Logger::CALL_MEGAMORPHIC_TAG,
+                            code, code->arguments_count()));
   }
   return result;
 }
@@ -1007,7 +1038,8 @@
     Counters::call_megamorphic_stubs.Increment();
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent(Logger::CALL_MISS_TAG, code, code->arguments_count()));
+    PROFILE(CodeCreateEvent(Logger::CALL_MISS_TAG,
+                            code, code->arguments_count()));
   }
   return result;
 }
@@ -1021,8 +1053,8 @@
   if (!result->IsFailure()) {
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent(Logger::CALL_DEBUG_BREAK_TAG,
-                        code, code->arguments_count()));
+    PROFILE(CodeCreateEvent(Logger::CALL_DEBUG_BREAK_TAG,
+                            code, code->arguments_count()));
   }
   return result;
 }
@@ -1038,8 +1070,8 @@
   if (!result->IsFailure()) {
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent(Logger::CALL_DEBUG_PREPARE_STEP_IN_TAG,
-                        code, code->arguments_count()));
+    PROFILE(CodeCreateEvent(Logger::CALL_DEBUG_PREPARE_STEP_IN_TAG,
+                            code, code->arguments_count()));
   }
   return result;
 }
@@ -1126,10 +1158,77 @@
   if (!result->IsFailure()) {
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
+    PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
   }
   return result;
 }
 
 
+CallOptimization::CallOptimization(LookupResult* lookup) {
+  if (!lookup->IsProperty() || !lookup->IsCacheable() ||
+      lookup->type() != CONSTANT_FUNCTION) {
+    Initialize(NULL);
+  } else {
+    // We only optimize constant function calls.
+    Initialize(lookup->GetConstantFunction());
+  }
+}
+
+CallOptimization::CallOptimization(JSFunction* function) {
+  Initialize(function);
+}
+
+
+int CallOptimization::GetPrototypeDepthOfExpectedType(JSObject* object,
+                                                      JSObject* holder) const {
+  ASSERT(is_simple_api_call_);
+  if (expected_receiver_type_ == NULL) return 0;
+  int depth = 0;
+  while (object != holder) {
+    if (object->IsInstanceOf(expected_receiver_type_)) return depth;
+    object = JSObject::cast(object->GetPrototype());
+    ++depth;
+  }
+  if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
+  return kInvalidProtoDepth;
+}
+
+
+void CallOptimization::Initialize(JSFunction* function) {
+  constant_function_ = NULL;
+  is_simple_api_call_ = false;
+  expected_receiver_type_ = NULL;
+  api_call_info_ = NULL;
+
+  if (function == NULL || !function->is_compiled()) return;
+
+  constant_function_ = function;
+  AnalyzePossibleApiFunction(function);
+}
+
+
+void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
+  SharedFunctionInfo* sfi = function->shared();
+  if (!sfi->IsApiFunction()) return;
+  FunctionTemplateInfo* info = sfi->get_api_func_data();
+
+  // Require a C++ callback.
+  if (info->call_code()->IsUndefined()) return;
+  api_call_info_ = CallHandlerInfo::cast(info->call_code());
+
+  // Accept signatures that either have no restrictions at all or
+  // only have restrictions on the receiver.
+  if (!info->signature()->IsUndefined()) {
+    SignatureInfo* signature = SignatureInfo::cast(info->signature());
+    if (!signature->args()->IsUndefined()) return;
+    if (!signature->receiver()->IsUndefined()) {
+      expected_receiver_type_ =
+          FunctionTemplateInfo::cast(signature->receiver());
+    }
+  }
+
+  is_simple_api_call_ = true;
+}
+
+
 } }  // namespace v8::internal
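
To illustrate how the new CallOptimization helper is meant to be consulted, a minimal sketch; the wrapper function and its NULL-fallback convention are hypothetical, only the CallOptimization calls come from the patch:

    Object* TryOptimizedCall(LookupResult* lookup,
                             JSObject* object,
                             JSObject* holder) {
      CallOptimization optimization(lookup);
      if (!optimization.is_constant_call()) return NULL;  // generic path
      if (optimization.is_simple_api_call()) {
        // The depth bounds the receiver map checks needed to guard the
        // signature's receiver restriction.
        int depth =
            optimization.GetPrototypeDepthOfExpectedType(object, holder);
        if (depth == kInvalidProtoDepth) return NULL;
        // ... emit a fast API call through optimization.api_call_info() ...
      }
      // ... otherwise emit a direct call to optimization.constant_function() ...
      return NULL;
    }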
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 43354db..2e0faf6 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -56,6 +56,8 @@
 
   // Computes the right stub matching. Inserts the result in the
   // cache before returning.  This might compile a stub if needed.
+  static Object* ComputeLoadNonexistent(String* name, JSObject* receiver);
+
   static Object* ComputeLoadField(String* name,
                                   JSObject* receiver,
                                   JSObject* holder,
@@ -326,8 +328,7 @@
     RECEIVER_MAP_CHECK,
     STRING_CHECK,
     NUMBER_CHECK,
-    BOOLEAN_CHECK,
-    JSARRAY_HAS_FAST_ELEMENTS_CHECK
+    BOOLEAN_CHECK
   };
 
   StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { }
@@ -462,18 +463,25 @@
 
 class LoadStubCompiler: public StubCompiler {
  public:
+  Object* CompileLoadNonexistent(String* name,
+                                 JSObject* object,
+                                 JSObject* last);
+
   Object* CompileLoadField(JSObject* object,
                            JSObject* holder,
                            int index,
                            String* name);
+
   Object* CompileLoadCallback(String* name,
                               JSObject* object,
                               JSObject* holder,
                               AccessorInfo* callback);
+
   Object* CompileLoadConstant(JSObject* object,
                               JSObject* holder,
                               Object* value,
                               String* name);
+
   Object* CompileLoadInterceptor(JSObject* object,
                                  JSObject* holder,
                                  String* name);
@@ -495,17 +503,21 @@
                            JSObject* object,
                            JSObject* holder,
                            int index);
+
   Object* CompileLoadCallback(String* name,
                               JSObject* object,
                               JSObject* holder,
                               AccessorInfo* callback);
+
   Object* CompileLoadConstant(String* name,
                               JSObject* object,
                               JSObject* holder,
                               Object* value);
+
   Object* CompileLoadInterceptor(JSObject* object,
                                  JSObject* holder,
                                  String* name);
+
   Object* CompileLoadArrayLength(String* name);
   Object* CompileLoadStringLength(String* name);
   Object* CompileLoadFunctionPrototype(String* name);
@@ -549,7 +561,7 @@
 
 class CallStubCompiler: public StubCompiler {
  public:
-  explicit CallStubCompiler(int argc, InLoopFlag in_loop)
+  CallStubCompiler(int argc, InLoopFlag in_loop)
       : arguments_(argc), in_loop_(in_loop) { }
 
   Object* CompileCallField(JSObject* object,
@@ -570,6 +582,18 @@
                             JSFunction* function,
                             String* name);
 
+  Object* CompileArrayPushCall(Object* object,
+                               JSObject* holder,
+                               JSFunction* function,
+                               String* name,
+                               CheckType check);
+
+  Object* CompileArrayPopCall(Object* object,
+                              JSObject* holder,
+                              JSFunction* function,
+                              String* name,
+                              CheckType check);
+
  private:
   const ParameterCount arguments_;
   const InLoopFlag in_loop_;
@@ -591,6 +615,79 @@
 };
 
 
+// Holds information about possible function call optimizations.
+class CallOptimization BASE_EMBEDDED {
+ public:
+  explicit CallOptimization(LookupResult* lookup);
+
+  explicit CallOptimization(JSFunction* function);
+
+  bool is_constant_call() const {
+    return constant_function_ != NULL;
+  }
+
+  JSFunction* constant_function() const {
+    ASSERT(constant_function_ != NULL);
+    return constant_function_;
+  }
+
+  bool is_simple_api_call() const {
+    return is_simple_api_call_;
+  }
+
+  FunctionTemplateInfo* expected_receiver_type() const {
+    ASSERT(is_simple_api_call_);
+    return expected_receiver_type_;
+  }
+
+  CallHandlerInfo* api_call_info() const {
+    ASSERT(is_simple_api_call_);
+    return api_call_info_;
+  }
+
+  // Returns the depth of the object having the expected type in the
+  // prototype chain between the two arguments.
+  int GetPrototypeDepthOfExpectedType(JSObject* object,
+                                      JSObject* holder) const;
+
+ private:
+  void Initialize(JSFunction* function);
+
+  // Determines whether the given function can be called using the
+  // fast api call builtin.
+  void AnalyzePossibleApiFunction(JSFunction* function);
+
+  JSFunction* constant_function_;
+  bool is_simple_api_call_;
+  FunctionTemplateInfo* expected_receiver_type_;
+  CallHandlerInfo* api_call_info_;
+};
+
+
+typedef Object* (*CustomCallGenerator)(CallStubCompiler* compiler,
+                                       Object* object,
+                                       JSObject* holder,
+                                       JSFunction* function,
+                                       String* name,
+                                       StubCompiler::CheckType check);
+
+
+Object* CompileArrayPushCall(CallStubCompiler* compiler,
+                             Object* object,
+                             JSObject* holder,
+                             JSFunction* function,
+                             String* name,
+                             StubCompiler::CheckType check);
+
+
+Object* CompileArrayPopCall(CallStubCompiler* compiler,
+                            Object* object,
+                            JSObject* holder,
+                            JSFunction* function,
+                            String* name,
+                            StubCompiler::CheckType check);
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_STUB_CACHE_H_
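
The CustomCallGenerator typedef pairs naturally with a dispatch table keyed by builtin name; a hypothetical sketch (the table type and contents are illustrative, not part of the patch):

    struct CustomCallEntry {
      const char* name;
      CustomCallGenerator generator;
    };

    static const CustomCallEntry kCustomCallTable[] = {
      { "push", CompileArrayPushCall },
      { "pop",  CompileArrayPopCall },
    };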
diff --git a/src/top.cc b/src/top.cc
index d174175..2f75c8f 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -31,6 +31,7 @@
 #include "bootstrapper.h"
 #include "debug.h"
 #include "execution.h"
+#include "messages.h"
 #include "platform.h"
 #include "simulator.h"
 #include "string-stream.h"
@@ -87,19 +88,30 @@
 }
 
 
+void Top::IterateThread(ThreadVisitor* v) {
+  v->VisitThread(&thread_local_);
+}
+
+
+void Top::IterateThread(ThreadVisitor* v, char* t) {
+  ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
+  v->VisitThread(thread);
+}
+
+
 void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
   v->VisitPointer(&(thread->pending_exception_));
   v->VisitPointer(&(thread->pending_message_obj_));
   v->VisitPointer(
-      bit_cast<Object**, Script**>(&(thread->pending_message_script_)));
-  v->VisitPointer(bit_cast<Object**, Context**>(&(thread->context_)));
+      BitCast<Object**, Script**>(&(thread->pending_message_script_)));
+  v->VisitPointer(BitCast<Object**, Context**>(&(thread->context_)));
   v->VisitPointer(&(thread->scheduled_exception_));
 
   for (v8::TryCatch* block = thread->TryCatchHandler();
        block != NULL;
        block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
-    v->VisitPointer(bit_cast<Object**, void**>(&(block->exception_)));
-    v->VisitPointer(bit_cast<Object**, void**>(&(block->message_)));
+    v->VisitPointer(BitCast<Object**, void**>(&(block->exception_)));
+    v->VisitPointer(BitCast<Object**, void**>(&(block->message_)));
   }
 
   // Iterate over pointers on native execution stack.
@@ -438,10 +450,9 @@
 
   // Get the data object from access check info.
   JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
-  Object* info = constructor->shared()->function_data();
-  if (info == Heap::undefined_value()) return;
-
-  Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
+  if (!constructor->shared()->IsApiFunction()) return;
+  Object* data_obj =
+      constructor->shared()->get_api_func_data()->access_check_info();
   if (data_obj == Heap::undefined_value()) return;
 
   HandleScope scope;
@@ -501,10 +512,10 @@
 
   // Get named access check callback
   JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
-  Object* info = constructor->shared()->function_data();
-  if (info == Heap::undefined_value()) return false;
+  if (!constructor->shared()->IsApiFunction()) return false;
 
-  Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
+  Object* data_obj =
+     constructor->shared()->get_api_func_data()->access_check_info();
   if (data_obj == Heap::undefined_value()) return false;
 
   Object* fun_obj = AccessCheckInfo::cast(data_obj)->named_callback();
@@ -546,10 +557,10 @@
 
   // Get indexed access check callback
   JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
-  Object* info = constructor->shared()->function_data();
-  if (info == Heap::undefined_value()) return false;
+  if (!constructor->shared()->IsApiFunction()) return false;
 
-  Object* data_obj = FunctionTemplateInfo::cast(info)->access_check_info();
+  Object* data_obj =
+      constructor->shared()->get_api_func_data()->access_check_info();
   if (data_obj == Heap::undefined_value()) return false;
 
   Object* fun_obj = AccessCheckInfo::cast(data_obj)->indexed_callback();
@@ -949,27 +960,6 @@
 }
 
 
-bool Top::CanHaveSpecialFunctions(JSObject* object) {
-  return object->IsJSArray();
-}
-
-
-Object* Top::LookupSpecialFunction(JSObject* receiver,
-                                   JSObject* prototype,
-                                   JSFunction* function) {
-  if (CanHaveSpecialFunctions(receiver)) {
-    FixedArray* table = context()->global_context()->special_function_table();
-    for (int index = 0; index < table->length(); index +=3) {
-      if ((prototype == table->get(index)) &&
-          (function == table->get(index+1))) {
-        return table->get(index+2);
-      }
-    }
-  }
-  return Heap::undefined_value();
-}
-
-
 char* Top::ArchiveThread(char* to) {
   memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(thread_local_));
   InitializeThreadLocal();
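
The three access-check sites rewritten above now share one shape, distilled here as a sketch (all names are from the diff):

    SharedFunctionInfo* shared = constructor->shared();
    if (!shared->IsApiFunction()) return false;  // replaces the raw
                                                 // function_data() check
    Object* data_obj = shared->get_api_func_data()->access_check_info();
    if (data_obj == Heap::undefined_value()) return false;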
diff --git a/src/top.h b/src/top.h
index ddc73ba..d263777 100644
--- a/src/top.h
+++ b/src/top.h
@@ -40,6 +40,7 @@
 // Top has static variables used for JavaScript execution.
 
 class SaveContext;  // Forward declaration.
+class ThreadVisitor;  // Defined in v8threads.h
 
 class ThreadLocalTop BASE_EMBEDDED {
  public:
@@ -319,6 +320,8 @@
   static void Iterate(ObjectVisitor* v);
   static void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
   static char* Iterate(ObjectVisitor* v, char* t);
+  static void IterateThread(ThreadVisitor* v);
+  static void IterateThread(ThreadVisitor* v, char* t);
 
   // Returns the global object of the current context. It could be
   // a builtin object, or a js global object.
@@ -342,11 +345,6 @@
     return Handle<JSBuiltinsObject>(thread_local_.context_->builtins());
   }
 
-  static bool CanHaveSpecialFunctions(JSObject* object);
-  static Object* LookupSpecialFunction(JSObject* receiver,
-                                       JSObject* prototype,
-                                       JSFunction* value);
-
   static void RegisterTryCatchHandler(v8::TryCatch* that);
   static void UnregisterTryCatchHandler(v8::TryCatch* that);
 
diff --git a/src/usage-analyzer.h b/src/type-info.cc
similarity index 74%
copy from src/usage-analyzer.h
copy to src/type-info.cc
index 1b0ea4a..3fc929d 100644
--- a/src/usage-analyzer.h
+++ b/src/type-info.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,16 +25,29 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_USAGE_ANALYZER_H_
-#define V8_USAGE_ANALYZER_H_
+#include "v8.h"
+#include "type-info.h"
+#include "objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
-// Compute usage counts for all variables.
-// Used for variable allocation.
-bool AnalyzeVariableUsage(FunctionLiteral* lit);
+
+TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
+  TypeInfo info;
+  if (value->IsSmi()) {
+    info = TypeInfo::Smi();
+  } else if (value->IsHeapNumber()) {
+    info = TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value())
+        ? TypeInfo::Integer32()
+        : TypeInfo::Double();
+  } else if (value->IsString()) {
+    info = TypeInfo::String();
+  } else {
+    info = TypeInfo::Unknown();
+  }
+  return info;
+}
+
 
 } }  // namespace v8::internal
-
-#endif  // V8_USAGE_ANALYZER_H_
diff --git a/src/type-info.h b/src/type-info.h
new file mode 100644
index 0000000..568437a
--- /dev/null
+++ b/src/type-info.h
@@ -0,0 +1,244 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_TYPE_INFO_H_
+#define V8_TYPE_INFO_H_
+
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+//        Unknown
+//           |
+//      PrimitiveType
+//           |   \--------|
+//         Number      String
+//         /    |         |
+//    Double  Integer32   |
+//        |      |       /
+//        |     Smi     /
+//        |     /      /
+//        Uninitialized.
+
+class TypeInfo {
+ public:
+  TypeInfo() { }
+
+  static inline TypeInfo Unknown();
+  // We know it's a primitive type.
+  static inline TypeInfo Primitive();
+  // We know it's a number of some sort.
+  static inline TypeInfo Number();
+  // We know it's a signed or unsigned 32-bit integer.
+  static inline TypeInfo Integer32();
+  // We know it's a Smi.
+  static inline TypeInfo Smi();
+  // We know it's a heap number.
+  static inline TypeInfo Double();
+  // We know it's a string.
+  static inline TypeInfo String();
+  // We haven't started collecting info yet.
+  static inline TypeInfo Uninitialized();
+
+  // Return compact representation.  Very sensitive to enum values below!
+  // Compacting drops information about primitive types and string types.
+  // We use the compact representation when we only care about number types.
+  int ThreeBitRepresentation() {
+    ASSERT(type_ != kUninitializedType);
+    int answer = type_ & 0xf;
+    answer = answer > 6 ? answer - 2 : answer;
+    ASSERT(answer >= 0);
+    ASSERT(answer <= 7);
+    return answer;
+  }
+
+  // Decode compact representation.  Very sensitive to enum values below!
+  static TypeInfo ExpandedRepresentation(int three_bit_representation) {
+    Type t = static_cast<Type>(three_bit_representation >= 6 ?
+                               three_bit_representation + 2 :
+                               three_bit_representation);
+    t = (t == kUnknownType) ? t : static_cast<Type>(t | kPrimitiveType);
+    ASSERT(t == kUnknownType ||
+           t == kNumberType ||
+           t == kInteger32Type ||
+           t == kSmiType ||
+           t == kDoubleType);
+    return TypeInfo(t);
+  }
+
+  int ToInt() {
+    return type_;
+  }
+
+  static TypeInfo FromInt(int bit_representation) {
+    Type t = static_cast<Type>(bit_representation);
+    ASSERT(t == kUnknownType ||
+           t == kPrimitiveType ||
+           t == kNumberType ||
+           t == kInteger32Type ||
+           t == kSmiType ||
+           t == kDoubleType ||
+           t == kStringType);
+    return TypeInfo(t);
+  }
+
+  // Return the weakest (least precise) common type.
+  static TypeInfo Combine(TypeInfo a, TypeInfo b) {
+    return TypeInfo(static_cast<Type>(a.type_ & b.type_));
+  }
+
+
+  // Integer32 is an integer that can be represented as either a signed
+  // 32-bit integer or as an unsigned 32-bit integer. It has to be
+  // in the range [-2^31, 2^32 - 1]. We also have to check for negative 0
+  // as it is not an Integer32.
+  static inline bool IsInt32Double(double value) {
+    const DoubleRepresentation minus_zero(-0.0);
+    DoubleRepresentation rep(value);
+    if (rep.bits == minus_zero.bits) return false;
+    if (value >= kMinInt && value <= kMaxUInt32) {
+      if (value <= kMaxInt && value == static_cast<int32_t>(value)) {
+        return true;
+      }
+      if (value == static_cast<uint32_t>(value)) return true;
+    }
+    return false;
+  }
+
+  static TypeInfo TypeFromValue(Handle<Object> value);
+
+  inline bool IsUnknown() {
+    return type_ == kUnknownType;
+  }
+
+  inline bool IsNumber() {
+    ASSERT(type_ != kUninitializedType);
+    return ((type_ & kNumberType) == kNumberType);
+  }
+
+  inline bool IsSmi() {
+    ASSERT(type_ != kUninitializedType);
+    return ((type_ & kSmiType) == kSmiType);
+  }
+
+  inline bool IsInteger32() {
+    ASSERT(type_ != kUninitializedType);
+    return ((type_ & kInteger32Type) == kInteger32Type);
+  }
+
+  inline bool IsDouble() {
+    ASSERT(type_ != kUninitializedType);
+    return ((type_ & kDoubleType) == kDoubleType);
+  }
+
+  inline bool IsString() {
+    ASSERT(type_ != kUninitializedType);
+    return ((type_ & kStringType) == kStringType);
+  }
+
+  inline bool IsUninitialized() {
+    return type_ == kUninitializedType;
+  }
+
+  const char* ToString() {
+    switch (type_) {
+      case kUnknownType: return "UnknownType";
+      case kPrimitiveType: return "PrimitiveType";
+      case kNumberType: return "NumberType";
+      case kInteger32Type: return "Integer32Type";
+      case kSmiType: return "SmiType";
+      case kDoubleType: return "DoubleType";
+      case kStringType: return "StringType";
+      case kUninitializedType:
+        UNREACHABLE();
+        return "UninitializedType";
+    }
+    UNREACHABLE();
+    return "Unreachable code";
+  }
+
+ private:
+  // We use 6 bits to represent the types.
+  enum Type {
+    kUnknownType = 0,          // 000000
+    kPrimitiveType = 0x10,     // 010000
+    kNumberType = 0x11,        // 010001
+    kInteger32Type = 0x13,     // 010011
+    kSmiType = 0x17,           // 010111
+    kDoubleType = 0x19,        // 011001
+    kStringType = 0x30,        // 110000
+    kUninitializedType = 0x3f  // 111111
+  };
+  explicit inline TypeInfo(Type t) : type_(t) { }
+
+  Type type_;
+};
+
+
+TypeInfo TypeInfo::Unknown() {
+  return TypeInfo(kUnknownType);
+}
+
+
+TypeInfo TypeInfo::Primitive() {
+  return TypeInfo(kPrimitiveType);
+}
+
+
+TypeInfo TypeInfo::Number() {
+  return TypeInfo(kNumberType);
+}
+
+
+TypeInfo TypeInfo::Integer32() {
+  return TypeInfo(kInteger32Type);
+}
+
+
+TypeInfo TypeInfo::Smi() {
+  return TypeInfo(kSmiType);
+}
+
+
+TypeInfo TypeInfo::Double() {
+  return TypeInfo(kDoubleType);
+}
+
+
+TypeInfo TypeInfo::String() {
+  return TypeInfo(kStringType);
+}
+
+
+TypeInfo TypeInfo::Uninitialized() {
+  return TypeInfo(kUninitializedType);
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_TYPE_INFO_H_
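
The enum encoding is what makes Combine a single bitwise AND: every type's bit pattern contains the bits of each of its ancestors in the lattice drawn above, so AND-ing two types yields their least precise common ancestor. A worked example (sketch):

    TypeInfo smi = TypeInfo::Smi();            // 010111
    TypeInfo dbl = TypeInfo::Double();         // 011001
    TypeInfo str = TypeInfo::String();         // 110000

    TypeInfo n = TypeInfo::Combine(smi, dbl);  // 010001 == kNumberType
    TypeInfo p = TypeInfo::Combine(smi, str);  // 010000 == kPrimitiveType
    ASSERT(n.IsNumber() && !n.IsSmi() && !n.IsDouble());
    ASSERT(!p.IsNumber());

    // Also note IsInt32Double(-0.0) is false: -0 must stay a Double, since
    // an integer representation cannot preserve its sign.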
diff --git a/src/uri.js b/src/uri.js
index 5af71b6..3adab83 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -244,7 +244,7 @@
     if (cc == 61) return true;
     // ?@
     if (63 <= cc && cc <= 64) return true;
-    
+
     return false;
   };
   var string = ToString(uri);
@@ -268,7 +268,7 @@
   if (65 <= cc && cc <= 90) return true;
   // 0 - 9
   if (48 <= cc && cc <= 57) return true;
-  
+
   return false;
 }
 
@@ -293,7 +293,7 @@
     if (cc == 95) return true;
     // ~
     if (cc == 126) return true;
-    
+
     return false;
   };
 
@@ -316,7 +316,7 @@
     if (cc == 95) return true;
     // ~
     if (cc == 126) return true;
-    
+
     return false;
   };
 
@@ -327,14 +327,14 @@
 
 function HexValueOf(c) {
   var code = c.charCodeAt(0);
-  
+
   // 0-9
   if (code >= 48 && code <= 57) return code - 48;
   // A-F
   if (code >= 65 && code <= 70) return code - 55;
   // a-f
   if (code >= 97 && code <= 102) return code - 87;
-  
+
   return -1;
 }
 
diff --git a/src/usage-analyzer.cc b/src/usage-analyzer.cc
deleted file mode 100644
index 74cf982..0000000
--- a/src/usage-analyzer.cc
+++ /dev/null
@@ -1,426 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "ast.h"
-#include "scopes.h"
-#include "usage-analyzer.h"
-
-namespace v8 {
-namespace internal {
-
-// Weight boundaries
-static const int MinWeight = 1;
-static const int MaxWeight = 1000000;
-static const int InitialWeight = 100;
-
-
-class UsageComputer: public AstVisitor {
- public:
-  static bool Traverse(AstNode* node);
-
-  // AST node visit functions.
-#define DECLARE_VISIT(type) void Visit##type(type* node);
-  AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
-  void VisitVariable(Variable* var);
-
- private:
-  int weight_;
-  bool is_write_;
-
-  UsageComputer(int weight, bool is_write);
-  virtual ~UsageComputer();
-
-  // Helper functions
-  void RecordUses(UseCount* uses);
-  void Read(Expression* x);
-  void Write(Expression* x);
-  void ReadList(ZoneList<Expression*>* list);
-  void ReadList(ZoneList<ObjectLiteral::Property*>* list);
-
-  friend class WeightScaler;
-};
-
-
-class WeightScaler BASE_EMBEDDED {
- public:
-  WeightScaler(UsageComputer* uc, float scale);
-  ~WeightScaler();
-
- private:
-  UsageComputer* uc_;
-  int old_weight_;
-};
-
-
-// ----------------------------------------------------------------------------
-// Implementation of UsageComputer
-
-bool UsageComputer::Traverse(AstNode* node) {
-  UsageComputer uc(InitialWeight, false);
-  uc.Visit(node);
-  return !uc.HasStackOverflow();
-}
-
-
-void UsageComputer::VisitBlock(Block* node) {
-  VisitStatements(node->statements());
-}
-
-
-void UsageComputer::VisitDeclaration(Declaration* node) {
-  Write(node->proxy());
-  if (node->fun() != NULL)
-    VisitFunctionLiteral(node->fun());
-}
-
-
-void UsageComputer::VisitExpressionStatement(ExpressionStatement* node) {
-  Visit(node->expression());
-}
-
-
-void UsageComputer::VisitEmptyStatement(EmptyStatement* node) {
-  // nothing to do
-}
-
-
-void UsageComputer::VisitIfStatement(IfStatement* node) {
-  Read(node->condition());
-  { WeightScaler ws(this, 0.5);  // executed 50% of the time
-    Visit(node->then_statement());
-    Visit(node->else_statement());
-  }
-}
-
-
-void UsageComputer::VisitContinueStatement(ContinueStatement* node) {
-  // nothing to do
-}
-
-
-void UsageComputer::VisitBreakStatement(BreakStatement* node) {
-  // nothing to do
-}
-
-
-void UsageComputer::VisitReturnStatement(ReturnStatement* node) {
-  Read(node->expression());
-}
-
-
-void UsageComputer::VisitWithEnterStatement(WithEnterStatement* node) {
-  Read(node->expression());
-}
-
-
-void UsageComputer::VisitWithExitStatement(WithExitStatement* node) {
-  // nothing to do
-}
-
-
-void UsageComputer::VisitSwitchStatement(SwitchStatement* node) {
-  Read(node->tag());
-  ZoneList<CaseClause*>* cases = node->cases();
-  for (int i = cases->length(); i-- > 0;) {
-    WeightScaler ws(this, static_cast<float>(1.0 / cases->length()));
-    CaseClause* clause = cases->at(i);
-    if (!clause->is_default())
-      Read(clause->label());
-    VisitStatements(clause->statements());
-  }
-}
-
-
-void UsageComputer::VisitDoWhileStatement(DoWhileStatement* node) {
-  WeightScaler ws(this, 10.0);
-  Read(node->cond());
-  Visit(node->body());
-}
-
-
-void UsageComputer::VisitWhileStatement(WhileStatement* node) {
-  WeightScaler ws(this, 10.0);
-  Read(node->cond());
-  Visit(node->body());
-}
-
-
-void UsageComputer::VisitForStatement(ForStatement* node) {
-  if (node->init() != NULL) Visit(node->init());
-  { WeightScaler ws(this, 10.0);  // executed in each iteration
-    if (node->cond() != NULL) Read(node->cond());
-    if (node->next() != NULL) Visit(node->next());
-    Visit(node->body());
-  }
-}
-
-
-void UsageComputer::VisitForInStatement(ForInStatement* node) {
-  WeightScaler ws(this, 10.0);
-  Write(node->each());
-  Read(node->enumerable());
-  Visit(node->body());
-}
-
-
-void UsageComputer::VisitTryCatchStatement(TryCatchStatement* node) {
-  Visit(node->try_block());
-  { WeightScaler ws(this, 0.25);
-    Write(node->catch_var());
-    Visit(node->catch_block());
-  }
-}
-
-
-void UsageComputer::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  Visit(node->try_block());
-  Visit(node->finally_block());
-}
-
-
-void UsageComputer::VisitDebuggerStatement(DebuggerStatement* node) {
-}
-
-
-void UsageComputer::VisitFunctionLiteral(FunctionLiteral* node) {
-  ZoneList<Declaration*>* decls = node->scope()->declarations();
-  for (int i = 0; i < decls->length(); i++) VisitDeclaration(decls->at(i));
-  VisitStatements(node->body());
-}
-
-
-void UsageComputer::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
-  // Do nothing.
-}
-
-
-void UsageComputer::VisitConditional(Conditional* node) {
-  Read(node->condition());
-  { WeightScaler ws(this, 0.5);
-    Read(node->then_expression());
-    Read(node->else_expression());
-  }
-}
-
-
-void UsageComputer::VisitSlot(Slot* node) {
-  UNREACHABLE();
-}
-
-
-void UsageComputer::VisitVariable(Variable* node) {
-  RecordUses(node->var_uses());
-}
-
-
-void UsageComputer::VisitVariableProxy(VariableProxy* node) {
-  // The proxy may refer to a variable in which case it was bound via
-  // VariableProxy::BindTo.
-  RecordUses(node->var_uses());
-}
-
-
-void UsageComputer::VisitLiteral(Literal* node) {
-  // nothing to do
-}
-
-void UsageComputer::VisitRegExpLiteral(RegExpLiteral* node) {
-  // nothing to do
-}
-
-
-void UsageComputer::VisitObjectLiteral(ObjectLiteral* node) {
-  ReadList(node->properties());
-}
-
-
-void UsageComputer::VisitArrayLiteral(ArrayLiteral* node) {
-  ReadList(node->values());
-}
-
-
-void UsageComputer::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  Read(node->value());
-}
-
-
-void UsageComputer::VisitAssignment(Assignment* node) {
-  if (node->op() != Token::ASSIGN)
-    Read(node->target());
-  Write(node->target());
-  Read(node->value());
-}
-
-
-void UsageComputer::VisitThrow(Throw* node) {
-  Read(node->exception());
-}
-
-
-void UsageComputer::VisitProperty(Property* node) {
-  // In any case (read or write) we read both the
-  // node's object and the key.
-  Read(node->obj());
-  Read(node->key());
-  // If the node's object is a variable proxy,
-  // we have a 'simple' object property access. We count
-  // the access via the variable or proxy's object uses.
-  VariableProxy* proxy = node->obj()->AsVariableProxy();
-  if (proxy != NULL) {
-    RecordUses(proxy->obj_uses());
-  }
-}
-
-
-void UsageComputer::VisitCall(Call* node) {
-  Read(node->expression());
-  ReadList(node->arguments());
-}
-
-
-void UsageComputer::VisitCallNew(CallNew* node) {
-  Read(node->expression());
-  ReadList(node->arguments());
-}
-
-
-void UsageComputer::VisitCallRuntime(CallRuntime* node) {
-  ReadList(node->arguments());
-}
-
-
-void UsageComputer::VisitUnaryOperation(UnaryOperation* node) {
-  Read(node->expression());
-}
-
-
-void UsageComputer::VisitCountOperation(CountOperation* node) {
-  Read(node->expression());
-  Write(node->expression());
-}
-
-
-void UsageComputer::VisitBinaryOperation(BinaryOperation* node) {
-  Read(node->left());
-  Read(node->right());
-}
-
-
-void UsageComputer::VisitCompareOperation(CompareOperation* node) {
-  Read(node->left());
-  Read(node->right());
-}
-
-
-void UsageComputer::VisitThisFunction(ThisFunction* node) {
-}
-
-
-UsageComputer::UsageComputer(int weight, bool is_write) {
-  weight_ = weight;
-  is_write_ = is_write;
-}
-
-
-UsageComputer::~UsageComputer() {
-  // nothing to do
-}
-
-
-void UsageComputer::RecordUses(UseCount* uses) {
-  if (is_write_)
-    uses->RecordWrite(weight_);
-  else
-    uses->RecordRead(weight_);
-}
-
-
-void UsageComputer::Read(Expression* x) {
-  if (is_write_) {
-    UsageComputer uc(weight_, false);
-    uc.Visit(x);
-  } else {
-    Visit(x);
-  }
-}
-
-
-void UsageComputer::Write(Expression* x) {
-  if (!is_write_) {
-    UsageComputer uc(weight_, true);
-    uc.Visit(x);
-  } else {
-    Visit(x);
-  }
-}
-
-
-void UsageComputer::ReadList(ZoneList<Expression*>* list) {
-  for (int i = list->length(); i-- > 0; )
-    Read(list->at(i));
-}
-
-
-void UsageComputer::ReadList(ZoneList<ObjectLiteral::Property*>* list) {
-  for (int i = list->length(); i-- > 0; )
-    Read(list->at(i)->value());
-}
-
-
-// ----------------------------------------------------------------------------
-// Implementation of WeightScaler
-
-WeightScaler::WeightScaler(UsageComputer* uc, float scale) {
-  uc_ = uc;
-  old_weight_ = uc->weight_;
-  int new_weight = static_cast<int>(uc->weight_ * scale);
-  if (new_weight <= 0) new_weight = MinWeight;
-  else if (new_weight > MaxWeight) new_weight = MaxWeight;
-  uc->weight_ = new_weight;
-}
-
-
-WeightScaler::~WeightScaler() {
-  uc_->weight_ = old_weight_;
-}
-
-
-// ----------------------------------------------------------------------------
-// Interface to variable usage analysis
-
-bool AnalyzeVariableUsage(FunctionLiteral* lit) {
-  if (!FLAG_usage_computation) return true;
-  HistogramTimerScope timer(&Counters::usage_analysis);
-  return UsageComputer::Traverse(lit);
-}
-
-} }  // namespace v8::internal
diff --git a/src/utils.h b/src/utils.h
index 2fcd241..fa24947 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -29,6 +29,7 @@
 #define V8_UTILS_H_
 
 #include <stdlib.h>
+#include <string.h>
 
 namespace v8 {
 namespace internal {
@@ -340,7 +341,6 @@
   // Releases the array underlying this vector. Once disposed the
   // vector is empty.
   void Dispose() {
-    if (is_empty()) return;
     DeleteArray(start_);
     start_ = NULL;
     length_ = 0;
@@ -396,7 +396,7 @@
     if (this == &rhs) return *this;
     Vector<T>::operator=(rhs);
     memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
-    set_start(buffer_);
+    this->set_start(buffer_);
     return *this;
   }
 
@@ -528,11 +528,11 @@
   sinkchar* limit = dest + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*dest) == sizeof(*src)) {
-    // Number of characters in a uint32_t.
-    static const int kStepSize = sizeof(uint32_t) / sizeof(*dest);  // NOLINT
+    // Number of characters in a uintptr_t.
+    static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest);  // NOLINT
     while (dest <= limit - kStepSize) {
-      *reinterpret_cast<uint32_t*>(dest) =
-          *reinterpret_cast<const uint32_t*>(src);
+      *reinterpret_cast<uintptr_t*>(dest) =
+          *reinterpret_cast<const uintptr_t*>(src);
       dest += kStepSize;
       src += kStepSize;
     }
@@ -544,9 +544,120 @@
 }
 
 
+// Compare ASCII/16bit chars to ASCII/16bit chars.
+template <typename lchar, typename rchar>
+static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+  const lchar* limit = lhs + chars;
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+  if (sizeof(*lhs) == sizeof(*rhs)) {
+    // Number of characters in a uintptr_t.
+    static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs);  // NOLINT
+    while (lhs <= limit - kStepSize) {
+      if (*reinterpret_cast<const uintptr_t*>(lhs) !=
+          *reinterpret_cast<const uintptr_t*>(rhs)) {
+        break;
+      }
+      lhs += kStepSize;
+      rhs += kStepSize;
+    }
+  }
+#endif
+  while (lhs < limit) {
+    int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
+    if (r != 0) return r;
+    ++lhs;
+    ++rhs;
+  }
+  return 0;
+}
+
+
+template <typename T>
+static inline void MemsetPointer(T** dest, T* value, int counter) {
+#if defined(V8_HOST_ARCH_IA32)
+#define STOS "stosl"
+#elif defined(V8_HOST_ARCH_X64)
+#define STOS "stosq"
+#endif
+
+#if defined(__GNUC__) && defined(STOS)
+  asm volatile(
+      "cld;"
+      "rep ; " STOS
+      : "+&c" (counter), "+&D" (dest)
+      : "a" (value)
+      : "memory", "cc");
+#else
+  for (int i = 0; i < counter; i++) {
+    dest[i] = value;
+  }
+#endif
+
+#undef STOS
+}
+
+
+// Copies data from |src| to |dst|.  The data spans MUST not overlap.
+inline void CopyWords(Object** dst, Object** src, int num_words) {
+  ASSERT(Min(dst, src) + num_words <= Max(dst, src));
+  ASSERT(num_words > 0);
+
+  // Use block copying memcpy if the segment we're copying is
+  // big enough to justify the extra call/setup overhead.
+  static const int kBlockCopyLimit = 16;
+
+  if (num_words >= kBlockCopyLimit) {
+    memcpy(dst, src, num_words * kPointerSize);
+  } else {
+    int remaining = num_words;
+    do {
+      remaining--;
+      *dst++ = *src++;
+    } while (remaining > 0);
+  }
+}
+
+
 // Calculate 10^exponent.
 int TenToThe(int exponent);
 
+
+// The type-based aliasing rule allows the compiler to assume that pointers of
+// different types (for some definition of different) never alias each other.
+// Thus the following code does not work:
+//
+// float f = foo();
+// int fbits = *(int*)(&f);
+//
+// The compiler 'knows' that the int pointer can't refer to f since the types
+// don't match, so the compiler may cache f in a register, leaving random data
+// in fbits.  Using C++ style casts makes no difference, however a pointer to
+// char data is assumed to alias any other pointer.  This is the 'memcpy
+// exception'.
+//
+// Bit_cast uses the memcpy exception to move the bits from a variable of one
+// type to a variable of another type.  Of course the end result is likely to
+// be implementation dependent.  Most compilers (gcc-4.2 and MSVC 2005)
+// will completely optimize BitCast away.
+//
+// There is an additional use for BitCast.
+// Recent gccs will warn when they see casts that may result in breakage due to
+// the type-based aliasing rule.  If you have checked that there is no breakage
+// you can use BitCast to cast one pointer type to another.  This confuses gcc
+// enough that it can no longer see that you have cast one pointer type to
+// another thus avoiding the warning.
+template <class Dest, class Source>
+inline Dest BitCast(const Source& source) {
+  // Compile time assertion: sizeof(Dest) == sizeof(Source)
+  // A compile error here means your Dest and Source have different sizes.
+  typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
 } }  // namespace v8::internal
 
+
 #endif  // V8_UTILS_H_
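
To make the aliasing discussion concrete, a small sketch of BitCast against the pointer cast it exists to replace (the caller is assumed, not part of the patch):

    double f = 1.0;
    // Breaks the type-based aliasing rule; the compiler may cache f in a
    // register and leave garbage behind the pointer:
    //   uint64_t bad = *reinterpret_cast<uint64_t*>(&f);
    // Well-defined via the memcpy exception:
    uint64_t bits = BitCast<uint64_t>(f);  // 0x3FF0000000000000 for IEEE-754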
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 1ba2003..bd671a1 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -166,6 +166,7 @@
   SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs)   \
   SC(string_add_runtime, V8.StringAddRuntime)                         \
   SC(string_add_native, V8.StringAddNative)                           \
+  SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii)  \
   SC(sub_string_runtime, V8.SubStringRuntime)                         \
   SC(sub_string_native, V8.SubStringNative)                           \
   SC(string_compare_native, V8.StringCompareNative)                   \
@@ -174,7 +175,6 @@
   SC(regexp_entry_native, V8.RegExpEntryNative)                       \
   SC(number_to_string_native, V8.NumberToStringNative)                \
   SC(number_to_string_runtime, V8.NumberToStringRuntime)              \
-  SC(math_abs, V8.MathAbs)                                            \
   SC(math_acos, V8.MathAcos)                                          \
   SC(math_asin, V8.MathAsin)                                          \
   SC(math_atan, V8.MathAtan)                                          \
diff --git a/src/v8.cc b/src/v8.cc
index 3953361..7219d63 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -43,7 +43,7 @@
 bool V8::has_been_disposed_ = false;
 bool V8::has_fatal_error_ = false;
 
-bool V8::Initialize(Deserializer *des) {
+bool V8::Initialize(Deserializer* des) {
   bool create_heap_objects = des == NULL;
   if (has_been_disposed_ || has_fatal_error_) return false;
   if (IsRunning()) return true;
@@ -60,6 +60,8 @@
   // Enable logging before setting up the heap
   Logger::Setup();
 
+  CpuProfiler::Setup();
+
   // Setup the platform OS support.
   OS::Setup();
 
@@ -148,6 +150,9 @@
   Top::TearDown();
 
   Heap::TearDown();
+
+  CpuProfiler::TearDown();
+
   Logger::TearDown();
 
   is_running_ = false;
@@ -155,6 +160,14 @@
 }
 
 
+static uint32_t random_seed() {
+  if (FLAG_random_seed == 0) {
+    return random();
+  }
+  return FLAG_random_seed;
+}
+
+
 uint32_t V8::Random() {
   // Random number generator using George Marsaglia's MWC algorithm.
   static uint32_t hi = 0;
@@ -164,8 +177,8 @@
   // should ever become zero again, or if random() returns zero, we
   // avoid getting stuck with zero bits in hi or lo by re-initializing
   // them on demand.
-  if (hi == 0) hi = random();
-  if (lo == 0) lo = random();
+  if (hi == 0) hi = random_seed();
+  if (lo == 0) lo = random_seed();
 
   // Mix the bits.
   hi = 36969 * (hi & 0xFFFF) + (hi >> 16);
@@ -183,14 +196,29 @@
   return Heap::IdleNotification();
 }
 
-static const uint32_t kRandomPositiveSmiMax = 0x3fffffff;
 
-Smi* V8::RandomPositiveSmi() {
-  uint32_t random = Random();
-  ASSERT(static_cast<uint32_t>(Smi::kMaxValue) >= kRandomPositiveSmiMax);
-  // kRandomPositiveSmiMax must match the value being divided
-  // by in math.js.
-  return Smi::FromInt(random & kRandomPositiveSmiMax);
+// Use a union type to avoid type-aliasing optimizations in GCC.
+typedef union {
+  double double_value;
+  uint64_t uint64_t_value;
+} double_int_union;
+
+
+Object* V8::FillHeapNumberWithRandom(Object* heap_number) {
+  uint64_t random_bits = Random();
+  // Make a double* from address (heap_number + sizeof(double)).
+  double_int_union* r = reinterpret_cast<double_int_union*>(
+      reinterpret_cast<char*>(heap_number) +
+      HeapNumber::kValueOffset - kHeapObjectTag);
+  // Convert 32 random bits to 0.(32 random bits) in a double
+  // by computing:
+  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
+  const double binary_million = 1048576.0;
+  r->double_value = binary_million;
+  r->uint64_t_value |= random_bits;
+  r->double_value -= binary_million;
+
+  return heap_number;
 }
 
 } }  // namespace v8::internal
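
The mantissa trick in FillHeapNumberWithRandom can be checked in isolation; a standalone sketch assuming IEEE-754 doubles (the function name is illustrative):

    #include <stdint.h>
    #include <string.h>

    double RandomBitsToDouble(uint32_t random_bits) {
      const double binary_million = 1048576.0;  // 1.0 x 2^20, mantissa all 0s
      uint64_t bits;
      memcpy(&bits, &binary_million, sizeof(bits));
      bits |= random_bits;        // fill the low 32 of the 52 mantissa bits
      double d;
      memcpy(&d, &bits, sizeof(d));
      return d - binary_million;  // random_bits * 2^-32, uniform in [0, 1)
    }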
diff --git a/src/v8.h b/src/v8.h
index b3624c5..966d5a9 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -67,7 +67,9 @@
 #include "spaces-inl.h"
 #include "heap-inl.h"
 #include "log-inl.h"
-#include "messages.h"
+#include "cpu-profiler-inl.h"
+#include "handles-inl.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -94,7 +96,7 @@
 
   // Random number generation support. Not cryptographically safe.
   static uint32_t Random();
-  static Smi* RandomPositiveSmi();
+  static Object* FillHeapNumberWithRandom(Object* heap_number);
 
   // Idle notification directly from the API.
   static bool IdleNotification();
diff --git a/src/v8natives.js b/src/v8natives.js
index 6a32d7b..fd86dda 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -54,6 +54,7 @@
     var key = functions[i];
     var f = functions[i + 1];
     %FunctionSetName(f, key);
+    %FunctionRemovePrototype(f);
     %SetProperty(object, key, f, attributes);
   }
   %ToFastProperties(object);
@@ -82,7 +83,10 @@
 
 // ECMA 262 - 15.1.5
 function GlobalIsFinite(number) {
-  return %NumberIsFinite(ToNumber(number));
+  if (!IS_NUMBER(number)) number = ToNumber(number);
+
+  // NaN - NaN == NaN, Infinity - Infinity == NaN, -Infinity - -Infinity == NaN.
+  return %_IsSmi(number) || number - number == 0;
 }
 
 
@@ -221,7 +225,7 @@
 
 // ECMA-262 - 15.2.4.6
 function ObjectIsPrototypeOf(V) {
-  if (!IS_OBJECT(V) && !IS_FUNCTION(V)) return false;
+  if (!IS_OBJECT(V) && !IS_FUNCTION(V) && !IS_UNDETECTABLE(V)) return false;
   return %IsInPrototypeChain(this, V);
 }
 
@@ -236,7 +240,7 @@
 
 // Extensions for providing property getters and setters.
 function ObjectDefineGetter(name, fun) {
-  if (this == null) {
+  if (this == null && !IS_UNDETECTABLE(this)) {
     throw new $TypeError('Object.prototype.__defineGetter__: this is Null');
   }
   if (!IS_FUNCTION(fun)) {
@@ -247,7 +251,7 @@
 
 
 function ObjectLookupGetter(name) {
-  if (this == null) {
+  if (this == null && !IS_UNDETECTABLE(this)) {
     throw new $TypeError('Object.prototype.__lookupGetter__: this is Null');
   }
   return %LookupAccessor(ToObject(this), ToString(name), GETTER);
@@ -255,7 +259,7 @@
 
 
 function ObjectDefineSetter(name, fun) {
-  if (this == null) {
+  if (this == null && !IS_UNDETECTABLE(this)) {
     throw new $TypeError('Object.prototype.__defineSetter__: this is Null');
   }
   if (!IS_FUNCTION(fun)) {
@@ -267,7 +271,7 @@
 
 
 function ObjectLookupSetter(name) {
-  if (this == null) {
+  if (this == null && !IS_UNDETECTABLE(this)) {
     throw new $TypeError('Object.prototype.__lookupSetter__: this is Null');
   }
   return %LookupAccessor(ToObject(this), ToString(name), SETTER);
@@ -275,7 +279,8 @@
 
 
 function ObjectKeys(obj) {
-  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+      !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
   return %LocalKeys(obj);
 }
@@ -481,7 +486,7 @@
 // ES5 section 8.12.1.
 function GetOwnProperty(obj, p) {
   var desc = new PropertyDescriptor();
-  
+
   // An array with:
   //  obj is a data property [false, value, Writeable, Enumerable, Configurable]
   //  obj is an accessor [true, Get, Set, Enumerable, Configurable]
@@ -521,7 +526,7 @@
 }
 
 
-// ES5 8.12.9.  
+// ES5 8.12.9.
 function DefineOwnProperty(obj, p, desc, should_throw) {
   var current = GetOwnProperty(obj, p);
   var extensible = %IsExtensible(ToObject(obj));
@@ -557,7 +562,7 @@
     }
   }
 
-  // Send flags - enumerable and configurable are common - writable is 
+  // Send flags - enumerable and configurable are common - writable is
   // only sent to the data descriptor.
   // Take special care if enumerable and configurable is not defined on
   // desc (we need to preserve the existing values from current).
@@ -594,15 +599,17 @@
 
 // ES5 section 15.2.3.2.
 function ObjectGetPrototypeOf(obj) {
-  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+      !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
   return obj.__proto__;
 }
 
 
-// ES5 section 15.2.3.3 
+// ES5 section 15.2.3.3
 function ObjectGetOwnPropertyDescriptor(obj, p) {
-  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+      !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
   var desc = GetOwnProperty(obj, p);
   return FromPropertyDescriptor(desc);
@@ -611,7 +618,8 @@
 
 // ES5 section 15.2.3.4.
 function ObjectGetOwnPropertyNames(obj) {
-  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+      !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
 
   // Find all the indexed properties.
@@ -664,7 +672,8 @@
 
 // ES5 section 15.2.3.6.
 function ObjectDefineProperty(obj, p, attributes) {
-  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+      !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
   var name = ToString(p);
   var desc = ToPropertyDescriptor(attributes);
@@ -675,7 +684,8 @@
 
 // ES5 section 15.2.3.7.
 function ObjectDefineProperties(obj, properties) {
- if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+     !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
   var props = ToObject(properties);
   var key_values = [];
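
The GlobalIsFinite rewrite above trades a runtime call for IEEE-754 arithmetic; the same predicate written out in C++ (sketch):

    static inline bool IsFiniteDouble(double x) {
      // finite - finite == 0, while NaN - NaN, Inf - Inf, and
      // (-Inf) - (-Inf) all evaluate to NaN, which compares unequal to 0.
      return x - x == 0.0;
    }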
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 80a7cd9..02292f6 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -331,6 +331,17 @@
 }
 
 
+void ThreadManager::IterateThreads(ThreadVisitor* v) {
+  for (ThreadState* state = ThreadState::FirstInUse();
+       state != NULL;
+       state = state->Next()) {
+    char* data = state->data();
+    data += HandleScopeImplementer::ArchiveSpacePerThread();
+    Top::IterateThread(v, data);
+  }
+}
+
+
 void ThreadManager::MarkCompactPrologue(bool is_compacting) {
   for (ThreadState* state = ThreadState::FirstInUse();
        state != NULL;
diff --git a/src/v8threads.h b/src/v8threads.h
index 0684053..d70aa3c 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -79,6 +79,20 @@
 };
 
 
+// Defined in top.h
+class ThreadLocalTop;
+
+
+class ThreadVisitor {
+ public:
+  // ThreadLocalTop may only be available during this call.
+  virtual void VisitThread(ThreadLocalTop* top) = 0;
+
+ protected:
+  virtual ~ThreadVisitor() {}
+};
+
+
 class ThreadManager : public AllStatic {
  public:
   static void Lock();
@@ -90,6 +104,7 @@
   static bool IsArchived();
 
   static void Iterate(ObjectVisitor* v);
+  static void IterateThreads(ThreadVisitor* v);
   static void MarkCompactPrologue(bool is_compacting);
   static void MarkCompactEpilogue(bool is_compacting);
   static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
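
A hypothetical ThreadVisitor implementation, to show how the new hooks compose (the class is illustrative, not part of the patch):

    class SamplingVisitor : public ThreadVisitor {
     public:
      virtual void VisitThread(ThreadLocalTop* top) {
        // Inspect per-thread state here; as noted above, the pointer is
        // only guaranteed valid for the duration of this call.
      }
    };

    // Top::IterateThread(&v) visits the running thread, while
    // ThreadManager::IterateThreads(&v) walks each archived thread, skipping
    // the HandleScopeImplementer data at the front of each state block.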
diff --git a/src/variables.cc b/src/variables.cc
index 3bcd48a..f46a54d 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -35,57 +35,6 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// Implementation UseCount.
-
-UseCount::UseCount()
-  : nreads_(0),
-    nwrites_(0) {
-}
-
-
-void UseCount::RecordRead(int weight) {
-  ASSERT(weight > 0);
-  nreads_ += weight;
-  // We must have a positive nreads_ here. Handle
-  // any kind of overflow by setting nreads_ to
-  // some large-ish value.
-  if (nreads_ <= 0) nreads_ = 1000000;
-  ASSERT(is_read() & is_used());
-}
-
-
-void UseCount::RecordWrite(int weight) {
-  ASSERT(weight > 0);
-  nwrites_ += weight;
-  // We must have a positive nwrites_ here. Handle
-  // any kind of overflow by setting nwrites_ to
-  // some large-ish value.
-  if (nwrites_ <= 0) nwrites_ = 1000000;
-  ASSERT(is_written() && is_used());
-}
-
-
-void UseCount::RecordAccess(int weight) {
-  RecordRead(weight);
-  RecordWrite(weight);
-}
-
-
-void UseCount::RecordUses(UseCount* uses) {
-  if (uses->nreads() > 0) RecordRead(uses->nreads());
-  if (uses->nwrites() > 0) RecordWrite(uses->nwrites());
-}
-
-
-#ifdef DEBUG
-void UseCount::Print() {
-  // PrintF("r = %d, w = %d", nreads_, nwrites_);
-  PrintF("%du = %dr + %dw", nuses(), nreads(), nwrites());
-}
-#endif
-
-
-// ----------------------------------------------------------------------------
 // Implementation StaticType.
 
 
@@ -136,6 +85,12 @@
 }
 
 
+bool Variable::IsStackAllocated() const {
+  Slot* s = slot();
+  return s != NULL && s->IsStackAllocated();
+}
+
+
 Variable::Variable(Scope* scope,
                    Handle<String> name,
                    Mode mode,
@@ -148,6 +103,7 @@
     kind_(kind),
     local_if_not_shadowed_(NULL),
     is_accessed_from_inner_scope_(false),
+    is_used_(false),
     rewrite_(NULL) {
   // names must be canonicalized for fast equality checks
   ASSERT(name->IsSymbol());
diff --git a/src/variables.h b/src/variables.h
index ac7f294..618f6ac 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -33,35 +33,6 @@
 namespace v8 {
 namespace internal {
 
-class UseCount BASE_EMBEDDED {
- public:
-  UseCount();
-
-  // Inform the node of a "use". The weight can be used to indicate
-  // heavier use, for instance if the variable is accessed inside a loop.
-  void RecordRead(int weight);
-  void RecordWrite(int weight);
-  void RecordAccess(int weight);  // records a read & write
-  void RecordUses(UseCount* uses);
-
-  int nreads() const  { return nreads_; }
-  int nwrites() const  { return nwrites_; }
-  int nuses() const  { return nreads_ + nwrites_; }
-
-  bool is_read() const  { return nreads() > 0; }
-  bool is_written() const  { return nwrites() > 0; }
-  bool is_used() const  { return nuses() > 0; }
-
-#ifdef DEBUG
-  void Print();
-#endif
-
- private:
-  int nreads_;
-  int nwrites_;
-};
-
-
 // Variables and AST expression nodes can track their "type" to enable
 // optimizations and removal of redundant checks when generating code.
 
@@ -99,8 +70,6 @@
 
  private:
   Kind kind_;
-
-  DISALLOW_COPY_AND_ASSIGN(StaticType);
 };
 
 
@@ -168,13 +137,15 @@
   bool is_accessed_from_inner_scope() const  {
     return is_accessed_from_inner_scope_;
   }
-  UseCount* var_uses()  { return &var_uses_; }
-  UseCount* obj_uses()  { return &obj_uses_; }
+  bool is_used() { return is_used_; }
+  void set_is_used(bool flag) { is_used_ = flag; }
 
   bool IsVariable(Handle<String> n) const {
     return !is_this() && name().is_identical_to(n);
   }
 
+  bool IsStackAllocated() const;
+
   bool is_dynamic() const {
     return (mode_ == DYNAMIC ||
             mode_ == DYNAMIC_GLOBAL ||
@@ -216,8 +187,7 @@
 
   // Usage info.
   bool is_accessed_from_inner_scope_;  // set by variable resolver
-  UseCount var_uses_;  // uses of the variable value
-  UseCount obj_uses_;  // uses of the object the variable points to
+  bool is_used_;
 
   // Static type information
   StaticType type_;
diff --git a/src/version.cc b/src/version.cc
index f6d84f3..7563c69 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,10 +33,10 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
-#define MINOR_VERSION     1
-#define BUILD_NUMBER      3
+#define MINOR_VERSION     2
+#define BUILD_NUMBER      8
 #define PATCH_LEVEL       0
-#define CANDIDATE_VERSION true
+#define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build put a specific SONAME into the
 // shared library instead of the generic SONAME generated from the V8 version
diff --git a/src/virtual-frame-heavy-inl.h b/src/virtual-frame-heavy-inl.h
new file mode 100644
index 0000000..6381d01
--- /dev/null
+++ b/src/virtual-frame-heavy-inl.h
@@ -0,0 +1,152 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_HEAVY_INL_H_
+#define V8_VIRTUAL_FRAME_HEAVY_INL_H_
+
+#include "type-info.h"
+#include "register-allocator.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// On entry to a function, the virtual frame already contains the receiver,
+// the parameters, and a return address.  All frame elements are in memory.
+VirtualFrame::VirtualFrame()
+    : elements_(parameter_count() + local_count() + kPreallocatedElements),
+      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
+  for (int i = 0; i <= stack_pointer_; i++) {
+    elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown()));
+  }
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    register_locations_[i] = kIllegalIndex;
+  }
+}
+
+
+// When cloned, a frame is a deep copy of the original.
+VirtualFrame::VirtualFrame(VirtualFrame* original)
+    : elements_(original->element_count()),
+      stack_pointer_(original->stack_pointer_) {
+  elements_.AddAll(original->elements_);
+  // Copy register locations from original.
+  memcpy(&register_locations_,
+         original->register_locations_,
+         sizeof(register_locations_));
+}
+
+
+void VirtualFrame::PushFrameSlotAt(int index) {
+  elements_.Add(CopyElementAt(index));
+}
+
+
+void VirtualFrame::Push(Register reg, TypeInfo info) {
+  if (is_used(reg)) {
+    int index = register_location(reg);
+    FrameElement element = CopyElementAt(index, info);
+    elements_.Add(element);
+  } else {
+    Use(reg, element_count());
+    FrameElement element =
+        FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, info);
+    elements_.Add(element);
+  }
+}
+
+
+void VirtualFrame::Push(Handle<Object> value) {
+  FrameElement element =
+      FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
+  elements_.Add(element);
+}
+
+
+bool VirtualFrame::Equals(VirtualFrame* other) {
+#ifdef DEBUG
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    if (register_location(i) != other->register_location(i)) {
+      return false;
+    }
+  }
+  if (element_count() != other->element_count()) return false;
+#endif
+  if (stack_pointer_ != other->stack_pointer_) return false;
+  for (int i = 0; i < element_count(); i++) {
+    if (!elements_[i].Equals(other->elements_[i])) return false;
+  }
+
+  return true;
+}
+
+
+void VirtualFrame::SetTypeForLocalAt(int index, TypeInfo info) {
+  elements_[local0_index() + index].set_type_info(info);
+}
+
+
+// Make the type of all elements be MEMORY.
+void VirtualFrame::SpillAll() {
+  for (int i = 0; i < element_count(); i++) {
+    SpillElementAt(i);
+  }
+}
+
+
+void VirtualFrame::PrepareForReturn() {
+  // Spill all locals. This is necessary to make sure all locals have
+  // the right value when breaking at the return site in the debugger.
+  for (int i = 0; i < expression_base_index(); i++) {
+    SpillElementAt(i);
+  }
+}
+
+
+void VirtualFrame::SetTypeForParamAt(int index, TypeInfo info) {
+  elements_[param0_index() + index].set_type_info(info);
+}
+
+
+void VirtualFrame::Nip(int num_dropped) {
+  ASSERT(num_dropped >= 0);
+  if (num_dropped == 0) return;
+  Result tos = Pop();
+  if (num_dropped > 1) {
+    Drop(num_dropped - 1);
+  }
+  SetElementAt(0, &tos);
+}
+
+
+void VirtualFrame::Push(Smi* value) {
+  Push(Handle<Object> (value));
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_VIRTUAL_FRAME_HEAVY_INL_H_
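
An aside on the heavy frame constructor above: on entry the frame holds the
receiver, the parameters, and the return address, so with N parameters there
are N + 2 elements and the 0-based top-of-stack index is N + 1, which is
exactly the parameter_count() + 1 used to initialize stack_pointer_. A minimal
standalone check of that arithmetic (not V8 code):

    #include <cassert>

    int main() {
      // [receiver, param 0 .. param N-1, return address] has N + 2 elements,
      // so the 0-based index of the top of stack is N + 1.
      for (int parameter_count = 0; parameter_count < 8; parameter_count++) {
        int element_count = 1 /* receiver */ + parameter_count + 1 /* ret */;
        int top_of_stack = element_count - 1;
        assert(top_of_stack == parameter_count + 1);
      }
      return 0;
    }

The light frame below starts from the same layout: its constructor sets
element_count_ to parameter_count() + 2.
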
diff --git a/src/virtual-frame-heavy.cc b/src/virtual-frame-heavy.cc
new file mode 100644
index 0000000..7270280
--- /dev/null
+++ b/src/virtual-frame-heavy.cc
@@ -0,0 +1,312 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void VirtualFrame::SetElementAt(int index, Result* value) {
+  int frame_index = element_count() - index - 1;
+  ASSERT(frame_index >= 0);
+  ASSERT(frame_index < element_count());
+  ASSERT(value->is_valid());
+  FrameElement original = elements_[frame_index];
+
+  // Early exit if the element is the same as the one being set.
+  bool same_register = original.is_register()
+      && value->is_register()
+      && original.reg().is(value->reg());
+  bool same_constant = original.is_constant()
+      && value->is_constant()
+      && original.handle().is_identical_to(value->handle());
+  if (same_register || same_constant) {
+    value->Unuse();
+    return;
+  }
+
+  InvalidateFrameSlotAt(frame_index);
+
+  if (value->is_register()) {
+    if (is_used(value->reg())) {
+      // The register already appears on the frame.  Either the existing
+      // register element, or the new element at frame_index, must be made
+      // a copy.
+      int i = register_location(value->reg());
+
+      if (i < frame_index) {
+        // The register FrameElement is lower in the frame than the new copy.
+        elements_[frame_index] = CopyElementAt(i);
+      } else {
+        // There was an early bailout for the case of setting a
+        // register element to itself.
+        ASSERT(i != frame_index);
+        elements_[frame_index] = elements_[i];
+        elements_[i] = CopyElementAt(frame_index);
+        if (elements_[frame_index].is_synced()) {
+          elements_[i].set_sync();
+        }
+        elements_[frame_index].clear_sync();
+        set_register_location(value->reg(), frame_index);
+        for (int j = i + 1; j < element_count(); j++) {
+          if (elements_[j].is_copy() && elements_[j].index() == i) {
+            elements_[j].set_index(frame_index);
+          }
+        }
+      }
+    } else {
+      // The register value->reg() was not already used on the frame.
+      Use(value->reg(), frame_index);
+      elements_[frame_index] =
+          FrameElement::RegisterElement(value->reg(),
+                                        FrameElement::NOT_SYNCED,
+                                        value->type_info());
+    }
+  } else {
+    ASSERT(value->is_constant());
+    elements_[frame_index] =
+        FrameElement::ConstantElement(value->handle(),
+                                      FrameElement::NOT_SYNCED);
+  }
+  value->Unuse();
+}
+
+
+// Create a duplicate of an existing valid frame element.
+// We can pass optional number type information that will override the
+// existing information about the backing element. The new information must
+// not conflict with the existing type information and must be equally or
+// more precise. The default parameter value kUninitialized means that there
+// is no additional information.
+FrameElement VirtualFrame::CopyElementAt(int index, TypeInfo info) {
+  ASSERT(index >= 0);
+  ASSERT(index < element_count());
+
+  FrameElement target = elements_[index];
+  FrameElement result;
+
+  switch (target.type()) {
+    case FrameElement::CONSTANT:
+      // We do not copy constants and instead return a fresh unsynced
+      // constant.
+      result = FrameElement::ConstantElement(target.handle(),
+                                             FrameElement::NOT_SYNCED);
+      break;
+
+    case FrameElement::COPY:
+      // We do not allow copies of copies, so we follow one link to
+      // the actual backing store of a copy before making a copy.
+      index = target.index();
+      ASSERT(elements_[index].is_memory() || elements_[index].is_register());
+      // Fall through.
+
+    case FrameElement::MEMORY:  // Fall through.
+    case FrameElement::REGISTER: {
+      // All copies are backed by memory or register locations.
+      result.set_type(FrameElement::COPY);
+      result.clear_copied();
+      result.clear_sync();
+      result.set_index(index);
+      elements_[index].set_copied();
+      // Update backing element's number information.
+      TypeInfo existing = elements_[index].type_info();
+      ASSERT(!existing.IsUninitialized());
+      // Assert that the new type information (a) does not conflict with the
+      // existing one and (b) is equally or more precise.
+      ASSERT((info.ToInt() & existing.ToInt()) == existing.ToInt());
+      ASSERT((info.ToInt() | existing.ToInt()) == info.ToInt());
+
+      elements_[index].set_type_info(!info.IsUninitialized()
+                                       ? info
+                                       : existing);
+      break;
+    }
+    case FrameElement::INVALID:
+      // We should not try to copy invalid elements.
+      UNREACHABLE();
+      break;
+  }
+  return result;
+}
+
+
+// Modify the state of the virtual frame to match the actual frame by adding
+// extra in-memory elements to the top of the virtual frame.  The extra
+// elements will be externally materialized on the actual frame (e.g., by
+// pushing an exception handler).  No code is emitted.
+void VirtualFrame::Adjust(int count) {
+  ASSERT(count >= 0);
+  ASSERT(stack_pointer_ == element_count() - 1);
+
+  for (int i = 0; i < count; i++) {
+    elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown()));
+  }
+  stack_pointer_ += count;
+}
+
+
+void VirtualFrame::ForgetElements(int count) {
+  ASSERT(count >= 0);
+  ASSERT(element_count() >= count);
+
+  for (int i = 0; i < count; i++) {
+    FrameElement last = elements_.RemoveLast();
+    if (last.is_register()) {
+      // A hack to properly count register references for the code
+      // generator's current frame and also for other frames.  The
+      // same code appears in PrepareMergeTo.
+      if (cgen()->frame() == this) {
+        Unuse(last.reg());
+      } else {
+        set_register_location(last.reg(), kIllegalIndex);
+      }
+    }
+  }
+}
+
+
+// Make the type of the element at a given index be MEMORY.
+void VirtualFrame::SpillElementAt(int index) {
+  if (!elements_[index].is_valid()) return;
+
+  SyncElementAt(index);
+  // Number type information is preserved.
+  // Copies get their number information from their backing element.
+  TypeInfo info;
+  if (!elements_[index].is_copy()) {
+    info = elements_[index].type_info();
+  } else {
+    info = elements_[elements_[index].index()].type_info();
+  }
+  // The element is now in memory.  Its copied flag is preserved.
+  FrameElement new_element = FrameElement::MemoryElement(info);
+  if (elements_[index].is_copied()) {
+    new_element.set_copied();
+  }
+  if (elements_[index].is_untagged_int32()) {
+    new_element.set_untagged_int32(true);
+  }
+  if (elements_[index].is_register()) {
+    Unuse(elements_[index].reg());
+  }
+  elements_[index] = new_element;
+}
+
+
+// Clear the dirty bit for the element at a given index.
+void VirtualFrame::SyncElementAt(int index) {
+  if (index <= stack_pointer_) {
+    if (!elements_[index].is_synced()) SyncElementBelowStackPointer(index);
+  } else if (index == stack_pointer_ + 1) {
+    SyncElementByPushing(index);
+  } else {
+    SyncRange(stack_pointer_ + 1, index);
+  }
+}
+
+
+void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
+  // Perform state changes on this frame that will make merging to the
+  // expected frame simpler, or else increase the likelihood that this
+  // frame will match another.
+  for (int i = 0; i < element_count(); i++) {
+    FrameElement source = elements_[i];
+    FrameElement target = expected->elements_[i];
+
+    if (!target.is_valid() ||
+        (target.is_memory() && !source.is_memory() && source.is_synced())) {
+      // No code needs to be generated to invalidate valid elements.
+      // No code needs to be generated to move values to memory if
+      // they are already synced.  We perform those moves here, before
+      // merging.
+      if (source.is_register()) {
+        // If the frame is the code generator's current frame, we have
+        // to decrement both the frame-internal and global register
+        // counts.
+        if (cgen()->frame() == this) {
+          Unuse(source.reg());
+        } else {
+          set_register_location(source.reg(), kIllegalIndex);
+        }
+      }
+      elements_[i] = target;
+    } else if (target.is_register() && !target.is_synced() &&
+               !source.is_memory()) {
+      // If an element's target is a register that doesn't need to be
+      // synced, and the element is not in memory, then the sync state
+      // of the element is irrelevant.  We clear the sync bit.
+      ASSERT(source.is_valid());
+      elements_[i].clear_sync();
+    }
+  }
+}
+
+
+void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
+  ASSERT(height() >= dropped_args);
+  ASSERT(height() >= spilled_args);
+  ASSERT(dropped_args <= spilled_args);
+
+  SyncRange(0, element_count() - 1);
+  // Spill registers.
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    if (is_used(i)) {
+      SpillElementAt(register_location(i));
+    }
+  }
+
+  // Spill the arguments.
+  for (int i = element_count() - spilled_args; i < element_count(); i++) {
+    if (!elements_[i].is_memory()) {
+      SpillElementAt(i);
+    }
+  }
+
+  // Forget the frame elements that will be popped by the call.
+  Forget(dropped_args);
+}
+
+
+// If there are any registers referenced only by the frame, spill one.
+Register VirtualFrame::SpillAnyRegister() {
+  // Find the leftmost (ordered by register number) register whose only
+  // reference is in the frame.
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    if (is_used(i) && cgen()->allocator()->count(i) == 1) {
+      SpillElementAt(register_location(i));
+      ASSERT(!cgen()->allocator()->is_used(i));
+      return RegisterAllocator::ToRegister(i);
+    }
+  }
+  return no_reg;
+}
+
+} }  // namespace v8::internal
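
The pair of ASSERTs in CopyElementAt above express "the new type info is at
least as precise as the existing info" as a bitmask-subset test:
(info & existing) == existing and (info | existing) == info both say that
every bit set in existing is also set in info. A small sketch with made-up
masks (the real TypeInfo bit assignments are not shown in this diff):

    #include <cassert>

    // Hypothetical masks; only the subset relation matters here.
    const int kNumberMask = 0x1;        // coarse: "some number"
    const int kSmiMask    = 0x1 | 0x2;  // strictly more precise

    bool AtLeastAsPrecise(int info, int existing) {
      // The two formulations checked by the ASSERTs in CopyElementAt.
      return (info & existing) == existing && (info | existing) == info;
    }

    int main() {
      assert(AtLeastAsPrecise(kSmiMask, kNumberMask));   // refining is fine
      assert(!AtLeastAsPrecise(kNumberMask, kSmiMask));  // widening is not
      return 0;
    }
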
diff --git a/src/usage-analyzer.h b/src/virtual-frame-inl.h
similarity index 81%
copy from src/usage-analyzer.h
copy to src/virtual-frame-inl.h
index 1b0ea4a..c9f4aac 100644
--- a/src/usage-analyzer.h
+++ b/src/virtual-frame-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,16 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_USAGE_ANALYZER_H_
-#define V8_USAGE_ANALYZER_H_
+#ifndef V8_VIRTUAL_FRAME_INL_H_
+#define V8_VIRTUAL_FRAME_INL_H_
 
-namespace v8 {
-namespace internal {
+#include "virtual-frame.h"
 
-// Compute usage counts for all variables.
-// Used for variable allocation.
-bool AnalyzeVariableUsage(FunctionLiteral* lit);
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+#include "virtual-frame-heavy-inl.h"
+#else
+#include "virtual-frame-light-inl.h"
+#endif
 
-} }  // namespace v8::internal
-
-#endif  // V8_USAGE_ANALYZER_H_
+#endif  // V8_VIRTUAL_FRAME_INL_H_
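
The new virtual-frame-inl.h is purely a dispatch header: ia32 and x64 get the
register-rich "heavy" frame, everything else (notably ARM for this port) gets
the "light" one, and the choice is made once at preprocessor time. The same
pattern reduced to a single-file sketch (the macros and flavors here are
illustrative, not the V8 build's):

    #include <cstdio>

    // One neutral entry point selects exactly one platform implementation,
    // mirroring the #if in virtual-frame-inl.h above.
    #if defined(__x86_64__) || defined(__i386__)
    static const char* kFrameFlavor = "heavy";
    #else
    static const char* kFrameFlavor = "light";
    #endif

    int main() {
      printf("virtual frame flavor: %s\n", kFrameFlavor);
      return 0;
    }
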
diff --git a/src/virtual-frame-light-inl.h b/src/virtual-frame-light-inl.h
new file mode 100644
index 0000000..c50e6c8
--- /dev/null
+++ b/src/virtual-frame-light-inl.h
@@ -0,0 +1,69 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_LIGHT_INL_H_
+#define V8_VIRTUAL_FRAME_LIGHT_INL_H_
+
+#include "type-info.h"
+#include "register-allocator.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// On entry to a function, the virtual frame already contains the receiver,
+// the parameters, and a return address.  All frame elements are in memory.
+VirtualFrame::VirtualFrame()
+    : element_count_(parameter_count() + 2),
+      top_of_stack_state_(NO_TOS_REGISTERS),
+      register_allocation_map_(0) { }
+
+
+// When cloned, a frame is a deep copy of the original.
+VirtualFrame::VirtualFrame(VirtualFrame* original)
+    : element_count_(original->element_count()),
+      top_of_stack_state_(original->top_of_stack_state_),
+      register_allocation_map_(original->register_allocation_map_) { }
+
+
+bool VirtualFrame::Equals(VirtualFrame* other) {
+  ASSERT(element_count() == other->element_count());
+  if (top_of_stack_state_ != other->top_of_stack_state_) return false;
+  if (register_allocation_map_ != other->register_allocation_map_) return false;
+
+  return true;
+}
+
+
+void VirtualFrame::PrepareForReturn() {
+  SpillAll();
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_VIRTUAL_FRAME_LIGHT_INL_H_
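
Note how little state the light frame carries: no per-element list, just a
height (element_count_), which registers currently cache the top of stack
(top_of_stack_state_), and a register-allocation bitmap. That is why Equals()
above reduces to two integer comparisons once the heights agree. A toy mirror
of that comparison (field names follow the diff; everything else is made up):

    #include <cassert>

    struct LightFrame {
      int element_count;                 // element_count_
      int top_of_stack_state;            // top_of_stack_state_
      unsigned register_allocation_map;  // register_allocation_map_
    };

    bool Equals(const LightFrame& a, const LightFrame& b) {
      assert(a.element_count == b.element_count);  // heights already agree
      return a.top_of_stack_state == b.top_of_stack_state &&
             a.register_allocation_map == b.register_allocation_map;
    }

    int main() {
      LightFrame f = { 4, 0, 0u };
      LightFrame g = { 4, 1, 0u };  // TOS cached in a register
      assert(!Equals(f, g));
      g.top_of_stack_state = 0;     // spill it back
      assert(Equals(f, g));
      return 0;
    }
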
diff --git a/src/usage-analyzer.h b/src/virtual-frame-light.cc
similarity index 79%
copy from src/usage-analyzer.h
copy to src/virtual-frame-light.cc
index 1b0ea4a..27c48a5 100644
--- a/src/usage-analyzer.h
+++ b/src/virtual-frame-light.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,16 +25,25 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_USAGE_ANALYZER_H_
-#define V8_USAGE_ANALYZER_H_
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
 
-// Compute usage counts for all variables.
-// Used for variable allocation.
-bool AnalyzeVariableUsage(FunctionLiteral* lit);
+void VirtualFrame::Adjust(int count) {
+  ASSERT(count >= 0);
+  element_count_ += count;
+}
+
+
+// If there are any registers referenced only by the frame, spill one.
+Register VirtualFrame::SpillAnyRegister() {
+  UNIMPLEMENTED();
+  return no_reg;
+}
 
 } }  // namespace v8::internal
-
-#endif  // V8_USAGE_ANALYZER_H_
diff --git a/src/virtual-frame.cc b/src/virtual-frame.cc
index 3624e25..310ff59 100644
--- a/src/virtual-frame.cc
+++ b/src/virtual-frame.cc
@@ -29,6 +29,7 @@
 
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -36,363 +37,6 @@
 // -------------------------------------------------------------------------
 // VirtualFrame implementation.
 
-// When cloned, a frame is a deep copy of the original.
-VirtualFrame::VirtualFrame(VirtualFrame* original)
-    : elements_(original->element_count()),
-      stack_pointer_(original->stack_pointer_) {
-  elements_.AddAll(original->elements_);
-  // Copy register locations from original.
-  memcpy(&register_locations_,
-         original->register_locations_,
-         sizeof(register_locations_));
-}
-
-
-// Create a duplicate of an existing valid frame element.
-// We can pass an optional number type information that will override the
-// existing information about the backing element. The new information must
-// not conflict with the existing type information and must be equally or
-// more precise. The default parameter value kUninitialized means that there
-// is no additional information.
-FrameElement VirtualFrame::CopyElementAt(int index, NumberInfo::Type info) {
-  ASSERT(index >= 0);
-  ASSERT(index < element_count());
-
-  FrameElement target = elements_[index];
-  FrameElement result;
-
-  switch (target.type()) {
-    case FrameElement::CONSTANT:
-      // We do not copy constants and instead return a fresh unsynced
-      // constant.
-      result = FrameElement::ConstantElement(target.handle(),
-                                             FrameElement::NOT_SYNCED);
-      break;
-
-    case FrameElement::COPY:
-      // We do not allow copies of copies, so we follow one link to
-      // the actual backing store of a copy before making a copy.
-      index = target.index();
-      ASSERT(elements_[index].is_memory() || elements_[index].is_register());
-      // Fall through.
-
-    case FrameElement::MEMORY:  // Fall through.
-    case FrameElement::REGISTER: {
-      // All copies are backed by memory or register locations.
-      result.set_type(FrameElement::COPY);
-      result.clear_copied();
-      result.clear_sync();
-      result.set_index(index);
-      elements_[index].set_copied();
-      // Update backing element's number information.
-      NumberInfo::Type existing = elements_[index].number_info();
-      ASSERT(existing != NumberInfo::kUninitialized);
-      // Assert that the new type information (a) does not conflict with the
-      // existing one and (b) is equally or more precise.
-      ASSERT((info == NumberInfo::kUninitialized) ||
-             (existing | info) != NumberInfo::kUninitialized);
-      ASSERT(existing <= info);
-      elements_[index].set_number_info(info != NumberInfo::kUninitialized
-                                       ? info
-                                       : existing);
-      break;
-    }
-    case FrameElement::INVALID:
-      // We should not try to copy invalid elements.
-      UNREACHABLE();
-      break;
-  }
-  return result;
-}
-
-
-// Modify the state of the virtual frame to match the actual frame by adding
-// extra in-memory elements to the top of the virtual frame.  The extra
-// elements will be externally materialized on the actual frame (eg, by
-// pushing an exception handler).  No code is emitted.
-void VirtualFrame::Adjust(int count) {
-  ASSERT(count >= 0);
-  ASSERT(stack_pointer_ == element_count() - 1);
-
-  for (int i = 0; i < count; i++) {
-    elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
-  }
-  stack_pointer_ += count;
-}
-
-
-void VirtualFrame::ForgetElements(int count) {
-  ASSERT(count >= 0);
-  ASSERT(element_count() >= count);
-
-  for (int i = 0; i < count; i++) {
-    FrameElement last = elements_.RemoveLast();
-    if (last.is_register()) {
-      // A hack to properly count register references for the code
-      // generator's current frame and also for other frames.  The
-      // same code appears in PrepareMergeTo.
-      if (cgen()->frame() == this) {
-        Unuse(last.reg());
-      } else {
-        set_register_location(last.reg(), kIllegalIndex);
-      }
-    }
-  }
-}
-
-
-// If there are any registers referenced only by the frame, spill one.
-Register VirtualFrame::SpillAnyRegister() {
-  // Find the leftmost (ordered by register number) register whose only
-  // reference is in the frame.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    if (is_used(i) && cgen()->allocator()->count(i) == 1) {
-      SpillElementAt(register_location(i));
-      ASSERT(!cgen()->allocator()->is_used(i));
-      return RegisterAllocator::ToRegister(i);
-    }
-  }
-  return no_reg;
-}
-
-
-// Make the type of the element at a given index be MEMORY.
-void VirtualFrame::SpillElementAt(int index) {
-  if (!elements_[index].is_valid()) return;
-
-  SyncElementAt(index);
-  // Number type information is preserved.
-  // Copies get their number information from their backing element.
-  NumberInfo::Type info;
-  if (!elements_[index].is_copy()) {
-    info = elements_[index].number_info();
-  } else {
-    info = elements_[elements_[index].index()].number_info();
-  }
-  // The element is now in memory.  Its copied flag is preserved.
-  FrameElement new_element = FrameElement::MemoryElement(info);
-  if (elements_[index].is_copied()) {
-    new_element.set_copied();
-  }
-  if (elements_[index].is_register()) {
-    Unuse(elements_[index].reg());
-  }
-  elements_[index] = new_element;
-}
-
-
-// Clear the dirty bit for the element at a given index.
-void VirtualFrame::SyncElementAt(int index) {
-  if (index <= stack_pointer_) {
-    if (!elements_[index].is_synced()) SyncElementBelowStackPointer(index);
-  } else if (index == stack_pointer_ + 1) {
-    SyncElementByPushing(index);
-  } else {
-    SyncRange(stack_pointer_ + 1, index);
-  }
-}
-
-
-// Make the type of all elements be MEMORY.
-void VirtualFrame::SpillAll() {
-  for (int i = 0; i < element_count(); i++) {
-    SpillElementAt(i);
-  }
-}
-
-
-void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
-  // Perform state changes on this frame that will make merge to the
-  // expected frame simpler or else increase the likelihood that his
-  // frame will match another.
-  for (int i = 0; i < element_count(); i++) {
-    FrameElement source = elements_[i];
-    FrameElement target = expected->elements_[i];
-
-    if (!target.is_valid() ||
-        (target.is_memory() && !source.is_memory() && source.is_synced())) {
-      // No code needs to be generated to invalidate valid elements.
-      // No code needs to be generated to move values to memory if
-      // they are already synced.  We perform those moves here, before
-      // merging.
-      if (source.is_register()) {
-        // If the frame is the code generator's current frame, we have
-        // to decrement both the frame-internal and global register
-        // counts.
-        if (cgen()->frame() == this) {
-          Unuse(source.reg());
-        } else {
-          set_register_location(source.reg(), kIllegalIndex);
-        }
-      }
-      elements_[i] = target;
-    } else if (target.is_register() && !target.is_synced() &&
-               !source.is_memory()) {
-      // If an element's target is a register that doesn't need to be
-      // synced, and the element is not in memory, then the sync state
-      // of the element is irrelevant.  We clear the sync bit.
-      ASSERT(source.is_valid());
-      elements_[i].clear_sync();
-    }
-  }
-}
-
-
-void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
-  ASSERT(height() >= dropped_args);
-  ASSERT(height() >= spilled_args);
-  ASSERT(dropped_args <= spilled_args);
-
-  SyncRange(0, element_count() - 1);
-  // Spill registers.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    if (is_used(i)) {
-      SpillElementAt(register_location(i));
-    }
-  }
-
-  // Spill the arguments.
-  for (int i = element_count() - spilled_args; i < element_count(); i++) {
-    if (!elements_[i].is_memory()) {
-      SpillElementAt(i);
-    }
-  }
-
-  // Forget the frame elements that will be popped by the call.
-  Forget(dropped_args);
-}
-
-
-void VirtualFrame::PrepareForReturn() {
-  // Spill all locals. This is necessary to make sure all locals have
-  // the right value when breaking at the return site in the debugger.
-  for (int i = 0; i < expression_base_index(); i++) {
-    SpillElementAt(i);
-  }
-}
-
-
-void VirtualFrame::SetElementAt(int index, Result* value) {
-  int frame_index = element_count() - index - 1;
-  ASSERT(frame_index >= 0);
-  ASSERT(frame_index < element_count());
-  ASSERT(value->is_valid());
-  FrameElement original = elements_[frame_index];
-
-  // Early exit if the element is the same as the one being set.
-  bool same_register = original.is_register()
-      && value->is_register()
-      && original.reg().is(value->reg());
-  bool same_constant = original.is_constant()
-      && value->is_constant()
-      && original.handle().is_identical_to(value->handle());
-  if (same_register || same_constant) {
-    value->Unuse();
-    return;
-  }
-
-  InvalidateFrameSlotAt(frame_index);
-
-  if (value->is_register()) {
-    if (is_used(value->reg())) {
-      // The register already appears on the frame.  Either the existing
-      // register element, or the new element at frame_index, must be made
-      // a copy.
-      int i = register_location(value->reg());
-
-      if (i < frame_index) {
-        // The register FrameElement is lower in the frame than the new copy.
-        elements_[frame_index] = CopyElementAt(i);
-      } else {
-        // There was an early bailout for the case of setting a
-        // register element to itself.
-        ASSERT(i != frame_index);
-        elements_[frame_index] = elements_[i];
-        elements_[i] = CopyElementAt(frame_index);
-        if (elements_[frame_index].is_synced()) {
-          elements_[i].set_sync();
-        }
-        elements_[frame_index].clear_sync();
-        set_register_location(value->reg(), frame_index);
-        for (int j = i + 1; j < element_count(); j++) {
-          if (elements_[j].is_copy() && elements_[j].index() == i) {
-            elements_[j].set_index(frame_index);
-          }
-        }
-      }
-    } else {
-      // The register value->reg() was not already used on the frame.
-      Use(value->reg(), frame_index);
-      elements_[frame_index] =
-          FrameElement::RegisterElement(value->reg(),
-                                        FrameElement::NOT_SYNCED,
-                                        value->number_info());
-    }
-  } else {
-    ASSERT(value->is_constant());
-    elements_[frame_index] =
-        FrameElement::ConstantElement(value->handle(),
-                                      FrameElement::NOT_SYNCED);
-  }
-  value->Unuse();
-}
-
-
-void VirtualFrame::PushFrameSlotAt(int index) {
-  elements_.Add(CopyElementAt(index));
-}
-
-
-void VirtualFrame::Push(Register reg, NumberInfo::Type info) {
-  if (is_used(reg)) {
-    int index = register_location(reg);
-    FrameElement element = CopyElementAt(index, info);
-    elements_.Add(element);
-  } else {
-    Use(reg, element_count());
-    FrameElement element =
-        FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, info);
-    elements_.Add(element);
-  }
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
-  FrameElement element =
-      FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
-  elements_.Add(element);
-}
-
-
-void VirtualFrame::Nip(int num_dropped) {
-  ASSERT(num_dropped >= 0);
-  if (num_dropped == 0) return;
-  Result tos = Pop();
-  if (num_dropped > 1) {
-    Drop(num_dropped - 1);
-  }
-  SetElementAt(0, &tos);
-}
-
-
-bool VirtualFrame::Equals(VirtualFrame* other) {
-#ifdef DEBUG
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    if (register_location(i) != other->register_location(i)) {
-      return false;
-    }
-  }
-  if (element_count() != other->element_count()) return false;
-#endif
-  if (stack_pointer_ != other->stack_pointer_) return false;
-  for (int i = 0; i < element_count(); i++) {
-    if (!elements_[i].Equals(other->elements_[i])) return false;
-  }
-
-  return true;
-}
-
-
 // Specialization of List::ResizeAdd to non-inlined version for FrameElements.
 // The function ResizeAdd becomes a real function, whose implementation is the
 // inlined ResizeAddInternal.
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
new file mode 100644
index 0000000..4df2cfd
--- /dev/null
+++ b/src/vm-state-inl.h
@@ -0,0 +1,134 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VM_STATE_INL_H_
+#define V8_VM_STATE_INL_H_
+
+#include "vm-state.h"
+
+namespace v8 {
+namespace internal {
+
+//
+// VMState class implementation.  A simple stack of VM states held by the
+// logger and partially threaded through the call stack.  States are pushed by
+// VMState construction and popped by destruction.
+//
+#ifdef ENABLE_VMSTATE_TRACKING
+inline const char* StateToString(StateTag state) {
+  switch (state) {
+    case JS:
+      return "JS";
+    case GC:
+      return "GC";
+    case COMPILER:
+      return "COMPILER";
+    case OTHER:
+      return "OTHER";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+VMState::VMState(StateTag state)
+    : disabled_(true),
+      state_(OTHER),
+      external_callback_(NULL) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Logger::is_logging() && !CpuProfiler::is_profiling()) {
+    return;
+  }
+#endif
+
+  disabled_ = false;
+#if !defined(ENABLE_HEAP_PROTECTION)
+  // When not protecting the heap, there is no difference between
+  // EXTERNAL and OTHER.  As an optimization in that case, we will not
+  // perform EXTERNAL->OTHER transitions through the API.  We thus
+  // compress the two states into one.
+  if (state == EXTERNAL) state = OTHER;
+#endif
+  state_ = state;
+  previous_ = current_state_;  // Save the previous state.
+  current_state_ = this;       // Install the new state.
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (FLAG_log_state_changes) {
+    LOG(UncheckedStringEvent("Entering", StateToString(state_)));
+    if (previous_ != NULL) {
+      LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
+    }
+  }
+#endif
+
+#ifdef ENABLE_HEAP_PROTECTION
+  if (FLAG_protect_heap) {
+    if (state_ == EXTERNAL) {
+      // We are leaving V8.
+      ASSERT((previous_ != NULL) && (previous_->state_ != EXTERNAL));
+      Heap::Protect();
+    } else if ((previous_ == NULL) || (previous_->state_ == EXTERNAL)) {
+      // We are entering V8.
+      Heap::Unprotect();
+    }
+  }
+#endif
+}
+
+
+VMState::~VMState() {
+  if (disabled_) return;
+  current_state_ = previous_;  // Return to the previous state.
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (FLAG_log_state_changes) {
+    LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
+    if (previous_ != NULL) {
+      LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
+    }
+  }
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+#ifdef ENABLE_HEAP_PROTECTION
+  if (FLAG_protect_heap) {
+    if (state_ == EXTERNAL) {
+      // We are reentering V8.
+      ASSERT((previous_ != NULL) && (previous_->state_ != EXTERNAL));
+      Heap::Unprotect();
+    } else if ((previous_ == NULL) || (previous_->state_ == EXTERNAL)) {
+      // We are leaving V8.
+      Heap::Protect();
+    }
+  }
+#endif  // ENABLE_HEAP_PROTECTION
+}
+#endif  // ENABLE_VMSTATE_TRACKING
+
+} }  // namespace v8::internal
+
+#endif  // V8_VM_STATE_INL_H_
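
The VMState comment above is worth unpacking: because states are pushed in the
constructor and popped in the destructor, the state stack is threaded through
the C++ call stack itself and unwinds correctly on every exit path. A minimal
standalone sketch of that discipline (not the V8 class):

    #include <cassert>

    class ScopedState {
     public:
      explicit ScopedState(int tag) : tag_(tag), previous_(current_) {
        current_ = this;                        // push on construction
      }
      ~ScopedState() { current_ = previous_; }  // pop on destruction
      static int current_tag() { return current_ ? current_->tag_ : 0; }

     private:
      int tag_;
      ScopedState* previous_;
      static ScopedState* current_;
    };

    ScopedState* ScopedState::current_ = 0;

    int main() {
      assert(ScopedState::current_tag() == 0);
      {
        ScopedState js(1);
        { ScopedState gc(2); assert(ScopedState::current_tag() == 2); }
        assert(ScopedState::current_tag() == 1);  // inner scope popped
      }
      assert(ScopedState::current_tag() == 0);    // fully unwound
      return 0;
    }
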
diff --git a/src/usage-analyzer.h b/src/vm-state.cc
similarity index 84%
rename from src/usage-analyzer.h
rename to src/vm-state.cc
index 1b0ea4a..3859efb 100644
--- a/src/usage-analyzer.h
+++ b/src/vm-state.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,16 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_USAGE_ANALYZER_H_
-#define V8_USAGE_ANALYZER_H_
+#include "v8.h"
+
+#include "vm-state.h"
 
 namespace v8 {
 namespace internal {
 
-// Compute usage counts for all variables.
-// Used for variable allocation.
-bool AnalyzeVariableUsage(FunctionLiteral* lit);
+#ifdef ENABLE_VMSTATE_TRACKING
+VMState* VMState::current_state_ = NULL;
+#endif
 
 } }  // namespace v8::internal
-
-#endif  // V8_USAGE_ANALYZER_H_
diff --git a/src/number-info.h b/src/vm-state.h
similarity index 65%
rename from src/number-info.h
rename to src/vm-state.h
index c6f32e4..241df4c 100644
--- a/src/number-info.h
+++ b/src/vm-state.h
@@ -25,48 +25,51 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_NUMBER_INFO_H_
-#define V8_NUMBER_INFO_H_
+#ifndef V8_VM_STATE_H_
+#define V8_VM_STATE_H_
 
 namespace v8 {
 namespace internal {
 
-class NumberInfo : public AllStatic {
+class VMState BASE_EMBEDDED {
+#ifdef ENABLE_VMSTATE_TRACKING
  public:
-  enum Type {
-    kUnknown = 0,
-    kNumber = 1,
-    kSmi = 3,
-    kHeapNumber = 5,
-    kUninitialized = 7
-  };
+  inline VMState(StateTag state);
+  inline ~VMState();
 
-  // Return the weakest (least precise) common type.
-  static Type Combine(Type a, Type b) {
-    // Make use of the order of enum values.
-    return static_cast<Type>(a & b);
+  StateTag state() { return state_; }
+  void set_external_callback(Address external_callback) {
+    external_callback_ = external_callback;
   }
 
-  static bool IsNumber(Type a) {
-    ASSERT(a != kUninitialized);
-    return ((a & kNumber) != 0);
+  // Used for debug asserts.
+  static bool is_outermost_external() {
+    return current_state_ == NULL;
   }
 
-  static const char* ToString(Type a) {
-    switch (a) {
-      case kUnknown: return "UnknownType";
-      case kNumber: return "NumberType";
-      case kSmi: return "SmiType";
-      case kHeapNumber: return "HeapNumberType";
-      case kUninitialized:
-        UNREACHABLE();
-        return "UninitializedType";
-    }
-    UNREACHABLE();
-    return "Unreachable code";
+  static StateTag current_state() {
+    return current_state_ ? current_state_->state() : EXTERNAL;
   }
+
+  static Address external_callback() {
+    return current_state_ ? current_state_->external_callback_ : NULL;
+  }
+
+ private:
+  bool disabled_;
+  StateTag state_;
+  VMState* previous_;
+  Address external_callback_;
+
+  // A stack of VM states.
+  static VMState* current_state_;
+#else
+ public:
+  explicit VMState(StateTag state) {}
+#endif
 };
 
 } }  // namespace v8::internal
 
-#endif  // V8_NUMBER_INFO_H_
+
+#endif  // V8_VM_STATE_H_
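
For the record, the NumberInfo enum retired by this rename encoded its type
lattice directly in the bit patterns, which is why Combine() was a single AND:
the shared bits of two types are exactly their weakest common type. A quick
check using the values from the removed header:

    #include <cassert>

    enum Type {         // values copied from the removed number-info.h
      kUnknown = 0,
      kNumber = 1,      // 0b001
      kSmi = 3,         // 0b011
      kHeapNumber = 5,  // 0b101
      kUninitialized = 7
    };

    int main() {
      // Weakest common type of a smi and a heap number is "some number".
      assert((kSmi & kHeapNumber) == kNumber);
      // Anything combined with kUnknown is kUnknown.
      assert((kHeapNumber & kUnknown) == kUnknown);
      return 0;
    }
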
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 9c7f9b6..c9ab627 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -89,6 +89,11 @@
 }
 
 
+void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
+  emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
+
 void Assembler::emit_rex_64(Register reg, const Operand& op) {
   emit(0x48 | reg.high_bit() << 2 | op.rex_);
 }
@@ -160,6 +165,12 @@
 }
 
 
+void Assembler::emit_optional_rex_32(Register reg, XMMRegister base) {
+  byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+  if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
 void Assembler::emit_optional_rex_32(Register rm_reg) {
   if (rm_reg.high_bit()) emit(0x41);
 }
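
All of the REX helpers in this file build variations of one prefix byte, laid
out 0100WRXB: 0x48 contributes the fixed 0100 pattern plus W (64-bit operand
size), bit 2 (R) extends the reg field, and bit 0 (B) extends the r/m field,
which is what the (code & 0x8) >> 1 and code >> 3 shuffles in the new
overloads compute. A standalone illustration using the usual x64 register
codes (rax = 0, r8 = 8):

    #include <cassert>

    // REX prefix layout: 0 1 0 0 W R X B.
    int rex_64(int reg_code, int rm_code) {
      return 0x48 | ((reg_code & 0x8) >> 1) | ((rm_code & 0x8) >> 3);
    }

    int main() {
      assert(rex_64(0, 0) == 0x48);  // rax, rax: just REX.W
      assert(rex_64(8, 0) == 0x4C);  // r8 in the reg field sets REX.R
      assert(rex_64(0, 8) == 0x49);  // r8 in the r/m field sets REX.B
      assert(rex_64(8, 8) == 0x4D);  // both extension bits
      return 0;
    }
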
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index a994f45..1c00ebc 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -116,11 +116,13 @@
 
   CodeDesc desc;
   assm.GetCode(&desc);
-  Object* code =
-      Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
+  Object* code = Heap::CreateCode(desc,
+                                  NULL,
+                                  Code::ComputeFlags(Code::STUB),
+                                  Handle<Object>());
   if (!code->IsCode()) return;
-  LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
-                      Code::cast(code), "CpuFeatures::Probe"));
+  PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
+                          Code::cast(code), "CpuFeatures::Probe"));
   typedef uint64_t (*F0)();
   F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
   supported_ = probe();
@@ -1030,6 +1032,22 @@
 }
 
 
+void Assembler::imull(Register dst, Register src, Immediate imm) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  if (is_int8(imm.value_)) {
+    emit(0x6B);
+    emit_modrm(dst, src);
+    emit(imm.value_);
+  } else {
+    emit(0x69);
+    emit_modrm(dst, src);
+    emitl(imm.value_);
+  }
+}
+
+
 void Assembler::incq(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1122,23 +1140,25 @@
 void Assembler::jmp(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
+  const int short_size = sizeof(int8_t);
+  const int long_size = sizeof(int32_t);
   if (L->is_bound()) {
     int offs = L->pos() - pc_offset() - 1;
     ASSERT(offs <= 0);
-    if (is_int8(offs - sizeof(int8_t))) {
+    if (is_int8(offs - short_size)) {
       // 1110 1011 #8-bit disp.
       emit(0xEB);
-      emit((offs - sizeof(int8_t)) & 0xFF);
+      emit((offs - short_size) & 0xFF);
     } else {
       // 1110 1001 #32-bit disp.
       emit(0xE9);
-      emitl(offs - sizeof(int32_t));
+      emitl(offs - long_size);
     }
   } else  if (L->is_linked()) {
     // 1110 1001 #32-bit disp.
     emit(0xE9);
     emitl(L->pos());
-    L->link_to(pc_offset() - sizeof(int32_t));
+    L->link_to(pc_offset() - long_size);
   } else {
     // 1110 1001 #32-bit disp.
     ASSERT(L->is_unused());
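
The named constants introduced in jmp() above also keep the displacement
arithmetic in signed int (the bare sizeof() expressions were size_t). The
encoding decision they feed: a backward jump to a bound label uses the 2-byte
EB form when the displacement, measured past the end of the instruction, fits
in a signed byte, and the 5-byte E9 form otherwise. A standalone sketch of
just that decision (not the emitter itself):

    #include <cassert>
    #include <stdint.h>

    static bool is_int8(int x) { return x >= -128 && x <= 127; }

    // Mirrors the bound-label case above: offs is the label position minus
    // the current pc_offset() minus one opcode byte, and is always <= 0.
    static int jmp_size(int label_pos, int pc_offset) {
      int offs = label_pos - pc_offset - 1;
      assert(offs <= 0);
      const int short_size = sizeof(int8_t);
      // The displacement is relative to the end of the instruction.
      return is_int8(offs - short_size) ? 2 /* EB disp8 */
                                        : 5 /* E9 disp32 */;
    }

    int main() {
      assert(jmp_size(100, 110) == 2);  // nearby backward jump
      assert(jmp_size(0, 1000) == 5);   // far backward jump needs disp32
      return 0;
    }
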
@@ -1190,6 +1210,15 @@
 }
 
 
+void Assembler::leal(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst, src);
+  emit(0x8D);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1219,6 +1248,7 @@
   emit_operand(dst, src);
 }
 
+
 void Assembler::movb(Register dst, Immediate imm) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1228,6 +1258,7 @@
   emit(imm.value_);
 }
 
+
 void Assembler::movb(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1236,6 +1267,7 @@
   emit_operand(src, dst);
 }
 
+
 void Assembler::movw(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1245,6 +1277,7 @@
   emit_operand(src, dst);
 }
 
+
 void Assembler::movl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1600,6 +1633,15 @@
 }
 
 
+void Assembler::notl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xF7);
+  emit_modrm(0x2, dst);
+}
+
+
 void Assembler::nop(int n) {
   // The recommended multi-byte sequences of NOP instructions from the Intel 64
   // and IA-32 Architectures Software Developer's Manual.
@@ -2000,6 +2042,14 @@
 }
 
 
+void Assembler::fldpi() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xEB);
+}
+
+
 void Assembler::fld_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2347,6 +2397,64 @@
 
 // SSE 2 operations.
 
+void Assembler::movd(XMMRegister dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x6E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movd(Register dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x7E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movq(XMMRegister dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0x6E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movq(Register dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0x7E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
+  ASSERT(is_uint2(imm8));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x3A);
+  emit(0x17);
+  emit_sse_operand(dst, src);
+  emit(imm8);
+}
+
+
 void Assembler::movsd(const Operand& dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2435,6 +2543,17 @@
 }
 
 
+void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5A);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2484,12 +2603,23 @@
   last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
-  emit(0x0f);
+  emit(0x0F);
   emit(0x57);
   emit_sse_operand(dst, src);
 }
 
 
+void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x51);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::comisd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2526,6 +2656,10 @@
   emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
 }
 
+void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
+  emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
+}
+
 
 // Relocation information implementations.
 
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 5019525..d077865 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -742,14 +742,16 @@
   void imul(Register dst, Register src);                 // dst = dst * src.
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, Immediate imm);  // dst = src * imm.
-  // Multiply 32 bit registers
-  void imull(Register dst, Register src);                // dst = dst * src.
+  // Signed 32-bit multiply instructions.
+  void imull(Register dst, Register src);                 // dst = dst * src.
+  void imull(Register dst, Register src, Immediate imm);  // dst = src * imm.
 
   void incq(Register dst);
   void incq(const Operand& dst);
   void incl(const Operand& dst);
 
   void lea(Register dst, const Operand& src);
+  void leal(Register dst, const Operand& src);
 
   // Multiply rax by src, put the result in rdx:rax.
   void mul(Register src);
@@ -760,6 +762,7 @@
 
   void not_(Register dst);
   void not_(const Operand& dst);
+  void notl(Register dst);
 
   void or_(Register dst, Register src) {
     arithmetic_op(0x0B, dst, src);
@@ -1016,6 +1019,7 @@
 
   void fld1();
   void fldz();
+  void fldpi();
 
   void fld_s(const Operand& adr);
   void fld_d(const Operand& adr);
@@ -1076,9 +1080,15 @@
   void sahf();
 
   // SSE2 instructions
+  void movd(XMMRegister dst, Register src);
+  void movd(Register dst, XMMRegister src);
+  void movq(XMMRegister dst, Register src);
+  void movq(Register dst, XMMRegister src);
+  void extractps(Register dst, XMMRegister src, byte imm8);
+
   void movsd(const Operand& dst, XMMRegister src);
-  void movsd(XMMRegister src, XMMRegister dst);
-  void movsd(XMMRegister src, const Operand& dst);
+  void movsd(XMMRegister dst, XMMRegister src);
+  void movsd(XMMRegister dst, const Operand& src);
 
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);
@@ -1088,19 +1098,24 @@
   void cvtqsi2sd(XMMRegister dst, const Operand& src);
   void cvtqsi2sd(XMMRegister dst, Register src);
 
+  void cvtss2sd(XMMRegister dst, XMMRegister src);
+
   void addsd(XMMRegister dst, XMMRegister src);
   void subsd(XMMRegister dst, XMMRegister src);
   void mulsd(XMMRegister dst, XMMRegister src);
   void divsd(XMMRegister dst, XMMRegister src);
 
   void xorpd(XMMRegister dst, XMMRegister src);
+  void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void comisd(XMMRegister dst, XMMRegister src);
   void ucomisd(XMMRegister dst, XMMRegister src);
 
+  // The first argument is the reg field, the second argument is the r/m field.
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
   void emit_sse_operand(XMMRegister reg, const Operand& adr);
   void emit_sse_operand(XMMRegister dst, Register src);
+  void emit_sse_operand(Register dst, XMMRegister src);
 
   // Use either movsd or movlpd.
   // void movdbl(XMMRegister dst, const Operand& src);
@@ -1167,8 +1182,9 @@
   // the top bit of both register codes.
   // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
   // REX.W is set.
-  inline void emit_rex_64(Register reg, Register rm_reg);
   inline void emit_rex_64(XMMRegister reg, Register rm_reg);
+  inline void emit_rex_64(Register reg, XMMRegister rm_reg);
+  inline void emit_rex_64(Register reg, Register rm_reg);
 
   // Emits a REX prefix that encodes a 64-bit operand size and
   // the top bit of the destination, index, and base register codes.
@@ -1226,9 +1242,13 @@
   inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
 
   // As for emit_optional_rex_32(Register, Register), except that
-  // the registers are XMM registers.
+  // one of the registers is an XMM register.
   inline void emit_optional_rex_32(XMMRegister reg, Register base);
 
+  // As for emit_optional_rex_32(Register, Register), except that
+  // one of the registers is an XMM register.
+  inline void emit_optional_rex_32(Register reg, XMMRegister base);
+
   // As for emit_optional_rex_32(Register, const Operand&), except that
   // the register is an XMM register.
   inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index b3c5e33..c55a4ea 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -61,10 +61,10 @@
     ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
-  // JumpToRuntime expects rax to contain the number of arguments
+  // JumpToExternalReference expects rax to contain the number of arguments
   // including the receiver and the extra arguments.
   __ addq(rax, Immediate(num_extra_args + 1));
-  __ JumpToRuntime(ExternalReference(id), 1);
+  __ JumpToExternalReference(ExternalReference(id), 1);
 }
 
 
@@ -1212,7 +1212,7 @@
   __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
   // Load the function pointer into rdi.
   __ movq(rdi, rdx);
-#else  // !defined(_WIN64)
+#else  // _WIN64
   // GCC parameters in:
   // rdi : entry (ignored)
   // rsi : function
@@ -1240,7 +1240,7 @@
 
   // Set up the roots register.
   ExternalReference roots_address = ExternalReference::roots_address();
-  __ movq(r13, roots_address);
+  __ movq(kRootRegister, roots_address);
 
   // Current stack contents:
   // [rsp + 2 * kPointerSize ... ]: Internal frame
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index e418883..39f543d 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -36,6 +36,7 @@
 #include "regexp-macro-assembler.h"
 #include "register-allocator-inl.h"
 #include "scopes.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -190,6 +191,34 @@
 };
 
 
+// Call the appropriate binary operation stub to compute value op src
+// and leave the result in dst.
+class DeferredInlineSmiOperationReversed: public DeferredCode {
+ public:
+  DeferredInlineSmiOperationReversed(Token::Value op,
+                                     Register dst,
+                                     Smi* value,
+                                     Register src,
+                                     OverwriteMode overwrite_mode)
+      : op_(op),
+        dst_(dst),
+        value_(value),
+        src_(src),
+        overwrite_mode_(overwrite_mode) {
+    set_comment("[ DeferredInlineSmiOperationReversed");
+  }
+
+  virtual void Generate();
+
+ private:
+  Token::Value op_;
+  Register dst_;
+  Smi* value_;
+  Register src_;
+  OverwriteMode overwrite_mode_;
+};
+
+
 class FloatingPointHelper : public AllStatic {
  public:
   // Code pattern for loading a floating point value. Input value must
@@ -201,11 +230,21 @@
   // Code pattern for loading a floating point value. Input value must
   // be either a smi or a heap number object (fp value). Requirements:
   // operand in src register. Returns operand as floating point number
-  // in XMM register
+  // in an XMM register.  May destroy the src register.
   static void LoadFloatOperand(MacroAssembler* masm,
                                Register src,
                                XMMRegister dst);
 
+  // Code pattern for loading a possible number into an XMM register.
+  // If src does not contain a number, control branches to the Label
+  // not_number.  If src contains a smi or a heap number object (fp
+  // value), it is loaded into the XMM register as a double.  The
+  // register src is not changed, and src may not be kScratchRegister.
+  static void LoadFloatOperand(MacroAssembler* masm,
+                               Register src,
+                               XMMRegister dst,
+                               Label* not_number);
+
   // Code pattern for loading floating point values. Input values must
   // be either smi or heap number objects (fp values). Requirements:
   // operand_1 in rdx, operand_2 in rax; Returns operands as
@@ -259,9 +298,6 @@
 }
 
 
-Scope* CodeGenerator::scope() { return info_->function()->scope(); }
-
-
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.  The inevitable call
   // will sync frame elements to memory anyway, so we do it eagerly to
@@ -280,6 +316,7 @@
 void CodeGenerator::Generate(CompilationInfo* info) {
   // Record the position for debugging purposes.
   CodeForFunctionPosition(info->function());
+  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
 
   // Initialize state.
   info_ = info;
@@ -291,6 +328,7 @@
   set_in_spilled_code(false);
 
   // Adjust for function-level loop nesting.
+  ASSERT_EQ(0, loop_nesting_);
   loop_nesting_ += info->loop_nesting();
 
   JumpTarget::set_compiling_deferred_code(false);
@@ -304,8 +342,7 @@
 #endif
 
   // New scope to get automatic timing calculation.
-  {  // NOLINT
-    HistogramTimerScope codegen_timer(&Counters::code_generation);
+  { HistogramTimerScope codegen_timer(&Counters::code_generation);
     CodeGenState state(this);
 
     // Entry:
@@ -484,11 +521,11 @@
   }
 
   // Adjust for function-level loop nesting.
-  loop_nesting_ -= info->loop_nesting();
+  ASSERT_EQ(loop_nesting_, info->loop_nesting());
+  loop_nesting_ = 0;
 
   // Code generation state must be reset.
   ASSERT(state_ == NULL);
-  ASSERT(loop_nesting() == 0);
   ASSERT(!function_return_is_shadowed_);
   function_return_.Unuse();
   DeleteFrame();
@@ -1495,6 +1532,26 @@
 }
 
 
+void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
+  ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
+  if (slot->type() == Slot::LOCAL) {
+    frame_->SetTypeForLocalAt(slot->index(), info);
+  } else {
+    frame_->SetTypeForParamAt(slot->index(), info);
+  }
+  if (FLAG_debug_code && info.IsSmi()) {
+    if (slot->type() == Slot::LOCAL) {
+      frame_->PushLocalAt(slot->index());
+    } else {
+      frame_->PushParameterAt(slot->index());
+    }
+    Result var = frame_->Pop();
+    var.ToRegister();
+    __ AbortIfNotSmi(var.reg(), "Non-smi value in smi-typed stack slot.");
+  }
+}
+
+
 void CodeGenerator::VisitForStatement(ForStatement* node) {
   ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ ForStatement");
@@ -1588,6 +1645,17 @@
   }
 
   CheckStack();  // TODO(1222600): ignore if body contains calls.
+
+  // We know that the loop index is a smi if it is not modified in the
+  // loop body and it is checked against a constant limit in the loop
+  // condition.  In this case, we reset the static type information of the
+  // loop index to smi before compiling the body, the update expression, and
+  // the bottom check of the loop condition.
+  if (node->is_fast_smi_loop()) {
+    // Set number type of the loop variable to smi.
+    SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
+  }
+
   Visit(node->body());
 
   // If there is an update expression, compile it if necessary.
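A concrete example of the fast-smi-loop case described above: in a loop like for (var i = 0; i < 100; i++) { sum += i; }, the loop variable is modified only by the update expression and is compared against a constant smi limit, so its static type can safely be reset to smi before the body, the update expression, and the bottom check are compiled.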
@@ -1607,6 +1675,13 @@
     }
   }
 
+  // Set the type of the loop variable to smi before compiling the test
+  // expression if we are in a fast smi loop condition.
+  if (node->is_fast_smi_loop() && has_valid_frame()) {
+    // Set number type of the loop variable to smi.
+    SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
+  }
+
   // Based on the condition analysis, compile the backward jump as
   // necessary.
   switch (info) {
@@ -2230,9 +2305,8 @@
 }
 
 
-void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
-  ASSERT(boilerplate->IsBoilerplate());
-
+void CodeGenerator::InstantiateFunction(
+    Handle<SharedFunctionInfo> function_info) {
   // The inevitable call will sync frame elements to memory anyway, so
   // we do it eagerly to allow us to push the arguments directly into
   // place.
@@ -2240,16 +2314,16 @@
 
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
-  if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+  if (scope()->is_function_scope() && function_info->num_literals() == 0) {
     FastNewClosureStub stub;
-    frame_->Push(boilerplate);
+    frame_->Push(function_info);
     Result answer = frame_->CallStub(&stub, 1);
     frame_->Push(&answer);
   } else {
-    // Call the runtime to instantiate the function boilerplate
-    // object.
+    // Call the runtime to instantiate the function based on the
+    // shared function info.
     frame_->EmitPush(rsi);
-    frame_->EmitPush(boilerplate);
+    frame_->EmitPush(function_info);
     Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
     frame_->Push(&result);
   }
@@ -2259,19 +2333,19 @@
 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
-  // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script(), this);
+  // Build the function info and instantiate it.
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) return;
-  InstantiateBoilerplate(boilerplate);
+  InstantiateFunction(function_info);
 }
 
 
-void CodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
-  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
-  InstantiateBoilerplate(node->boilerplate());
+void CodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
+  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
+  InstantiateFunction(node->shared_function_info());
 }
 
 
@@ -2425,11 +2499,13 @@
   frame_->Push(Smi::FromInt(node->literal_index()));
   // Constant properties.
   frame_->Push(node->constant_properties());
+  // Should the object literal have fast elements?
+  frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
   Result clone;
   if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   }
   frame_->Push(&clone);
 
@@ -2634,8 +2710,9 @@
         target.GetValue();
       }
       Load(node->value());
-      GenericBinaryOperation(node->binary_op(),
-                             node->type(),
+      BinaryOperation expr(node, node->binary_op(), node->target(),
+                           node->value());
+      GenericBinaryOperation(&expr,
                              overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
     }
 
@@ -3079,6 +3156,7 @@
         GenericUnaryOpStub stub(Token::SUB, overwrite);
         Result operand = frame_->Pop();
         Result answer = frame_->CallStub(&stub, &operand);
+        answer.set_type_info(TypeInfo::Number());
         frame_->Push(&answer);
         break;
       }
@@ -3102,6 +3180,7 @@
         frame_->Spill(answer.reg());
         __ SmiNot(answer.reg(), answer.reg());
         continue_label.Bind(&answer);
+        answer.set_type_info(TypeInfo::Smi());
         frame_->Push(&answer);
         break;
       }
@@ -3110,6 +3189,7 @@
         // Smi check.
         JumpTarget continue_label;
         Result operand = frame_->Pop();
+        TypeInfo operand_info = operand.type_info();
         operand.ToRegister();
         Condition is_smi = masm_->CheckSmi(operand.reg());
         continue_label.Branch(is_smi, &operand);
@@ -3118,10 +3198,16 @@
                                               CALL_FUNCTION, 1);
 
         continue_label.Bind(&answer);
+        if (operand_info.IsSmi()) {
+          answer.set_type_info(TypeInfo::Smi());
+        } else if (operand_info.IsInteger32()) {
+          answer.set_type_info(TypeInfo::Integer32());
+        } else {
+          answer.set_type_info(TypeInfo::Number());
+        }
         frame_->Push(&answer);
         break;
       }
-
       default:
         UNREACHABLE();
     }
@@ -3129,14 +3215,16 @@
 }
 
 
-// The value in dst was optimistically incremented or decremented.  The
-// result overflowed or was not smi tagged.  Undo the operation, call
-// into the runtime to convert the argument to a number, and call the
-// specialized add or subtract stub.  The result is left in dst.
+// The value in dst was optimistically incremented or decremented.
+// The result overflowed or was not smi tagged.  Call into the runtime
+// to convert the argument to a number, and call the specialized add
+// or subtract stub.  The result is left in dst.
 class DeferredPrefixCountOperation: public DeferredCode {
  public:
-  DeferredPrefixCountOperation(Register dst, bool is_increment)
-      : dst_(dst), is_increment_(is_increment) {
+  DeferredPrefixCountOperation(Register dst,
+                               bool is_increment,
+                               TypeInfo input_type)
+      : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
     set_comment("[ DeferredCountOperation");
   }
 
@@ -3145,32 +3233,45 @@
  private:
   Register dst_;
   bool is_increment_;
+  TypeInfo input_type_;
 };
 
 
 void DeferredPrefixCountOperation::Generate() {
-  __ push(dst_);
-  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-  __ push(rax);
-  __ Push(Smi::FromInt(1));
-  if (is_increment_) {
-    __ CallRuntime(Runtime::kNumberAdd, 2);
+  Register left;
+  if (input_type_.IsNumber()) {
+    left = dst_;
   } else {
-    __ CallRuntime(Runtime::kNumberSub, 2);
+    __ push(dst_);
+    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+    left = rax;
   }
+
+  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+                           NO_OVERWRITE,
+                           NO_GENERIC_BINARY_FLAGS,
+                           TypeInfo::Number());
+  stub.GenerateCall(masm_, left, Smi::FromInt(1));
+
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
 
-// The value in dst was optimistically incremented or decremented.  The
-// result overflowed or was not smi tagged.  Undo the operation and call
-// into the runtime to convert the argument to a number.  Update the
-// original value in old.  Call the specialized add or subtract stub.
-// The result is left in dst.
+// The value in dst was optimistically incremented or decremented.
+// The result overflowed or was not smi tagged.  Call into the runtime
+// to convert the argument to a number.  Update the original value in
+// old.  Call the specialized add or subtract stub.  The result is
+// left in dst.
 class DeferredPostfixCountOperation: public DeferredCode {
  public:
-  DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
-      : dst_(dst), old_(old), is_increment_(is_increment) {
+  DeferredPostfixCountOperation(Register dst,
+                                Register old,
+                                bool is_increment,
+                                TypeInfo input_type)
+      : dst_(dst),
+        old_(old),
+        is_increment_(is_increment),
+        input_type_(input_type) {
     set_comment("[ DeferredCountOperation");
   }
 
@@ -3180,24 +3281,28 @@
   Register dst_;
   Register old_;
   bool is_increment_;
+  TypeInfo input_type_;
 };
 
 
 void DeferredPostfixCountOperation::Generate() {
-  __ push(dst_);
-  __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-
-  // Save the result of ToNumber to use as the old value.
-  __ push(rax);
-
-  // Call the runtime for the addition or subtraction.
-  __ push(rax);
-  __ Push(Smi::FromInt(1));
-  if (is_increment_) {
-    __ CallRuntime(Runtime::kNumberAdd, 2);
+  Register left;
+  if (input_type_.IsNumber()) {
+    __ push(dst_);  // Save the input to use as the old value.
+    left = dst_;
   } else {
-    __ CallRuntime(Runtime::kNumberSub, 2);
+    __ push(dst_);
+    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+    __ push(rax);  // Save the result of ToNumber to use as the old value.
+    left = rax;
   }
+
+  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
+                           NO_OVERWRITE,
+                           NO_GENERIC_BINARY_FLAGS,
+                           TypeInfo::Number());
+  stub.GenerateCall(masm_, left, Smi::FromInt(1));
+
   if (!dst_.is(rax)) __ movq(dst_, rax);
   __ pop(old_);
 }
@@ -3238,6 +3343,14 @@
       old_value = allocator_->Allocate();
       ASSERT(old_value.is_valid());
       __ movq(old_value.reg(), new_value.reg());
+
+      // The return value for postfix operations is ToNumber(input).
+      // Keep more precise type info if the input is some kind of
+      // number already. If the input is not a number we have to wait
+      // for the deferred code to convert it.
+      if (new_value.type_info().IsNumber()) {
+        old_value.set_type_info(new_value.type_info());
+      }
     }
     // Ensure the new value is writable.
     frame_->Spill(new_value.reg());
@@ -3246,10 +3359,12 @@
     if (is_postfix) {
       deferred = new DeferredPostfixCountOperation(new_value.reg(),
                                                    old_value.reg(),
-                                                   is_increment);
+                                                   is_increment,
+                                                   new_value.type_info());
     } else {
       deferred = new DeferredPrefixCountOperation(new_value.reg(),
-                                                  is_increment);
+                                                  is_increment,
+                                                  new_value.type_info());
     }
 
     __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
@@ -3267,6 +3382,15 @@
     __ movq(new_value.reg(), kScratchRegister);
     deferred->BindExit();
 
+    // Postfix count operations return their input converted to
+    // number. The case when the input is already a number is covered
+    // above in the allocation code for old_value.
+    if (is_postfix && !new_value.type_info().IsNumber()) {
+      old_value.set_type_info(TypeInfo::Number());
+    }
+
+    new_value.set_type_info(TypeInfo::Number());
+
     // Postfix: store the old value in the allocated slot under the
     // reference.
     if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
@@ -3281,13 +3405,7 @@
 }
 
 
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-  // TODO(X64): This code was copied verbatim from codegen-ia32.
-  //     Either find a reason to change it or move it to a shared location.
-
-  Comment cmnt(masm_, "[ BinaryOperation");
-  Token::Value op = node->op();
-
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
   // According to ECMA-262 section 11.11, page 58, the binary logical
   // operators must yield the result of one of the two expressions
   // before any ToBoolean() conversions. This means that the value
@@ -3297,7 +3415,7 @@
   // control flow), we force the right hand side to do the same. This
   // is necessary because we assume that if we get control flow on the
   // last path out of an expression we got it on all paths.
-  if (op == Token::AND) {
+  if (node->op() == Token::AND) {
     JumpTarget is_true;
     ControlDestination dest(&is_true, destination()->false_target(), true);
     LoadCondition(node->left(), &dest, false);
@@ -3360,7 +3478,8 @@
       exit.Bind();
     }
 
-  } else if (op == Token::OR) {
+  } else {
+    ASSERT(node->op() == Token::OR);
     JumpTarget is_false;
     ControlDestination dest(destination()->true_target(), &is_false, false);
     LoadCondition(node->left(), &dest, false);
@@ -3421,7 +3540,14 @@
       // Exit (always with a materialized value).
       exit.Bind();
     }
+  }
+}
 
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+  Comment cmnt(masm_, "[ BinaryOperation");
+
+  if (node->op() == Token::AND || node->op() == Token::OR) {
+    GenerateLogicalBooleanOperation(node);
   } else {
     // NOTE: The code below assumes that the slow cases (calls to runtime)
     // never return a constant/immutable object.
@@ -3434,9 +3560,16 @@
       overwrite_mode = OVERWRITE_RIGHT;
     }
 
-    Load(node->left());
-    Load(node->right());
-    GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
+    if (node->left()->IsTrivial()) {
+      Load(node->right());
+      Result right = frame_->Pop();
+      frame_->Push(node->left());
+      frame_->Push(&right);
+    } else {
+      Load(node->left());
+      Load(node->right());
+    }
+    GenericBinaryOperation(node, overwrite_mode);
   }
 }
 
@@ -3601,7 +3734,7 @@
 }
 
 
-void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in rdx and the formal
@@ -3732,12 +3865,32 @@
 
 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
-  // ArgumentsAccessStub takes the parameter count as an input argument
-  // in register eax.  Create a constant result for it.
-  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
-  // Call the shared stub to get to the arguments.length.
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
-  Result result = frame_->CallStub(&stub, &count);
+
+  Result fp = allocator_->Allocate();
+  Result result = allocator_->Allocate();
+  ASSERT(fp.is_valid() && result.is_valid());
+
+  Label exit;
+
+  // Get the number of formal parameters.
+  __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &exit);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ movq(result.reg(),
+          Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  result.set_type_info(TypeInfo::Smi());
+  if (FLAG_debug_code) {
+    __ AbortIfNotSmi(result.reg(), "Computed arguments.length is not a smi.");
+  }
   frame_->Push(&result);
 }
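The new inline code above replaces the ArgumentsAccessStub call with a direct frame walk: start from the formal parameter count, and if the caller turns out to be an arguments adaptor frame, read the actual argument count from that frame instead. In C++ terms the logic is roughly the following sketch; the helper names are invented for illustration and are not V8 API:

  // What the emitted code computes for arguments.length.
  int ArgumentsLength(Frame* current, int num_formal_parameters) {
    Frame* caller = current->caller();         // Operand(rbp, kCallerFPOffset)
    if (caller->IsArgumentsAdaptorFrame()) {   // context slot holds the sentinel
      return caller->ActualArgumentCount();    // kLengthOffset slot, a smi
    }
    return num_formal_parameters;              // no adaptor: counts agree
  }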
 
@@ -3746,43 +3899,11 @@
   Comment(masm_, "[ GenerateFastCharCodeAt");
   ASSERT(args->length() == 2);
 
-  Label slow_case;
-  Label end;
-  Label not_a_flat_string;
-  Label try_again_with_new_string;
-  Label ascii_string;
-  Label got_char_code;
-
   Load(args->at(0));
   Load(args->at(1));
   Result index = frame_->Pop();
   Result object = frame_->Pop();
 
-  // Get register rcx to use as shift amount later.
-  Result shift_amount;
-  if (object.is_register() && object.reg().is(rcx)) {
-    Result fresh = allocator_->Allocate();
-    shift_amount = object;
-    object = fresh;
-    __ movq(object.reg(), rcx);
-  }
-  if (index.is_register() && index.reg().is(rcx)) {
-    Result fresh = allocator_->Allocate();
-    shift_amount = index;
-    index = fresh;
-    __ movq(index.reg(), rcx);
-  }
-  // There could be references to ecx in the frame. Allocating will
-  // spill them, otherwise spill explicitly.
-  if (shift_amount.is_valid()) {
-    frame_->Spill(rcx);
-  } else {
-    shift_amount = allocator()->Allocate(rcx);
-  }
-  ASSERT(shift_amount.is_register());
-  ASSERT(shift_amount.reg().is(rcx));
-  ASSERT(allocator_->count(rcx) == 1);
-
   // We will mutate the index register and possibly the object register.
   // The case where they are somehow the same register is handled
   // because we only mutate them in the case where the receiver is a
@@ -3792,89 +3913,61 @@
   frame_->Spill(object.reg());
   frame_->Spill(index.reg());
 
-  // We need a single extra temporary register.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
+  // We need two extra registers.
+  Result result = allocator()->Allocate();
+  ASSERT(result.is_valid());
+  Result scratch = allocator()->Allocate();
+  ASSERT(scratch.is_valid());
 
   // There is no virtual frame effect from here up to the final result
   // push.
-
-  // If the receiver is a smi trigger the slow case.
-  __ JumpIfSmi(object.reg(), &slow_case);
-
-  // If the index is negative or non-smi trigger the slow case.
-  __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
-
-  // Untag the index.
-  __ SmiToInteger32(index.reg(), index.reg());
-
-  __ bind(&try_again_with_new_string);
-  // Fetch the instance type of the receiver into rcx.
-  __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
-  __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
-  // If the receiver is not a string trigger the slow case.
-  __ testb(rcx, Immediate(kIsNotStringMask));
-  __ j(not_zero, &slow_case);
-
-  // Check for index out of range.
-  __ cmpl(index.reg(), FieldOperand(object.reg(), String::kLengthOffset));
-  __ j(greater_equal, &slow_case);
-  // Reload the instance type (into the temp register this time)..
-  __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
-  __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
-
-  // We need special handling for non-flat strings.
-  ASSERT_EQ(0, kSeqStringTag);
-  __ testb(temp.reg(), Immediate(kStringRepresentationMask));
-  __ j(not_zero, &not_a_flat_string);
-  // Check for 1-byte or 2-byte string.
-  ASSERT_EQ(0, kTwoByteStringTag);
-  __ testb(temp.reg(), Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string);
-
-  // 2-byte string.
-  // Load the 2-byte character code into the temp register.
-  __ movzxwl(temp.reg(), FieldOperand(object.reg(),
-                                      index.reg(),
-                                      times_2,
-                                      SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code);
-
-  // ASCII string.
-  __ bind(&ascii_string);
-  // Load the byte into the temp register.
-  __ movzxbl(temp.reg(), FieldOperand(object.reg(),
-                                      index.reg(),
-                                      times_1,
-                                      SeqAsciiString::kHeaderSize));
-  __ bind(&got_char_code);
-  __ Integer32ToSmi(temp.reg(), temp.reg());
-  __ jmp(&end);
-
-  // Handle non-flat strings.
-  __ bind(&not_a_flat_string);
-  __ and_(temp.reg(), Immediate(kStringRepresentationMask));
-  __ cmpb(temp.reg(), Immediate(kConsStringTag));
-  __ j(not_equal, &slow_case);
-
-  // ConsString.
-  // Check that the right hand side is the empty string (ie if this is really a
-  // flat string in a cons string).  If that is not the case we would rather go
-  // to the runtime system now, to flatten the string.
-  __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
-  __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex);
-  __ j(not_equal, &slow_case);
-  // Get the first of the two strings.
-  __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
-  __ jmp(&try_again_with_new_string);
+  Label slow_case;
+  Label exit;
+  StringHelper::GenerateFastCharCodeAt(masm_,
+                                       object.reg(),
+                                       index.reg(),
+                                       scratch.reg(),
+                                       result.reg(),
+                                       &slow_case,
+                                       &slow_case,
+                                       &slow_case,
+                                       &slow_case);
+  __ jmp(&exit);
 
   __ bind(&slow_case);
   // Move the undefined value into the result register, which will
   // trigger the slow case.
-  __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(result.reg(), Heap::kUndefinedValueRootIndex);
 
-  __ bind(&end);
-  frame_->Push(&temp);
+  __ bind(&exit);
+  frame_->Push(&result);
+}
+
+
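The hand-written fast path removed above now lives in StringHelper::GenerateFastCharCodeAt, shared with the stub code. Its observable behaviour is roughly the following sketch (illustrative C++, with approximations of the heap accessors, not actual V8 code), where returning undefined routes the caller to the slow case:

  // Returns the char code as a smi, or undefined to trigger the runtime call.
  Object* FastCharCodeAt(Object* receiver, Object* index) {
    if (!receiver->IsString() || !index->IsSmi()) return Heap::undefined_value();
    String* str = String::cast(receiver);
    int i = Smi::cast(index)->value();
    if (i < 0 || i >= str->length()) return Heap::undefined_value();
    return Smi::FromInt(str->Get(i));  // only flat strings are handled inline
  }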
+void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+  Comment(masm_, "[ GenerateCharFromCode");
+  ASSERT(args->length() == 1);
+
+  Load(args->at(0));
+
+  Result code = frame_->Pop();
+  code.ToRegister();
+  ASSERT(code.is_valid());
+
+  // StringHelper::GenerateCharFromCode may do a runtime call.
+  frame_->SpillAll();
+
+  Result result = allocator()->Allocate();
+  ASSERT(result.is_valid());
+  Result scratch = allocator()->Allocate();
+  ASSERT(scratch.is_valid());
+
+  StringHelper::GenerateCharFromCode(masm_,
+                                     code.reg(),
+                                     result.reg(),
+                                     scratch.reg(),
+                                     CALL_FUNCTION);
+  frame_->Push(&result);
 }
 
 
@@ -3890,6 +3983,233 @@
 }
 
 
+// Generates the Math.pow method. Only handles special cases and
+// branches to the runtime system for everything else. Please note
+// that this function assumes that the callsite has executed ToNumber
+// on both arguments.
+void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+  Load(args->at(0));
+  Load(args->at(1));
+
+  Label allocate_return;
+  // Load the two operands while leaving the values on the frame.
+  frame()->Dup();
+  Result exponent = frame()->Pop();
+  exponent.ToRegister();
+  frame()->Spill(exponent.reg());
+  frame()->PushElementAt(1);
+  Result base = frame()->Pop();
+  base.ToRegister();
+  frame()->Spill(base.reg());
+
+  Result answer = allocator()->Allocate();
+  ASSERT(answer.is_valid());
+  ASSERT(!exponent.reg().is(base.reg()));
+  JumpTarget call_runtime;
+
+  // Save 1 in xmm3 - we need this several times later on.
+  __ movl(answer.reg(), Immediate(1));
+  __ cvtlsi2sd(xmm3, answer.reg());
+
+  Label exponent_nonsmi;
+  Label base_nonsmi;
+  // If the exponent is a heap number go to that specific case.
+  __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
+  __ JumpIfNotSmi(base.reg(), &base_nonsmi);
+
+  // Optimized version when the exponent is an integer.
+  Label powi;
+  __ SmiToInteger32(base.reg(), base.reg());
+  __ cvtlsi2sd(xmm0, base.reg());
+  __ jmp(&powi);
+  // The exponent is a smi and the base is a heap number.
+  __ bind(&base_nonsmi);
+  __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  call_runtime.Branch(not_equal);
+
+  __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+
+  // Optimized version of pow when the exponent is an integer.
+  __ bind(&powi);
+  __ SmiToInteger32(exponent.reg(), exponent.reg());
+
+  // Save exponent in base as we need to check if exponent is negative later.
+  // We know that base and exponent are in different registers.
+  __ movl(base.reg(), exponent.reg());
+
+  // Get absolute value of exponent.
+  Label no_neg;
+  __ cmpl(exponent.reg(), Immediate(0));
+  __ j(greater_equal, &no_neg);
+  __ negl(exponent.reg());
+  __ bind(&no_neg);
+
+  // Load xmm1 with 1.
+  __ movsd(xmm1, xmm3);
+  Label while_true;
+  Label no_multiply;
+
+  __ bind(&while_true);
+  __ shrl(exponent.reg(), Immediate(1));
+  __ j(not_carry, &no_multiply);
+  __ mulsd(xmm1, xmm0);
+  __ bind(&no_multiply);
+  __ testl(exponent.reg(), exponent.reg());
+  __ mulsd(xmm0, xmm0);
+  __ j(not_zero, &while_true);
+
+  // base has the original exponent - if it is negative, return 1/result.
+  __ testl(base.reg(), base.reg());
+  __ j(positive, &allocate_return);
+  // Special case if xmm1 has reached infinity.
+  __ movl(answer.reg(), Immediate(0x7FB00000));
+  __ movd(xmm0, answer.reg());
+  __ cvtss2sd(xmm0, xmm0);
+  __ ucomisd(xmm0, xmm1);
+  call_runtime.Branch(equal);
+  __ divsd(xmm3, xmm1);
+  __ movsd(xmm1, xmm3);
+  __ jmp(&allocate_return);
+
+  // The exponent (or both operands) is a heap number - in any case we
+  // now work on doubles.
+  __ bind(&exponent_nonsmi);
+  __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  call_runtime.Branch(not_equal);
+  __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
+  // Test if the exponent is NaN.
+  __ ucomisd(xmm1, xmm1);
+  call_runtime.Branch(parity_even);
+
+  Label base_not_smi;
+  Label handle_special_cases;
+  __ JumpIfNotSmi(base.reg(), &base_not_smi);
+  __ SmiToInteger32(base.reg(), base.reg());
+  __ cvtlsi2sd(xmm0, base.reg());
+  __ jmp(&handle_special_cases);
+  __ bind(&base_not_smi);
+  __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  call_runtime.Branch(not_equal);
+  __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
+  __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+  __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
+  // The base is NaN or +/-Infinity.
+  call_runtime.Branch(greater_equal);
+  __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
+
+  // base is in xmm0 and exponent is in xmm1.
+  __ bind(&handle_special_cases);
+  Label not_minus_half;
+  // Test for -0.5.
+  // Load xmm2 with -0.5.
+  __ movl(answer.reg(), Immediate(0xBF000000));
+  __ movd(xmm2, answer.reg());
+  __ cvtss2sd(xmm2, xmm2);
+  // xmm2 now has -0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &not_minus_half);
+
+  // Calculates the reciprocal of the square root.
+  // Note that 1/sqrt(x) = sqrt(1/x).
+  __ divsd(xmm3, xmm0);
+  __ movsd(xmm1, xmm3);
+  __ sqrtsd(xmm1, xmm1);
+  __ jmp(&allocate_return);
+
+  // Test for 0.5.
+  __ bind(&not_minus_half);
+  // Load xmm2 with 0.5.
+  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+  __ addsd(xmm2, xmm3);
+  // xmm2 now has 0.5.
+  __ comisd(xmm2, xmm1);
+  call_runtime.Branch(not_equal);
+
+  // Calculates square root.
+  __ movsd(xmm1, xmm0);
+  __ sqrtsd(xmm1, xmm1);
+
+  JumpTarget done;
+  Label failure, success;
+  __ bind(&allocate_return);
+  // Make a copy of the frame to enable us to handle allocation
+  // failure after the JumpTarget jump.
+  VirtualFrame* clone = new VirtualFrame(frame());
+  __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
+  __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
+  // Remove the two original values from the frame - we only need those
+  // in the case where we branch to runtime.
+  frame()->Drop(2);
+  exponent.Unuse();
+  base.Unuse();
+  done.Jump(&answer);
+  // Use the copy of the original frame as our current frame.
+  RegisterFile empty_regs;
+  SetFrame(clone, &empty_regs);
+  // If we experience an allocation failure we branch to runtime.
+  __ bind(&failure);
+  call_runtime.Bind();
+  answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
+
+  done.Bind(&answer);
+  frame()->Push(&answer);
+}
+
+
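The integer-exponent loop above ("powi") is exponentiation by squaring, driven by the exponent bits shifted out through the carry flag. The same computation in plain C++, mirroring the assembly (including the reciprocal for negative exponents):

  // xmm1 is 'result', xmm0 is 'power' in the code above.
  double PowInteger(double base, int exponent) {
    unsigned bits = (exponent < 0) ? -static_cast<unsigned>(exponent)
                                   : static_cast<unsigned>(exponent);
    double result = 1.0;
    double power = base;
    while (bits != 0) {
      if (bits & 1) result *= power;  // multiply when the shifted-out bit is set
      bits >>= 1;
      power *= power;                 // square for the next bit
    }
    // A negative exponent returns 1/result; the assembly additionally bails
    // out to the runtime if result has reached infinity.
    return (exponent < 0) ? 1.0 / result : result;
  }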
+// Generates the Math.sqrt method. Note that this function assumes that
+// the callsite has executed ToNumber on the argument.
+void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+
+  // Leave original value on the frame if we need to call runtime.
+  frame()->Dup();
+  Result result = frame()->Pop();
+  result.ToRegister();
+  frame()->Spill(result.reg());
+  Label runtime;
+  Label non_smi;
+  Label load_done;
+  JumpTarget end;
+
+  __ JumpIfNotSmi(result.reg(), &non_smi);
+  __ SmiToInteger32(result.reg(), result.reg());
+  __ cvtlsi2sd(xmm0, result.reg());
+  __ jmp(&load_done);
+  __ bind(&non_smi);
+  __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &runtime);
+  __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
+
+  __ bind(&load_done);
+  __ sqrtsd(xmm0, xmm0);
+  // A copy of the virtual frame to allow us to go to runtime after the
+  // JumpTarget jump.
+  Result scratch = allocator()->Allocate();
+  VirtualFrame* clone = new VirtualFrame(frame());
+  __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
+
+  __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
+  frame()->Drop(1);
+  scratch.Unuse();
+  end.Jump(&result);
+  // We only branch to runtime if we have an allocation error.
+  // Use the copy of the original frame as our current frame.
+  RegisterFile empty_regs;
+  SetFrame(clone, &empty_regs);
+  __ bind(&runtime);
+  result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
+
+  end.Bind(&result);
+  frame()->Push(&result);
+}
+
+
 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   Load(args->at(0));
@@ -3952,28 +4272,43 @@
 }
 
 
-void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateRandomHeapNumber(
+    ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
   frame_->SpillAll();
-  __ push(rsi);
 
-  // Make sure the frame is aligned like the OS expects.
-  static const int kFrameAlignment = OS::ActivationFrameAlignment();
-  if (kFrameAlignment > 0) {
-    ASSERT(IsPowerOf2(kFrameAlignment));
-    __ movq(rbx, rsp);  // Save in AMD-64 abi callee-saved register.
-    __ and_(rsp, Immediate(-kFrameAlignment));
-  }
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
+  __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
 
-  // Call V8::RandomPositiveSmi().
-  __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
+  __ bind(&slow_allocate_heapnumber);
+  // To allocate a heap number, and ensure that it is not a smi, we
+  // call the runtime function NumberUnaryMinus on 0, returning the
+  // double -0.0.  A new, distinct heap number is returned each time.
+  __ Push(Smi::FromInt(0));
+  __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+  __ movq(rbx, rax);
 
-  // Restore stack pointer from callee-saved register.
-  if (kFrameAlignment > 0) {
-    __ movq(rsp, rbx);
-  }
+  __ bind(&heapnumber_allocated);
 
-  __ pop(rsi);
+  // Return a random uint32 number in rax.
+  // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
+  __ PrepareCallCFunction(0);
+  __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+  // Convert 32 random bits in eax to 0.(32 random bits) in a double
+  // by computing:
+  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
+  __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
+  __ movd(xmm1, rcx);
+  __ movd(xmm0, rax);
+  __ cvtss2sd(xmm1, xmm1);
+  __ xorpd(xmm0, xmm1);
+  __ subsd(xmm0, xmm1);
+  __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
+
+  __ movq(rax, rbx);
   Result result = allocator_->Allocate(rax);
   frame_->Push(&result);
 }
@@ -3993,32 +4328,331 @@
 }
 
 
+void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
+  // No stub. This code only occurs a few times in regexp.js.
+  const int kMaxInlineLength = 100;
+  ASSERT_EQ(3, args->length());
+  Load(args->at(0));  // Size of array, smi.
+  Load(args->at(1));  // "index" property value.
+  Load(args->at(2));  // "input" property value.
+  {
+    VirtualFrame::SpilledScope spilled_scope;
+
+    Label slowcase;
+    Label done;
+    __ movq(r8, Operand(rsp, kPointerSize * 2));
+    __ JumpIfNotSmi(r8, &slowcase);
+    __ SmiToInteger32(rbx, r8);
+    __ cmpl(rbx, Immediate(kMaxInlineLength));
+    __ j(above, &slowcase);
+    // Smi-tagging is equivalent to multiplying by 2.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize == 1);
+    // Allocate RegExpResult followed by FixedArray with size in rbx.
+    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+    // Elements:  [Map][Length][..elements..]
+    __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+                          times_pointer_size,
+                          rbx,  // In: Number of elements.
+                          rax,  // Out: Start of allocation (tagged).
+                          rcx,  // Out: End of allocation.
+                          rdx,  // Scratch register
+                          &slowcase,
+                          TAG_OBJECT);
+    // rax: Start of allocated area, object-tagged.
+    // rbx: Number of array elements as int32.
+    // r8: Number of array elements as smi.
+
+    // Set JSArray map to global.regexp_result_map().
+    __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+    __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
+    __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
+
+    // Set empty properties FixedArray.
+    __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
+            Factory::empty_fixed_array());
+
+    // Set elements to point to FixedArray allocated right after the JSArray.
+    __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
+    __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+
+    // Set input, index and length fields from arguments.
+    __ pop(FieldOperand(rax, JSRegExpResult::kInputOffset));
+    __ pop(FieldOperand(rax, JSRegExpResult::kIndexOffset));
+    __ lea(rsp, Operand(rsp, kPointerSize));
+    __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
+
+    // Fill out the elements FixedArray.
+    // rax: JSArray.
+    // rcx: FixedArray.
+    // rbx: Number of elements in array as int32.
+
+    // Set map.
+    __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
+            Factory::fixed_array_map());
+    // Set length.
+    __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
+    // Fill contents of fixed-array with the-hole.
+    __ Move(rdx, Factory::the_hole_value());
+    __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
+    // Fill fixed array elements with hole.
+    // rax: JSArray.
+    // rbx: Number of elements in array that remains to be filled, as int32.
+    // rcx: Start of elements in FixedArray.
+    // rdx: the hole.
+    Label loop;
+    __ testl(rbx, rbx);
+    __ bind(&loop);
+    __ j(less_equal, &done);  // Jump if rbx is negative or zero.
+    __ subl(rbx, Immediate(1));
+    __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
+    __ jmp(&loop);
+
+    __ bind(&slowcase);
+    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+
+    __ bind(&done);
+  }
+  frame_->Forget(3);
+  frame_->Push(rax);
+}
+
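The AllocateInNewSpace call above requests a fixed header plus one pointer per element, matching the layout drawn in the comments (a JSRegExpResult immediately followed by its elements FixedArray). As a plain expression, using the constants named in the code:

  // Total bytes allocated for a result with n elements.
  int RegExpResultSize(int n) {
    return JSRegExpResult::kSize + FixedArray::kHeaderSize + n * kPointerSize;
  }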
+
+class DeferredSearchCache: public DeferredCode {
+ public:
+  DeferredSearchCache(Register dst, Register cache, Register key)
+      : dst_(dst), cache_(cache), key_(key) {
+    set_comment("[ DeferredSearchCache");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register dst_, cache_, key_;
+};
+
+
+void DeferredSearchCache::Generate() {
+  __ push(cache_);
+  __ push(key_);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+  if (!dst_.is(rax)) {
+    __ movq(dst_, rax);
+  }
+}
+
+
+void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      Top::global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    frame_->Push(Factory::undefined_value());
+    return;
+  }
+
+  Load(args->at(1));
+  Result key = frame_->Pop();
+  key.ToRegister();
+
+  Result cache = allocator()->Allocate();
+  ASSERT(cache.is_valid());
+  __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
+  __ movq(cache.reg(),
+          FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
+  __ movq(cache.reg(),
+          ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ movq(cache.reg(),
+          FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
+
+  Result tmp = allocator()->Allocate();
+  ASSERT(tmp.is_valid());
+
+  DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
+                                                          cache.reg(),
+                                                          key.reg());
+
+  const int kFingerOffset =
+      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
+  // tmp.reg() now holds finger offset as a smi.
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ movq(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
+  SmiIndex index =
+      masm()->SmiToIndex(kScratchRegister, tmp.reg(), kPointerSizeLog2);
+  __ cmpq(key.reg(), FieldOperand(cache.reg(),
+                                  index.reg,
+                                  index.scale,
+                                  FixedArray::kHeaderSize));
+  deferred->Branch(not_equal);
+
+  __ movq(tmp.reg(), FieldOperand(cache.reg(),
+                                  index.reg,
+                                  index.scale,
+                                  kPointerSize + FixedArray::kHeaderSize));
+
+  deferred->BindExit();
+  frame_->Push(&tmp);
+}
+
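The inlined lookup above probes only the entry under the cache's finger: the finger index is loaded as a smi, scaled to a pointer offset, and the key at that slot is compared; on a hit the value is the adjacent element, on a miss the deferred code calls Runtime::kGetFromCache. A C++ sketch of that probe (entry layout inferred from the offsets used above):

  // The cache is a FixedArray of [key, value] pairs plus a finger index.
  Object* ProbeResultCache(FixedArray* cache, Object* key) {
    int finger =
        Smi::cast(cache->get(JSFunctionResultCache::kFingerIndex))->value();
    if (cache->get(finger) == key) {
      return cache->get(finger + 1);  // the value sits right after its key
    }
    return NULL;  // miss: fall back to Runtime::kGetFromCache
  }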
+
 void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and jump to the runtime.
   Load(args->at(0));
 
-  Result answer = frame_->CallRuntime(Runtime::kNumberToString, 1);
-  frame_->Push(&answer);
+  NumberToStringStub stub;
+  Result result = frame_->CallStub(&stub, 1);
+  frame_->Push(&result);
+}
+
+
+class DeferredSwapElements: public DeferredCode {
+ public:
+  DeferredSwapElements(Register object, Register index1, Register index2)
+      : object_(object), index1_(index1), index2_(index2) {
+    set_comment("[ DeferredSwapElements");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register object_, index1_, index2_;
+};
+
+
+void DeferredSwapElements::Generate() {
+  __ push(object_);
+  __ push(index1_);
+  __ push(index2_);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+}
+
+
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateSwapElements");
+
+  ASSERT_EQ(3, args->length());
+
+  Load(args->at(0));
+  Load(args->at(1));
+  Load(args->at(2));
+
+  Result index2 = frame_->Pop();
+  index2.ToRegister();
+
+  Result index1 = frame_->Pop();
+  index1.ToRegister();
+
+  Result object = frame_->Pop();
+  object.ToRegister();
+
+  Result tmp1 = allocator()->Allocate();
+  tmp1.ToRegister();
+  Result tmp2 = allocator()->Allocate();
+  tmp2.ToRegister();
+
+  frame_->Spill(object.reg());
+  frame_->Spill(index1.reg());
+  frame_->Spill(index2.reg());
+
+  DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
+                                                            index1.reg(),
+                                                            index2.reg());
+
+  // Fetch the map and check if array is in fast case.
+  // Check that object doesn't require security checks and
+  // has no indexed interceptor.
+  __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
+  deferred->Branch(below);
+  __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
+           Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+  deferred->Branch(not_zero);
+
+  // Check the object's elements are in fast case.
+  __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  deferred->Branch(not_equal);
+
+  // Check that both indices are smis.
+  Condition both_smi = __ CheckBothSmi(index1.reg(), index2.reg());
+  deferred->Branch(NegateCondition(both_smi));
+
+  // Bring addresses into index1 and index2.
+  __ SmiToInteger32(index1.reg(), index1.reg());
+  __ lea(index1.reg(), FieldOperand(tmp1.reg(),
+                                    index1.reg(),
+                                    times_pointer_size,
+                                    FixedArray::kHeaderSize));
+  __ SmiToInteger32(index2.reg(), index2.reg());
+  __ lea(index2.reg(), FieldOperand(tmp1.reg(),
+                                    index2.reg(),
+                                    times_pointer_size,
+                                    FixedArray::kHeaderSize));
+
+  // Swap elements.
+  __ movq(object.reg(), Operand(index1.reg(), 0));
+  __ movq(tmp2.reg(), Operand(index2.reg(), 0));
+  __ movq(Operand(index2.reg(), 0), object.reg());
+  __ movq(Operand(index1.reg(), 0), tmp2.reg());
+
+  Label done;
+  __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
+  // Possible optimization: check that both values are smis
+  // (OR them together and test the result against the smi mask).
+
+  __ movq(tmp2.reg(), tmp1.reg());
+  RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
+  __ CallStub(&recordWrite1);
+
+  RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
+  __ CallStub(&recordWrite2);
+
+  __ bind(&done);
+
+  deferred->BindExit();
+  frame_->Push(Factory::undefined_value());
+}
+
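All the checks above only establish that the fast path is safe (a fast-case JSObject with FixedArray elements and smi indices); the swap itself is an ordinary element exchange plus a write barrier for each store. Its effect, as a C++ sketch where set() stands in for the store-plus-RecordWriteStub pair:

  void SwapElements(FixedArray* elements, int i, int j) {
    Object* tmp = elements->get(i);
    elements->set(i, elements->get(j));  // each store needs a write barrier
    elements->set(j, tmp);               // (skipped if the array is in new space)
  }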
+
+void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
+  Comment cmnt(masm_, "[ GenerateCallFunction");
+
+  ASSERT(args->length() >= 2);
+
+  int n_args = args->length() - 2;  // for receiver and function.
+  Load(args->at(0));  // receiver
+  for (int i = 0; i < n_args; i++) {
+    Load(args->at(i + 1));
+  }
+  Load(args->at(n_args + 1));  // function
+  Result result = frame_->CallJSFunction(n_args);
+  frame_->Push(&result);
 }
 
 
 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
-  // Load the argument on the stack and jump to the runtime.
   Load(args->at(0));
-  Result answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
-  frame_->Push(&answer);
+  TranscendentalCacheStub stub(TranscendentalCache::SIN);
+  Result result = frame_->CallStub(&stub, 1);
+  frame_->Push(&result);
 }
 
 
 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
-  // Load the argument on the stack and jump to the runtime.
   Load(args->at(0));
-  Result answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
-  frame_->Push(&answer);
+  TranscendentalCacheStub stub(TranscendentalCache::COS);
+  Result result = frame_->CallStub(&stub, 1);
+  frame_->Push(&result);
 }
 
 
@@ -4314,7 +4948,7 @@
 
   if (value.is_number()) {
     Comment cmnt(masm_, "ONLY_NUMBER");
-    // Fast case if NumberInfo indicates only numbers.
+    // Fast case if TypeInfo indicates only numbers.
     if (FLAG_debug_code) {
       __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
     }
@@ -4937,6 +5571,31 @@
 }
 
 
+static bool CouldBeNaN(const Result& result) {
+  if (result.type_info().IsSmi()) return false;
+  if (result.type_info().IsInteger32()) return false;
+  if (!result.is_constant()) return true;
+  if (!result.handle()->IsHeapNumber()) return false;
+  return isnan(HeapNumber::cast(*result.handle())->value());
+}
+
+
+// Convert from signed to unsigned comparison to match the way EFLAGS are set
+// by FPU and XMM compare instructions.
+static Condition DoubleCondition(Condition cc) {
+  switch (cc) {
+    case less:          return below;
+    case equal:         return equal;
+    case less_equal:    return below_equal;
+    case greater:       return above;
+    case greater_equal: return above_equal;
+    default:            UNREACHABLE();
+  }
+  UNREACHABLE();
+  return equal;
+}
+
+
 void CodeGenerator::Comparison(AstNode* node,
                                Condition cc,
                                bool strict,
@@ -4958,14 +5617,28 @@
   ASSERT(cc == less || cc == equal || cc == greater_equal);
 
   // If either side is a constant smi, optimize the comparison.
-  bool left_side_constant_smi =
-      left_side.is_constant() && left_side.handle()->IsSmi();
-  bool right_side_constant_smi =
-      right_side.is_constant() && right_side.handle()->IsSmi();
-  bool left_side_constant_null =
-      left_side.is_constant() && left_side.handle()->IsNull();
-  bool right_side_constant_null =
-      right_side.is_constant() && right_side.handle()->IsNull();
+  bool left_side_constant_smi = false;
+  bool left_side_constant_null = false;
+  bool left_side_constant_1_char_string = false;
+  if (left_side.is_constant()) {
+    left_side_constant_smi = left_side.handle()->IsSmi();
+    left_side_constant_null = left_side.handle()->IsNull();
+    left_side_constant_1_char_string =
+        (left_side.handle()->IsString() &&
+         String::cast(*left_side.handle())->length() == 1 &&
+         String::cast(*left_side.handle())->IsAsciiRepresentation());
+  }
+  bool right_side_constant_smi = false;
+  bool right_side_constant_null = false;
+  bool right_side_constant_1_char_string = false;
+  if (right_side.is_constant()) {
+    right_side_constant_smi = right_side.handle()->IsSmi();
+    right_side_constant_null = right_side.handle()->IsNull();
+    right_side_constant_1_char_string =
+        (right_side.handle()->IsString() &&
+         String::cast(*right_side.handle())->length() == 1 &&
+         String::cast(*right_side.handle())->IsAsciiRepresentation());
+  }
 
   if (left_side_constant_smi || right_side_constant_smi) {
     if (left_side_constant_smi && right_side_constant_smi) {
@@ -4994,7 +5667,7 @@
         left_side = right_side;
         right_side = temp;
         cc = ReverseCondition(cc);
-        // This may reintroduce greater or less_equal as the value of cc.
+        // This may re-introduce greater or less_equal as the value of cc.
         // CompareStub and the inline code both support all values of cc.
       }
       // Implement comparison against a constant Smi, inlining the case
@@ -5013,9 +5686,9 @@
       Condition left_is_smi = masm_->CheckSmi(left_side.reg());
       is_smi.Branch(left_is_smi);
 
-      bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
-          && node->AsCompareOperation()->is_for_loop_condition();
-      if (!is_for_loop_compare && right_val->IsSmi()) {
+      bool is_loop_condition = (node->AsExpression() != NULL) &&
+          node->AsExpression()->is_loop_condition();
+      if (!is_loop_condition && right_val->IsSmi()) {
         // Right side is a constant smi and left side has been checked
         // not to be a smi.
         JumpTarget not_number;
@@ -5037,22 +5710,13 @@
         // Jump to builtin for NaN.
         not_number.Branch(parity_even, &left_side);
         left_side.Unuse();
-        Condition double_cc = cc;
-        switch (cc) {
-          case less:          double_cc = below;       break;
-          case equal:         double_cc = equal;       break;
-          case less_equal:    double_cc = below_equal; break;
-          case greater:       double_cc = above;       break;
-          case greater_equal: double_cc = above_equal; break;
-          default: UNREACHABLE();
-        }
-        dest->true_target()->Branch(double_cc);
+        dest->true_target()->Branch(DoubleCondition(cc));
         dest->false_target()->Jump();
         not_number.Bind(&left_side);
       }
 
       // Setup and call the compare stub.
-      CompareStub stub(cc, strict);
+      CompareStub stub(cc, strict, kCantBothBeNaN);
       Result result = frame_->CallStub(&stub, &left_side, &right_side);
       result.ToRegister();
       __ testq(result.reg(), result.reg());
@@ -5105,20 +5769,189 @@
       operand.Unuse();
       dest->Split(not_zero);
     }
-  } else {  // Neither side is a constant Smi or null.
+  } else if (left_side_constant_1_char_string ||
+             right_side_constant_1_char_string) {
+    if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
+      // Trivial case, comparing two constants.
+      int left_value = String::cast(*left_side.handle())->Get(0);
+      int right_value = String::cast(*right_side.handle())->Get(0);
+      switch (cc) {
+        case less:
+          dest->Goto(left_value < right_value);
+          break;
+        case equal:
+          dest->Goto(left_value == right_value);
+          break;
+        case greater_equal:
+          dest->Goto(left_value >= right_value);
+          break;
+        default:
+          UNREACHABLE();
+      }
+    } else {
+      // Only one side is a constant 1-character string.
+      // If left side is a constant 1-character string, reverse the operands.
+      // Since one side is a constant string, conversion order does not matter.
+      if (left_side_constant_1_char_string) {
+        Result temp = left_side;
+        left_side = right_side;
+        right_side = temp;
+        cc = ReverseCondition(cc);
+        // This may reintroduce greater or less_equal as the value of cc.
+        // CompareStub and the inline code both support all values of cc.
+      }
+      // Implement comparison against a constant string, inlining the case
+      // where both sides are strings.
+      left_side.ToRegister();
+
+      // Here we split control flow to the stub call and inlined cases
+      // before finally splitting it to the control destination.  We use
+      // a jump target and branching to duplicate the virtual frame at
+      // the first split.  We manually handle the off-frame references
+      // by reconstituting them on the non-fall-through path.
+      JumpTarget is_not_string, is_string;
+      Register left_reg = left_side.reg();
+      Handle<Object> right_val = right_side.handle();
+      ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
+      Condition is_smi = masm()->CheckSmi(left_reg);
+      is_not_string.Branch(is_smi, &left_side);
+      Result temp = allocator_->Allocate();
+      ASSERT(temp.is_valid());
+      __ movq(temp.reg(),
+              FieldOperand(left_reg, HeapObject::kMapOffset));
+      __ movzxbl(temp.reg(),
+                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+      // If we are testing for equality then make use of the symbol shortcut.
+      // Check if the left hand side has the same type as the right hand
+      // side (which is always a symbol).
+      if (cc == equal) {
+        Label not_a_symbol;
+        ASSERT(kSymbolTag != 0);
+        // Ensure that no non-strings have the symbol bit set.
+        ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+        __ testb(temp.reg(), Immediate(kIsSymbolMask));  // Test the symbol bit.
+        __ j(zero, &not_a_symbol);
+        // They are symbols, so do identity compare.
+        __ Cmp(left_reg, right_side.handle());
+        dest->true_target()->Branch(equal);
+        dest->false_target()->Branch(not_equal);
+        __ bind(&not_a_symbol);
+      }
+      // Call the compare stub if the left side is not a flat ascii string.
+      __ andb(temp.reg(),
+              Immediate(kIsNotStringMask |
+                        kStringRepresentationMask |
+                        kStringEncodingMask));
+      __ cmpb(temp.reg(),
+              Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+      temp.Unuse();
+      is_string.Branch(equal, &left_side);
+
+      // Setup and call the compare stub.
+      is_not_string.Bind(&left_side);
+      CompareStub stub(cc, strict, kCantBothBeNaN);
+      Result result = frame_->CallStub(&stub, &left_side, &right_side);
+      result.ToRegister();
+      __ testq(result.reg(), result.reg());
+      result.Unuse();
+      dest->true_target()->Branch(cc);
+      dest->false_target()->Jump();
+
+      is_string.Bind(&left_side);
+      // left_side is a sequential ASCII string.
+      ASSERT(left_side.reg().is(left_reg));
+      right_side = Result(right_val);
+      Result temp2 = allocator_->Allocate();
+      ASSERT(temp2.is_valid());
+      // Test string equality and comparison.
+      if (cc == equal) {
+        Label comparison_done;
+        __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
+                      Smi::FromInt(1));
+        __ j(not_equal, &comparison_done);
+        uint8_t char_value =
+            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
+        __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
+                Immediate(char_value));
+        __ bind(&comparison_done);
+      } else {
+        __ movq(temp2.reg(),
+                FieldOperand(left_side.reg(), String::kLengthOffset));
+        __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
+        Label comparison;
+        // If the length is 0 then the subtraction gave -1 which compares less
+        // than any character.
+        __ j(negative, &comparison);
+        // Otherwise load the first character.
+        __ movzxbl(temp2.reg(),
+                   FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
+        __ bind(&comparison);
+        // Compare the first character of the string with the
+        // constant 1-character string.
+        uint8_t char_value =
+            static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
+        __ cmpb(temp2.reg(), Immediate(char_value));
+        Label characters_were_different;
+        __ j(not_equal, &characters_were_different);
+        // If the first character is the same then the long string sorts after
+        // the short one.
+        __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
+                      Smi::FromInt(1));
+        __ bind(&characters_were_different);
+      }
+      temp2.Unuse();
+      left_side.Unuse();
+      right_side.Unuse();
+      dest->Split(cc);
+    }
+  } else {
+    // Neither side is a constant Smi, constant 1-char string, or constant null.
     // If either side is a non-smi constant, skip the smi check.
     bool known_non_smi =
         (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
-        (right_side.is_constant() && !right_side.handle()->IsSmi());
+        (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
+        left_side.type_info().IsDouble() ||
+        right_side.type_info().IsDouble();
+
+    NaNInformation nan_info =
+        (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
+        kBothCouldBeNaN :
+        kCantBothBeNaN;
+
+    // Inline number comparison handling any combination of smis and heap
+    // numbers if:
+    //   the code is in a loop,
+    //   the compare operation is different from equal, and
+    //   the compare is not a for-loop condition.
+    // The reason for excluding equal is that it will most likely be done
+    // with smis (not heap numbers), and the code for comparing smis is
+    // inlined separately. The same reasoning applies to for-loop conditions,
+    // which are also most likely smi comparisons.
+    bool is_loop_condition = (node->AsExpression() != NULL)
+        && node->AsExpression()->is_loop_condition();
+    bool inline_number_compare =
+        loop_nesting() > 0 && cc != equal && !is_loop_condition;
+
     left_side.ToRegister();
     right_side.ToRegister();
 
     if (known_non_smi) {
-      // When non-smi, call out to the compare stub.
-      CompareStub stub(cc, strict);
+      // Inlined equality check:
+      // If at least one of the objects is not NaN, then if the objects
+      // are identical, they are equal.
+      if (nan_info == kCantBothBeNaN && cc == equal) {
+        __ cmpq(left_side.reg(), right_side.reg());
+        dest->true_target()->Branch(equal);
+      }
+
+      // Inlined number comparison:
+      if (inline_number_compare) {
+        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+      }
+
+      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      // The result is a Smi, which is negative, zero, or positive.
-      __ SmiTest(answer.reg());  // Sets both zero and sign flag.
+      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flag.
       answer.Unuse();
       dest->Split(cc);
     } else {
@@ -5133,10 +5966,22 @@
 
       Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
       is_smi.Branch(both_smi);
-      // When non-smi, call out to the compare stub.
-      CompareStub stub(cc, strict);
+
+      // Inline the equality check if both operands can't be a NaN. If both
+      // objects are the same they are equal.
+      if (nan_info == kCantBothBeNaN && cc == equal) {
+        __ cmpq(left_side.reg(), right_side.reg());
+        dest->true_target()->Branch(equal);
+      }
+
+      // Inlined number comparison:
+      if (inline_number_compare) {
+        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+      }
+
+      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ SmiTest(answer.reg());  // Sets both zero and sign flags.
+      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
       answer.Unuse();
       dest->true_target()->Branch(cc);
       dest->false_target()->Jump();
@@ -5153,6 +5998,73 @@
 }
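The constant one-character string fast path added above folds a comparison of
two constant strings down to their character codes at compile time. A minimal
C++ sketch of that folding, not part of the patch and with hypothetical names:

    #include <cstdint>

    // What dest->Goto(left_value < right_value) decides statically for
    // cc == less: both character codes are compile-time constants.
    bool FoldOneCharCompare(uint16_t left_char, uint16_t right_char) {
      return left_char < right_char;
    }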
 
 
+// Load a comparison operand into an XMM register. Jump to the not_numbers
+// jump target, passing the left and right results, if the operand is not a
+// number.
+static void LoadComparisonOperand(MacroAssembler* masm_,
+                                  Result* operand,
+                                  XMMRegister xmm_reg,
+                                  Result* left_side,
+                                  Result* right_side,
+                                  JumpTarget* not_numbers) {
+  Label done;
+  if (operand->type_info().IsDouble()) {
+    // Operand is known to be a heap number, just load it.
+    __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+  } else if (operand->type_info().IsSmi()) {
+    // Operand is known to be a smi. Convert it to double and keep the original
+    // smi.
+    __ SmiToInteger32(kScratchRegister, operand->reg());
+    __ cvtlsi2sd(xmm_reg, kScratchRegister);
+  } else {
+    // Operand type not known, check for smi or heap number.
+    Label smi;
+    __ JumpIfSmi(operand->reg(), &smi);
+    if (!operand->type_info().IsNumber()) {
+      __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+      __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
+              kScratchRegister);
+      not_numbers->Branch(not_equal, left_side, right_side, taken);
+    }
+    __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+    __ jmp(&done);
+
+    __ bind(&smi);
+    // Convert the smi to a double and keep the original smi.
+    __ SmiToInteger32(kScratchRegister, operand->reg());
+    __ cvtlsi2sd(xmm_reg, kScratchRegister);
+    __ jmp(&done);
+  }
+  __ bind(&done);
+}
+
+
+void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
+                                                   Result* right_side,
+                                                   Condition cc,
+                                                   ControlDestination* dest) {
+  ASSERT(left_side->is_register());
+  ASSERT(right_side->is_register());
+
+  JumpTarget not_numbers;
+  // Load left and right operand into registers xmm0 and xmm1 and compare.
+  LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
+                        &not_numbers);
+  LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
+                        &not_numbers);
+  __ comisd(xmm0, xmm1);
+  // Bail out if a NaN is involved.
+  not_numbers.Branch(parity_even, left_side, right_side);
+
+  // Split to destination targets based on comparison.
+  left_side->Unuse();
+  right_side->Unuse();
+  dest->true_target()->Branch(DoubleCondition(cc));
+  dest->false_target()->Jump();
+
+  not_numbers.Bind(left_side, right_side);
+}
+
+
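A hedged C++ analogue of the control flow GenerateInlineNumberComparison emits,
not part of the patch: both operands are loaded into XMM registers and compared
with comisd, whose parity flag signals an unordered (NaN) result, in which case
control falls back to the not_numbers target.

    #include <cmath>

    enum Outcome { kTrueTarget, kFalseTarget, kNotNumbers };

    // Hypothetical mirror of the emitted code for cc == less.
    Outcome InlineNumberCompareLess(double left, double right) {
      if (std::isnan(left) || std::isnan(right)) {
        return kNotNumbers;  // the parity_even path above
      }
      return left < right ? kTrueTarget : kFalseTarget;
    }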
 class DeferredInlineBinaryOperation: public DeferredCode {
  public:
   DeferredInlineBinaryOperation(Token::Value op,
@@ -5176,16 +6088,130 @@
 
 
 void DeferredInlineBinaryOperation::Generate() {
+  Label done;
+  if ((op_ == Token::ADD)
+      || (op_ == Token::SUB)
+      || (op_ == Token::MUL)
+      || (op_ == Token::DIV)) {
+    Label call_runtime;
+    Label left_smi, right_smi, load_right, do_op;
+    __ JumpIfSmi(left_, &left_smi);
+    __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &call_runtime);
+    __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
+    if (mode_ == OVERWRITE_LEFT) {
+      __ movq(dst_, left_);
+    }
+    __ jmp(&load_right);
+
+    __ bind(&left_smi);
+    __ SmiToInteger32(left_, left_);
+    __ cvtlsi2sd(xmm0, left_);
+    __ Integer32ToSmi(left_, left_);
+    if (mode_ == OVERWRITE_LEFT) {
+      Label alloc_failure;
+      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+    }
+
+    __ bind(&load_right);
+    __ JumpIfSmi(right_, &right_smi);
+    __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &call_runtime);
+    __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
+    if (mode_ == OVERWRITE_RIGHT) {
+      __ movq(dst_, right_);
+    } else if (mode_ == NO_OVERWRITE) {
+      Label alloc_failure;
+      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+    }
+    __ jmp(&do_op);
+
+    __ bind(&right_smi);
+    __ SmiToInteger32(right_, right_);
+    __ cvtlsi2sd(xmm1, right_);
+    __ Integer32ToSmi(right_, right_);
+    if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
+      Label alloc_failure;
+      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
+    }
+
+    __ bind(&do_op);
+    switch (op_) {
+      case Token::ADD: __ addsd(xmm0, xmm1); break;
+      case Token::SUB: __ subsd(xmm0, xmm1); break;
+      case Token::MUL: __ mulsd(xmm0, xmm1); break;
+      case Token::DIV: __ divsd(xmm0, xmm1); break;
+      default: UNREACHABLE();
+    }
+    __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
+    __ jmp(&done);
+
+    __ bind(&call_runtime);
+  }
   GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
   stub.GenerateCall(masm_, left_, right_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
+  __ bind(&done);
 }
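The deferred path above reuses an input heap number for the result when the
overwrite mode allows it and allocates a fresh one otherwise. A hedged
restatement of that decision in plain C++, not part of the patch:

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    // True if the deferred code must call AllocateHeapNumber for the result.
    bool MustAllocateResult(OverwriteMode mode,
                            bool left_is_heap_number,
                            bool right_is_heap_number) {
      if (mode == OVERWRITE_LEFT) return !left_is_heap_number;
      if (mode == OVERWRITE_RIGHT) return !right_is_heap_number;
      return true;  // NO_OVERWRITE: always allocate a fresh heap number.
    }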
 
 
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
-                                           StaticType* type,
+static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
+                                  Token::Value op,
+                                  const Result& right,
+                                  const Result& left) {
+  // Set TypeInfo of result according to the operation performed.
+  // We rely on the fact that smis have a 32 bit payload on x64.
+  STATIC_ASSERT(kSmiValueSize == 32);
+  switch (op) {
+    case Token::COMMA:
+      return right.type_info();
+    case Token::OR:
+    case Token::AND:
+      // Result type can be either of the two input types.
+      return operands_type;
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+      // Result is always a smi.
+      return TypeInfo::Smi();
+    case Token::SAR:
+    case Token::SHL:
+      // Result is always a smi.
+      return TypeInfo::Smi();
+    case Token::SHR:
+      // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
+      return (right.is_constant() && right.handle()->IsSmi()
+                     && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
+          ? TypeInfo::Smi()
+          : TypeInfo::Number();
+    case Token::ADD:
+      if (operands_type.IsNumber()) {
+        return TypeInfo::Number();
+      } else if (left.type_info().IsString() || right.type_info().IsString()) {
+        return TypeInfo::String();
+      } else {
+        return TypeInfo::Unknown();
+      }
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      // Result is always a number.
+      return TypeInfo::Number();
+    default:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return TypeInfo::Unknown();
+}
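The SHR case above relies on x64 smis carrying a signed 32-bit payload:
x >>> s is bounded by 2^(32-s) - 1, so any masked shift count of at least one
keeps the result within 2^31 - 1. A hedged illustration, not part of the patch:

    #include <cstdint>

    // Whether "x >>> shift" always fits an x64 smi (signed 32-bit payload).
    bool ShrResultAlwaysSmi(uint32_t shift) {
      // JavaScript masks the count to 5 bits; a masked count of 0 can
      // produce values up to 2^32 - 1, which does not fit the payload.
      return (shift & 0x1F) >= 1;
    }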
+
+
+void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
                                            OverwriteMode overwrite_mode) {
   Comment cmnt(masm_, "[ BinaryOperation");
+  Token::Value op = expr->op();
   Comment cmnt_token(masm_, Token::String(op));
 
   if (op == Token::COMMA) {
@@ -5198,17 +6224,21 @@
   Result left = frame_->Pop();
 
   if (op == Token::ADD) {
-    bool left_is_string = left.is_constant() && left.handle()->IsString();
-    bool right_is_string = right.is_constant() && right.handle()->IsString();
+    const bool left_is_string = left.type_info().IsString();
+    const bool right_is_string = right.type_info().IsString();
+    // Make sure constant strings have string type info.
+    ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
+           left_is_string);
+    ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
+           right_is_string);
     if (left_is_string || right_is_string) {
       frame_->Push(&left);
       frame_->Push(&right);
       Result answer;
       if (left_is_string) {
         if (right_is_string) {
-          // TODO(lrn): if both are constant strings
-          // -- do a compile time cons, if allocation during codegen is allowed.
-          answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+          StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+          answer = frame_->CallStub(&stub, 2);
         } else {
           answer =
             frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
@@ -5217,6 +6247,7 @@
         answer =
           frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
       }
+      answer.set_type_info(TypeInfo::String());
       frame_->Push(&answer);
       return;
     }
@@ -5237,30 +6268,36 @@
   }
 
   // Get number type of left and right sub-expressions.
-  NumberInfo::Type operands_type =
-      NumberInfo::Combine(left.number_info(), right.number_info());
+  TypeInfo operands_type =
+      TypeInfo::Combine(left.type_info(), right.type_info());
+
+  TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
 
   Result answer;
   if (left_is_non_smi_constant || right_is_non_smi_constant) {
+    // Go straight to the slow case, with no smi code.
     GenericBinaryOpStub stub(op,
                              overwrite_mode,
                              NO_SMI_CODE_IN_STUB,
                              operands_type);
     answer = stub.GenerateCall(masm_, frame_, &left, &right);
   } else if (right_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
-                                        type, false, overwrite_mode);
+    answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
+                                        false, overwrite_mode);
   } else if (left_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
-                                        type, true, overwrite_mode);
+    answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
+                                        true, overwrite_mode);
   } else {
     // Set the flags based on the operation, type and loop nesting level.
     // Bit operations always assume they likely operate on Smis. Still only
     // generate the inline Smi check code if this operation is part of a loop.
     // For all other operations only inline the Smi check code for likely smis
     // if the operation is part of a loop.
-    if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
-      answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+    if (loop_nesting() > 0 &&
+        (Token::IsBitOp(op) ||
+         operands_type.IsInteger32() ||
+         expr->type()->IsLikelySmi())) {
+      answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
     } else {
       GenericBinaryOpStub stub(op,
                                overwrite_mode,
@@ -5270,54 +6307,7 @@
     }
   }
 
-  // Set NumberInfo of result according to the operation performed.
-  // We rely on the fact that smis have a 32 bit payload on x64.
-  ASSERT(kSmiValueSize == 32);
-  NumberInfo::Type result_type = NumberInfo::kUnknown;
-  switch (op) {
-    case Token::COMMA:
-      result_type = right.number_info();
-      break;
-    case Token::OR:
-    case Token::AND:
-      // Result type can be either of the two input types.
-      result_type = operands_type;
-      break;
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-      // Result is always a smi.
-      result_type = NumberInfo::kSmi;
-      break;
-    case Token::SAR:
-    case Token::SHL:
-      // Result is always a smi.
-      result_type = NumberInfo::kSmi;
-      break;
-    case Token::SHR:
-      // Result of x >>> y is always a smi if y >= 1, otherwise a number.
-      result_type = (right.is_constant() && right.handle()->IsSmi()
-                     && Smi::cast(*right.handle())->value() >= 1)
-          ? NumberInfo::kSmi
-          : NumberInfo::kNumber;
-      break;
-    case Token::ADD:
-      // Result could be a string or a number. Check types of inputs.
-      result_type = NumberInfo::IsNumber(operands_type)
-          ? NumberInfo::kNumber
-          : NumberInfo::kUnknown;
-      break;
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      // Result is always a number.
-      result_type = NumberInfo::kNumber;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  answer.set_number_info(result_type);
+  answer.set_type_info(result_type);
   frame_->Push(&answer);
 }
 
@@ -5399,26 +6389,32 @@
 }
 
 
-Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+void DeferredInlineSmiOperationReversed::Generate() {
+  GenericBinaryOpStub stub(
+      op_,
+      overwrite_mode_,
+      NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, value_, src_);
+  if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
+Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
                                                  Result* operand,
                                                  Handle<Object> value,
-                                                 StaticType* type,
                                                  bool reversed,
                                                  OverwriteMode overwrite_mode) {
   // NOTE: This is an attempt to inline (a bit) more of the code for
   // some possible smi operations (like + and -) when (at least) one
   // of the operands is a constant smi.
   // Consumes the argument "operand".
-
-  // TODO(199): Optimize some special cases of operations involving a
-  // smi literal (multiply by 2, shift by 0, etc.).
   if (IsUnsafeSmi(value)) {
     Result unsafe_operand(value);
     if (reversed) {
-      return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+      return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
                                overwrite_mode);
     } else {
-      return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+      return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
                                overwrite_mode);
     }
   }
@@ -5427,6 +6423,7 @@
   Smi* smi_value = Smi::cast(*value);
   int int_value = smi_value->value();
 
+  Token::Value op = expr->op();
   Result answer;
   switch (op) {
     case Token::ADD: {
@@ -5455,7 +6452,7 @@
     case Token::SUB: {
       if (reversed) {
         Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                           overwrite_mode);
       } else {
         operand->ToRegister();
@@ -5478,7 +6475,7 @@
     case Token::SAR:
       if (reversed) {
         Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                           overwrite_mode);
       } else {
         // Only the least significant 5 bits of the shift value are used.
@@ -5504,7 +6501,7 @@
     case Token::SHR:
       if (reversed) {
         Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                           overwrite_mode);
       } else {
         // Only the least significant 5 bits of the shift value are used.
@@ -5531,9 +6528,45 @@
 
     case Token::SHL:
       if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
-                                          overwrite_mode);
+        // Move operand into rcx and also into a second register.
+        // If operand is already in a register, take advantage of that.
+        // This lets us modify rcx, but still bail out to deferred code.
+        Result right;
+        Result right_copy_in_rcx;
+        TypeInfo right_type_info = operand->type_info();
+        operand->ToRegister();
+        if (operand->reg().is(rcx)) {
+          right = allocator()->Allocate();
+          __ movq(right.reg(), rcx);
+          frame_->Spill(rcx);
+          right_copy_in_rcx = *operand;
+        } else {
+          right_copy_in_rcx = allocator()->Allocate(rcx);
+          __ movq(rcx, operand->reg());
+          right = *operand;
+        }
+        operand->Unuse();
+
+        answer = allocator()->Allocate();
+        DeferredInlineSmiOperationReversed* deferred =
+            new DeferredInlineSmiOperationReversed(op,
+                                                   answer.reg(),
+                                                   smi_value,
+                                                   right.reg(),
+                                                   overwrite_mode);
+        __ movq(answer.reg(), Immediate(int_value));
+        __ SmiToInteger32(rcx, rcx);
+        if (!right_type_info.IsSmi()) {
+          Condition is_smi = masm_->CheckSmi(right.reg());
+          deferred->Branch(NegateCondition(is_smi));
+        } else if (FLAG_debug_code) {
+          __ AbortIfNotSmi(right.reg(),
+              "Static type info claims non-smi is smi in (const SHL smi).");
+        }
+        __ shl_cl(answer.reg());
+        __ Integer32ToSmi(answer.reg(), answer.reg());
+
+        deferred->BindExit();
       } else {
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
@@ -5639,10 +6672,10 @@
     default: {
       Result constant_operand(value);
       if (reversed) {
-        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
                                           overwrite_mode);
       } else {
-        answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+        answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
                                           overwrite_mode);
       }
       break;
@@ -5652,10 +6685,11 @@
   return answer;
 }
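The reversed SHL case above inlines constant << count by untagging the smi
count into rcx and shifting with shl_cl; the hardware masks a 32-bit shift
count to its low 5 bits, which matches JavaScript semantics. A hedged C++
equivalent, not part of the patch:

    #include <cstdint>

    // "constant << count" with JavaScript's 5-bit count masking; non-smi
    // counts take the DeferredInlineSmiOperationReversed slow path instead.
    int32_t ConstantShlSmi(int32_t constant, int32_t count) {
      return static_cast<int32_t>(
          static_cast<uint32_t>(constant) << (count & 0x1F));
    }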
 
-Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
                                                Result* left,
                                                Result* right,
                                                OverwriteMode overwrite_mode) {
+  Token::Value op = expr->op();
   Result answer;
   // Special handling of div and mod because they use fixed registers.
   if (op == Token::DIV || op == Token::MOD) {
@@ -6216,6 +7250,8 @@
 
         Result tmp = cgen_->allocator_->Allocate();
         ASSERT(tmp.is_valid());
+        Result tmp2 = cgen_->allocator_->Allocate();
+        ASSERT(tmp2.is_valid());
 
         // Determine whether the value is a constant before putting it
         // in a register.
@@ -6231,32 +7267,42 @@
                                                key.reg(),
                                                receiver.reg());
 
-        // Check that the value is a smi if it is not a constant.
-        // We can skip the write barrier for smis and constants.
-        if (!value_is_constant) {
-          __ JumpIfNotSmi(value.reg(), deferred->entry_label());
-        }
-
-        // Check that the key is a non-negative smi.
-        __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
-
         // Check that the receiver is not a smi.
         __ JumpIfSmi(receiver.reg(), deferred->entry_label());
 
+        // Check that the key is a smi.
+        if (!key.is_smi()) {
+          __ JumpIfNotSmi(key.reg(), deferred->entry_label());
+        } else if (FLAG_debug_code) {
+          __ AbortIfNotSmi(key.reg(), "Non-smi value in smi-typed value.");
+        }
+
         // Check that the receiver is a JSArray.
         __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
         deferred->Branch(not_equal);
 
         // Check that the key is within bounds.  Both the key and the
-        // length of the JSArray are smis.
+        // length of the JSArray are smis. Use unsigned comparison to handle
+        // negative keys.
         __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
                       key.reg());
-        deferred->Branch(less_equal);
+        deferred->Branch(below_equal);
 
         // Get the elements array from the receiver and check that it
         // is a flat array (not a dictionary).
         __ movq(tmp.reg(),
                 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+
+        // Check whether it is possible to omit the write barrier. If the
+        // elements array is in new space or the value written is a smi, we
+        // can safely update the elements array without updating the
+        // remembered set.
+        Label in_new_space;
+        __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
+        if (!value_is_constant) {
+          __ JumpIfNotSmi(value.reg(), deferred->entry_label());
+        }
+
+        __ bind(&in_new_space);
         // Bind the deferred code patch site to be able to locate the
         // fixed array map comparison.  When debugging, we patch this
         // comparison to always fail so that we will hit the IC call
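The new-space check above encodes when the write barrier can be skipped: smis
are not heap pointers, and stores into objects already in new space need no
remembered-set entry. A hedged restatement in plain C++, not part of the patch:

    // True if storing the value into the elements array needs the deferred
    // path (which updates the remembered set); false if the inline store is
    // safe as-is.
    bool NeedsRememberedSetUpdate(bool elements_in_new_space,
                                  bool value_is_smi) {
      return !elements_in_new_space && !value_is_smi;
    }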
@@ -6306,12 +7352,12 @@
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
-  // Clone the boilerplate in new space. Set the context to the
-  // current context in rsi.
+  // Create a new closure from the given function info in new
+  // space. Set the context to the current context in rsi.
   Label gc;
   __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
 
-  // Get the boilerplate function from the stack.
+  // Get the function info from the stack.
   __ movq(rdx, Operand(rsp, 1 * kPointerSize));
 
   // Compute the function map in the current global context and set that
@@ -6321,18 +7367,16 @@
   __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
   __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
 
-  // Clone the rest of the boilerplate fields. We don't have to update
-  // the write barrier because the allocated object is in new space.
-  for (int offset = kPointerSize;
-       offset < JSFunction::kSize;
-       offset += kPointerSize) {
-    if (offset == JSFunction::kContextOffset) {
-      __ movq(FieldOperand(rax, offset), rsi);
-    } else {
-      __ movq(rbx, FieldOperand(rdx, offset));
-      __ movq(FieldOperand(rax, offset), rbx);
-    }
-  }
+  // Initialize the rest of the function. We don't have to update the
+  // write barrier because the allocated object is in new space.
+  __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
+  __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
+  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
+  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
+  __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
+  __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
+  __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
 
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
@@ -6344,7 +7388,7 @@
   __ push(rsi);
   __ push(rdx);
   __ push(rcx);  // Restore return address.
-  __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+  __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
 }
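The rewritten stub above no longer copies a boilerplate function; it
initializes each JSFunction field directly. A hypothetical C++ mirror of the
stores, in the order they are emitted above, not part of the patch:

    struct JSFunctionImage {
      void* map;                       // function map from the global context
      void* properties;                // empty_fixed_array
      void* elements;                  // empty_fixed_array
      void* prototype_or_initial_map;  // the_hole
      void* shared_function_info;      // the stack argument (rdx)
      void* context;                   // current context (rsi)
      void* literals;                  // empty_fixed_array
    };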
 
 
@@ -6386,7 +7430,7 @@
 
   // Need to collect. Call into runtime system.
   __ bind(&gc);
-  __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
 }
 
 
@@ -6442,8 +7486,7 @@
   __ ret(3 * kPointerSize);
 
   __ bind(&slow_case);
-  ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
-  __ TailCallRuntime(runtime, 3, 1);
+  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
 }
 
 
@@ -6472,8 +7515,8 @@
   // String value => false iff empty.
   __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
   __ j(above_equal, &not_string);
-  __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
-  __ testl(rdx, rdx);
+  __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+  __ SmiTest(rdx);
   __ j(zero, &false_result);
   __ jmp(&true_result);
 
@@ -6584,6 +7627,213 @@
 
 // End of CodeGenerator implementation.
 
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+  // Input on stack:
+  // rsp[8]: argument (should be number).
+  // rsp[0]: return address.
+  Label runtime_call;
+  Label runtime_call_clear_stack;
+  Label input_not_smi;
+  Label loaded;
+  // Test that rax is a number.
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ JumpIfNotSmi(rax, &input_not_smi);
+  // Input is a smi. Untag and load it onto the FPU stack.
+  // Then load the bits of the double into rbx.
+  __ SmiToInteger32(rax, rax);
+  __ subq(rsp, Immediate(kPointerSize));
+  __ cvtlsi2sd(xmm1, rax);
+  __ movsd(Operand(rsp, 0), xmm1);
+  __ movq(rbx, xmm1);
+  __ movq(rdx, xmm1);
+  __ fld_d(Operand(rsp, 0));
+  __ addq(rsp, Immediate(kPointerSize));
+  __ jmp(&loaded);
+
+  __ bind(&input_not_smi);
+  // Check if input is a HeapNumber.
+  __ Move(rbx, Factory::heap_number_map());
+  __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ j(not_equal, &runtime_call);
+  // Input is a HeapNumber. Push it on the FPU stack and load its
+  // bits into rbx.
+  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+  __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ movq(rdx, rbx);
+  __ bind(&loaded);
+  // ST[0] == double value
+  // rbx = bits of double value.
+  // rdx = also bits of double value.
+  // Compute hash (h is 32 bits, bits are 64):
+  //   h = h0 = bits ^ (bits >> 32);
+  //   h ^= h >> 16;
+  //   h ^= h >> 8;
+  //   h = h & (cacheSize - 1);
+  // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
+  __ sar(rdx, Immediate(32));
+  __ xorl(rdx, rbx);
+  __ movl(rcx, rdx);
+  __ movl(rax, rdx);
+  __ movl(rdi, rdx);
+  __ sarl(rdx, Immediate(8));
+  __ sarl(rcx, Immediate(16));
+  __ sarl(rax, Immediate(24));
+  __ xorl(rcx, rdx);
+  __ xorl(rax, rdi);
+  __ xorl(rcx, rax);
+  ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+  __ andl(rcx, Immediate(TranscendentalCache::kCacheSize - 1));
+  // ST[0] == double value.
+  // rbx = bits of double value.
+  // rcx = TranscendentalCache::hash(double value).
+  __ movq(rax, ExternalReference::transcendental_cache_array_address());
+  // rax points to cache array.
+  __ movq(rax, Operand(rax, type_ * sizeof(TranscendentalCache::caches_[0])));
+  // rax points to the cache for the type type_.
+  // If NULL, the cache hasn't been initialized yet, so go through runtime.
+  __ testq(rax, rax);
+  __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+  // Check that the layout of cache elements matches expectations.
+  {  // NOLINT - doesn't like a single brace on a line.
+    TranscendentalCache::Element test_elem[2];
+    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+    char* elem_in0  = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+    char* elem_in1  = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+    // Two uint32_t's and a pointer per element.
+    CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
+    CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
+    CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
+    CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
+  }
+#endif
+  // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
+  __ addl(rcx, rcx);
+  __ lea(rcx, Operand(rax, rcx, times_8, 0));
+  // Check if cache matches: Double value is stored in uint32_t[2] array.
+  Label cache_miss;
+  __ cmpq(rbx, Operand(rcx, 0));
+  __ j(not_equal, &cache_miss);
+  // Cache hit!
+  __ movq(rax, Operand(rcx, 2 * kIntSize));
+  __ fstp(0);  // Clear FPU stack.
+  __ ret(kPointerSize);
+
+  __ bind(&cache_miss);
+  // Update cache with new value.
+  Label nan_result;
+  GenerateOperation(masm, &nan_result);
+  __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+  __ movq(Operand(rcx, 0), rbx);
+  __ movq(Operand(rcx, 2 * kIntSize), rax);
+  __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+  __ ret(kPointerSize);
+
+  __ bind(&runtime_call_clear_stack);
+  __ fstp(0);
+  __ bind(&runtime_call);
+  __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+
+  __ bind(&nan_result);
+  __ fstp(0);  // Remove argument from FPU stack.
+  __ LoadRoot(rax, Heap::kNanValueRootIndex);
+  __ movq(Operand(rcx, 0), rbx);
+  __ movq(Operand(rcx, 2 * kIntSize), rax);
+  __ ret(kPointerSize);
+}
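A hedged C++ version of the cache hash computed above from the double's
64 bits, not part of the patch:

    #include <cstdint>

    uint32_t TranscendentalHash(uint64_t bits, uint32_t cache_size) {
      // cache_size must be a power of two (asserted in the stub).
      uint32_t h = static_cast<uint32_t>(bits) ^
                   static_cast<uint32_t>(bits >> 32);
      h ^= h >> 16;
      h ^= h >> 8;
      return h & (cache_size - 1);
    }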
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+  switch (type_) {
+    // Add more cases when necessary.
+    case TranscendentalCache::SIN: return Runtime::kMath_sin;
+    case TranscendentalCache::COS: return Runtime::kMath_cos;
+    default:
+      UNIMPLEMENTED();
+      return Runtime::kAbort;
+  }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
+                                                Label* on_nan_result) {
+  // Registers:
+  // rbx: Bits of input double. Must be preserved.
+  // rcx: Pointer to cache entry. Must be preserved.
+  // st(0): Input double
+  Label done;
+  ASSERT(type_ == TranscendentalCache::SIN ||
+         type_ == TranscendentalCache::COS);
+  // More transcendental types can be added later.
+
+  // Both fsin and fcos require arguments in the range +/-2^63 and
+  // return NaN for infinities and NaN. They can share all code except
+  // the actual fsin/fcos operation.
+  Label in_range;
+  // If the argument is outside the range -2^63..2^63, fsin/fcos don't
+  // work, so we must reduce it to the appropriate range first.
+  __ movq(rdi, rbx);
+  // Move exponent and sign bits to low bits.
+  __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
+  // Remove sign bit.
+  __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
+  int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
+  __ cmpl(rdi, Immediate(supported_exponent_limit));
+  __ j(below, &in_range);
+  // Check for infinity and NaN. Both return NaN for sin.
+  __ cmpl(rdi, Immediate(0x7ff));
+  __ j(equal, on_nan_result);
+
+  // Use fpmod to restrict argument to the range +/-2*PI.
+  __ fldpi();
+  __ fadd(0);
+  __ fld(1);
+  // FPU Stack: input, 2*pi, input.
+  {
+    Label no_exceptions;
+    __ fwait();
+    __ fnstsw_ax();
+    // Clear if Illegal Operand or Zero Division exceptions are set.
+    __ testl(rax, Immediate(5));  // #IO and #ZD flags of FPU status word.
+    __ j(zero, &no_exceptions);
+    __ fnclex();
+    __ bind(&no_exceptions);
+  }
+
+  // Compute st(0) % st(1)
+  {
+    Label partial_remainder_loop;
+    __ bind(&partial_remainder_loop);
+    __ fprem1();
+    __ fwait();
+    __ fnstsw_ax();
+    __ testl(rax, Immediate(0x400));  // Check C2 bit of FPU status word.
+    // If C2 is set, the computation only has a partial result. Loop to
+    // continue the computation.
+    __ j(not_zero, &partial_remainder_loop);
+  }
+  // FPU Stack: input, 2*pi, input % 2*pi
+  __ fstp(2);
+  // FPU Stack: input % 2*pi, 2*pi,
+  __ fstp(0);
+  // FPU Stack: input % 2*pi
+  __ bind(&in_range);
+  switch (type_) {
+    case TranscendentalCache::SIN:
+      __ fsin();
+      break;
+    case TranscendentalCache::COS:
+      __ fcos();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  __ bind(&done);
+}
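A hedged C++ analogue of the operation above, not part of the patch: fsin and
fcos only accept arguments with |x| < 2^63, so larger finite inputs are first
reduced modulo 2*pi (the fprem1 loop), while infinities and NaNs short-circuit
to NaN.

    #include <cmath>

    double GuardedSin(double x) {
      if (std::isnan(x) || std::isinf(x)) return NAN;  // on_nan_result path
      if (std::fabs(x) >= 9223372036854775808.0 /* 2^63 */) {
        x = std::remainder(x, 2.0 * M_PI);             // the fprem1 loop
      }
      return std::sin(x);
    }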
+
+
 // Get the integer part of a heap number.  Surprisingly, all this bit twiddling
 // is faster than using the built-in instructions on floating point registers.
 // Trashes rdi and rbx.  Dest is rcx.  Source cannot be rcx or one of the
@@ -6801,11 +8051,11 @@
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
   // at compilation.
-#ifndef V8_NATIVE_REGEXP
-  __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
-#else  // V8_NATIVE_REGEXP
+#ifdef V8_INTERPRETED_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#else  // V8_INTERPRETED_REGEXP
   if (!FLAG_regexp_entry_native) {
-    __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
     return;
   }
 
@@ -6873,17 +8123,17 @@
   Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
   __ j(NegateCondition(is_string), &runtime);
   // Get the length of the string to rbx.
-  __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+  __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
 
-  // rbx: Length of subject string
+  // rbx: Length of subject string as smi
   // rcx: RegExp data (FixedArray)
   // rdx: Number of capture registers
   // Check that the third argument is a positive smi less than the string
-  // length. A negative value will be greater (usigned comparison).
+  // length. A negative value will be greater (unsigned comparison).
   __ movq(rax, Operand(rsp, kPreviousIndexOffset));
-  __ SmiToInteger32(rax, rax);
-  __ cmpl(rax, rbx);
-  __ j(above, &runtime);
+  __ JumpIfNotSmi(rax, &runtime);
+  __ SmiCompare(rax, rbx);
+  __ j(above_equal, &runtime);
 
   // rcx: RegExp data (FixedArray)
   // rdx: Number of capture registers
@@ -6925,9 +8175,8 @@
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  __ movl(rdx, rbx);
-  __ andb(rdx, Immediate(kStringRepresentationMask));
-  __ cmpb(rdx, Immediate(kConsStringTag));
+  __ andb(rbx, Immediate(kStringRepresentationMask));
+  __ cmpb(rbx, Immediate(kConsStringTag));
   __ j(not_equal, &runtime);
   __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
   __ Cmp(rdx, Factory::empty_string());
@@ -6946,7 +8195,8 @@
   // rcx: RegExp data (FixedArray)
   // Check that the irregexp code has been generated for an ascii string. If
   // it has, the field contains a code object otherwise it contains the hole.
-  __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kTwoByteStringTag));
+  const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
+  __ cmpb(rbx, Immediate(kSeqTwoByteString));
   __ j(equal, &seq_two_byte_string);
   if (FLAG_debug_code) {
     __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
@@ -7036,12 +8286,14 @@
   // Argument 3: Start of string data
   Label setup_two_byte, setup_rest;
   __ testb(rdi, rdi);
-  __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
   __ j(zero, &setup_two_byte);
+  __ SmiToInteger32(rdi, rdi);
   __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
   __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
   __ jmp(&setup_rest);
   __ bind(&setup_two_byte);
+  __ SmiToInteger32(rdi, rdi);
   __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
   __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
 
@@ -7072,7 +8324,7 @@
   // Result must now be exception. If there is no pending exception already a
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
-  // TODO(592) Rerunning the RegExp to get the stack overflow exception.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   ExternalReference pending_exception_address(Top::k_pending_exception_address);
   __ movq(kScratchRegister, pending_exception_address);
   __ Cmp(kScratchRegister, Factory::the_hole_value());
@@ -7119,7 +8371,6 @@
   // rcx: offsets vector
   // rdx: number of capture registers
   Label next_capture, done;
-  __ movq(rax, Operand(rsp, kPreviousIndexOffset));
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
@@ -7128,12 +8379,6 @@
   // Read the value from the static offsets vector buffer and make it a smi.
   __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
   __ Integer32ToSmi(rdi, rdi, &runtime);
-  // Add previous index (from its stack slot) if value is not negative.
-  Label capture_negative;
-  // Negative flag set by smi convertion above.
-  __ j(negative, &capture_negative);
-  __ SmiAdd(rdi, rdi, rax, &runtime);  // Add previous index.
-  __ bind(&capture_negative);
   // Store the smi value in the last match info.
   __ movq(FieldOperand(rbx,
                        rdx,
@@ -7149,65 +8394,208 @@
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
-#endif  // V8_NATIVE_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+#endif  // V8_INTERPRETED_REGEXP
+}
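Several movl-to-movq changes in this stub follow from string lengths now being
stored as smis, so any raw use of a length must be untagged first. A hedged
sketch of x64 smi untagging as used in this revision, not part of the patch:

    #include <cstdint>

    // x64 smis keep the 32-bit value in the upper half of the word, so
    // untagging is an arithmetic shift right by 32.
    int32_t SmiToInt32(intptr_t raw_smi) {
      return static_cast<int32_t>(raw_smi >> 32);
    }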
+
+
+void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+                                                        Register hash,
+                                                        Register mask) {
+  __ and_(hash, mask);
+  // Each entry in the string cache consists of two pointer-sized fields,
+  // but the times_twice_pointer_size (multiply-by-16) scale factor is not
+  // supported by x64 addressing modes, so the entry index has to be
+  // premultiplied before the lookup.
+  __ shl(hash, Immediate(kPointerSizeLog2 + 1));
+}
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                                         Register object,
+                                                         Register result,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         bool object_is_smi,
+                                                         Label* not_found) {
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch1;
+  Register scratch = scratch2;
+
+  // Load the number string cache.
+  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  __ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+  __ shrl(mask, Immediate(1));  // Divide length by two (length is not a smi).
+  __ subl(mask, Immediate(1));  // Make mask.
+
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Label is_smi;
+  Label load_result_from_cache;
+  if (!object_is_smi) {
+    __ JumpIfSmi(object, &is_smi);
+    __ CheckMap(object, Factory::heap_number_map(), not_found, true);
+
+    ASSERT_EQ(8, kDoubleSize);
+    __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+    __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+    GenerateConvertHashCodeToIndex(masm, scratch, mask);
+
+    Register index = scratch;
+    Register probe = mask;
+    __ movq(probe,
+            FieldOperand(number_string_cache,
+                         index,
+                         times_1,
+                         FixedArray::kHeaderSize));
+    __ JumpIfSmi(probe, not_found);
+    ASSERT(CpuFeatures::IsSupported(SSE2));
+    CpuFeatures::Scope fscope(SSE2);
+    __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+    __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+    __ comisd(xmm0, xmm1);
+    __ j(parity_even, not_found);  // Bail out if NaN is involved.
+    __ j(not_equal, not_found);  // The cache did not contain this value.
+    __ jmp(&load_result_from_cache);
+  }
+
+  __ bind(&is_smi);
+  __ movq(scratch, object);
+  __ SmiToInteger32(scratch, scratch);
+  GenerateConvertHashCodeToIndex(masm, scratch, mask);
+
+  Register index = scratch;
+  // Check if the entry is the smi we are looking for.
+  __ cmpq(object,
+          FieldOperand(number_string_cache,
+                       index,
+                       times_1,
+                       FixedArray::kHeaderSize));
+  __ j(not_equal, not_found);
+
+  // Get the result from the cache.
+  __ bind(&load_result_from_cache);
+  __ movq(result,
+          FieldOperand(number_string_cache,
+                       index,
+                       times_1,
+                       FixedArray::kHeaderSize + kPointerSize));
+  __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  __ movq(rbx, Operand(rsp, kPointerSize));
+
+  // Generate code to lookup number in the number string cache.
+  GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
+  __ ret(1 * kPointerSize);
+
+  __ bind(&runtime);
+  // Handle number to string in the runtime system if not found in the cache.
+  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
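A hedged sketch of the cache indexing used above, not part of the patch: smis
hash to their value, doubles to the xor of their two 32-bit halves, and the
masked hash is premultiplied by 16 because x64 addressing modes only scale up
to 8.

    #include <cstdint>

    uint32_t DoubleHash(uint64_t bits) {
      return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
    }

    uint32_t NumberStringCacheOffset(uint32_t hash, uint32_t mask) {
      return (hash & mask) << 4;  // kPointerSizeLog2 + 1 == 4 on x64
    }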
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  masm->RecordWriteHelper(object_, addr_, scratch_);
+  masm->ret(0);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+  ASSERT(cc != equal);
+  ASSERT((cc == less) || (cc == less_equal)
+      || (cc == greater) || (cc == greater_equal));
+  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
 }
 
 
 void CompareStub::Generate(MacroAssembler* masm) {
   Label call_builtin, done;
-
+  // The compare stub returns a positive, negative, or zero 64-bit integer
+  // value in rax, corresponding to the result of comparing the two inputs.
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
 
-  if (cc_ == equal) {  // Both strict and non-strict.
-    Label slow;  // Fallthrough label.
-    // Equality is almost reflexive (everything but NaN), so start by testing
-    // for "identity and not NaN".
-    {
-      Label not_identical;
-      __ cmpq(rax, rdx);
-      __ j(not_equal, &not_identical);
-      // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
-      // so we do the second best thing - test it ourselves.
+  // Two identical objects are equal unless they are both NaN or undefined.
+  {
+    Label not_identical;
+    __ cmpq(rax, rdx);
+    __ j(not_equal, &not_identical);
 
-      if (never_nan_nan_) {
-        __ xor_(rax, rax);
-        __ ret(0);
-      } else {
-        Label return_equal;
-        Label heap_number;
-        // If it's not a heap number, then return equal.
-        __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-               Factory::heap_number_map());
-        __ j(equal, &heap_number);
-        __ bind(&return_equal);
-        __ xor_(rax, rax);
-        __ ret(0);
+    if (cc_ != equal) {
+      // Check for undefined.  undefined OP undefined is false even though
+      // undefined == undefined.
+      Label check_for_nan;
+      __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+      __ j(not_equal, &check_for_nan);
+      __ Set(rax, NegativeComparisonResult(cc_));
+      __ ret(0);
+      __ bind(&check_for_nan);
+    }
 
-        __ bind(&heap_number);
-        // It is a heap number, so return non-equal if it's NaN and equal if
-        // it's not NaN.
-        // The representation of NaN values has all exponent bits (52..62) set,
-        // and not all mantissa bits (0..51) clear.
-        // We only allow QNaNs, which have bit 51 set (which also rules out
-        // the value being Infinity).
+    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+    // so we do the second best thing - test it ourselves.
+    // Note: if cc_ != equal, never_nan_nan_ is not used.
+    if (never_nan_nan_ && (cc_ == equal)) {
+      __ Set(rax, EQUAL);
+      __ ret(0);
+    } else {
+      Label return_equal;
+      Label heap_number;
+      // If it's not a heap number, then return equal.
+      __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+             Factory::heap_number_map());
+      __ j(equal, &heap_number);
+      __ bind(&return_equal);
+      __ Set(rax, EQUAL);
+      __ ret(0);
 
-        // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
-        // all bits in the mask are set. We only need to check the word
-        // that contains the exponent and high bit of the mantissa.
-        ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
-        __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
-        __ xorl(rax, rax);
-        __ addl(rdx, rdx);  // Shift value and mask so mask applies to top bits.
-        __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
+      __ bind(&heap_number);
+      // It is a heap number, so return non-equal if it's NaN and equal if
+      // it's not NaN.
+      // The representation of NaN values has all exponent bits (52..62) set,
+      // and not all mantissa bits (0..51) clear.
+      // We only allow QNaNs, which have bit 51 set (which also rules out
+      // the value being Infinity).
+
+      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+      // all bits in the mask are set. We only need to check the word
+      // that contains the exponent and high bit of the mantissa.
+      ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+      __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+      __ xorl(rax, rax);
+      __ addl(rdx, rdx);  // Shift value and mask so mask applies to top bits.
+      __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
+      if (cc_ == equal) {
         __ setcc(above_equal, rax);
         __ ret(0);
+      } else {
+        Label nan;
+        __ j(above_equal, &nan);
+        __ Set(rax, EQUAL);
+        __ ret(0);
+        __ bind(&nan);
+        __ Set(rax, NegativeComparisonResult(cc_));
+        __ ret(0);
       }
-
-      __ bind(&not_identical);
     }
 
+    __ bind(&not_identical);
+  }
+
+  if (cc_ == equal) {  // Both strict and non-strict.
+    Label slow;  // Fallthrough label.
+
     // If we're doing a strict equality comparison, we don't have to do
     // type conversion, so we generate code to do fast comparison for objects
     // and oddballs. Non-smi numbers and strings still go through the usual
@@ -7269,36 +8657,43 @@
   __ push(rdx);
   __ push(rcx);
 
-  // Inlined floating point compare.
-  // Call builtin if operands are not floating point or smi.
-  Label check_for_symbols;
-  // Push arguments on stack, for helper functions.
-  FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
-  FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
-  __ FCmp();
+  // Generate the number comparison code.
+  if (include_number_compare_) {
+    Label non_number_comparison;
+    Label unordered;
+    FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0,
+                                          &non_number_comparison);
+    FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
+                                          &non_number_comparison);
 
-  // Jump to builtin for NaN.
-  __ j(parity_even, &call_builtin);
+    __ comisd(xmm0, xmm1);
 
-  // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
-  Label below_lbl, above_lbl;
-  // use rdx, rax to convert unsigned to signed comparison
-  __ j(below, &below_lbl);
-  __ j(above, &above_lbl);
+    // Don't base result on EFLAGS when a NaN is involved.
+    __ j(parity_even, &unordered);
+    // Return a result of -1, 0, or 1, based on EFLAGS.
+    __ movq(rax, Immediate(0));  // equal
+    __ movq(rcx, Immediate(1));
+    __ cmovq(above, rax, rcx);
+    __ movq(rcx, Immediate(-1));
+    __ cmovq(below, rax, rcx);
+    __ ret(2 * kPointerSize);  // rax, rdx were pushed
 
-  __ xor_(rax, rax);  // equal
-  __ ret(2 * kPointerSize);
+    // If one of the numbers was NaN, then the result is always false.
+    // The cc is never not-equal.
+    __ bind(&unordered);
+    ASSERT(cc_ != not_equal);
+    if (cc_ == less || cc_ == less_equal) {
+      __ Set(rax, 1);
+    } else {
+      __ Set(rax, -1);
+    }
+    __ ret(2 * kPointerSize);  // rax, rdx were pushed
 
-  __ bind(&below_lbl);
-  __ movq(rax, Immediate(-1));
-  __ ret(2 * kPointerSize);
-
-  __ bind(&above_lbl);
-  __ movq(rax, Immediate(1));
-  __ ret(2 * kPointerSize);  // rax, rdx were pushed
+    // The number comparison code did not provide a valid result.
+    __ bind(&non_number_comparison);
+  }
 
   // Fast negative check for symbol-to-symbol equality.
-  __ bind(&check_for_symbols);
   Label check_for_strings;
   if (cc_ == equal) {
     BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
@@ -7341,14 +8736,7 @@
     builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
   } else {
     builtin = Builtins::COMPARE;
-    int ncr;  // NaN compare result
-    if (cc_ == less || cc_ == less_equal) {
-      ncr = GREATER;
-    } else {
-      ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
-      ncr = LESS;
-    }
-    __ Push(Smi::FromInt(ncr));
+    __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
   }
 
   // Restore return address on the stack.
@@ -7560,7 +8948,7 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
+  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
 }
 
 
@@ -7617,26 +9005,7 @@
   __ pop(rbx);  // Return address.
   __ push(rdx);
   __ push(rbx);
-  Runtime::Function* f =
-      Runtime::FunctionForId(Runtime::kGetArgumentsProperty);
-  __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
-}
-
-
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor;
-  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
-                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame and return it.
-  // Otherwise nothing to do: The number of formal parameters has already been
-  // passed in register eax by calling function. Just return it.
-  __ cmovq(equal, rax,
-           Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ ret(0);
+  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
 }
 
 
@@ -7674,7 +9043,8 @@
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
                               bool do_gc,
-                              bool always_allocate_scope) {
+                              bool always_allocate_scope,
+                              int /* alignment_skew */) {
   // rax: result parameter for PerformGC, if any.
   // rbx: pointer to C function  (C callee-saved).
   // rbp: frame pointer  (restored after C call).
@@ -7688,11 +9058,19 @@
   // Complex results must be written to address passed as first argument.
   // AMD64 calling convention: a struct of two pointers in rax+rdx
 
+  // Check stack alignment.
+  if (FLAG_debug_code) {
+    __ CheckStackAlignment();
+  }
+
   if (do_gc) {
-    // Pass failure code returned from last attempt as first argument to GC.
+    // Pass failure code returned from last attempt as first argument to
+    // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
+    // stack is known to be aligned. This function takes one argument, which
+    // is passed in a register.
 #ifdef _WIN64
     __ movq(rcx, rax);
-#else  // ! defined(_WIN64)
+#else  // _WIN64
     __ movq(rdi, rax);
 #endif
     __ movq(kScratchRegister,
@@ -7726,7 +9104,7 @@
     __ lea(rdx, Operand(rsp, 4 * kPointerSize));
   }
 
-#else  // ! defined(_WIN64)
+#else  // _WIN64
   // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
   __ movq(rdi, r14);  // argc.
   __ movq(rsi, r15);  // argv.
@@ -8108,8 +9486,7 @@
   __ push(rax);
 
   // Do tail-call to runtime routine.
-  Runtime::Function* f = Runtime::FunctionForId(Runtime::kStackGuard);
-  __ TailCallRuntime(ExternalReference(f), 1, f->result_size);
+  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
 }
 
 
@@ -8148,6 +9525,27 @@
 }
 
 
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+                                           Register src,
+                                           XMMRegister dst,
+                                           Label* not_number) {
+  Label load_smi, done;
+  ASSERT(!src.is(kScratchRegister));
+  __ JumpIfSmi(src, &load_smi);
+  __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+  __ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
+  __ j(not_equal, not_number);
+  __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi);
+  __ SmiToInteger32(kScratchRegister, src);
+  __ cvtlsi2sd(dst, kScratchRegister);
+
+  __ bind(&done);
+}
+
+
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             XMMRegister dst1,
                                             XMMRegister dst2) {
@@ -8286,14 +9684,15 @@
   }
 
   OS::SNPrintF(Vector<char>(name_, len),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s",
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s_%s",
                op_name,
                overwrite_name,
                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
                args_in_registers_ ? "RegArgs" : "StackArgs",
                args_reversed_ ? "_R" : "",
                use_sse3_ ? "SSE3" : "SSE2",
-               NumberInfo::ToString(operands_type_));
+               static_operands_type_.ToString(),
+               BinaryOpIC::GetName(runtime_operands_type_));
   return name_;
 }
 
@@ -8443,8 +9842,8 @@
 
 
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
-  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
-  // dividend in eax and edx free for the division.  Use eax, ebx for those.
+  // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
+  // dividend in rax and rdx free for the division.  Use rax, rbx for those.
   Comment load_comment(masm, "-- Load arguments");
   Register left = rdx;
   Register right = rax;
@@ -8543,7 +9942,7 @@
       break;
   }
 
-  // 4. Emit return of result in eax.
+  // 4. Emit return of result in rax.
   GenerateReturn(masm);
 
   // 5. For some operations emit inline code to perform floating point
@@ -8604,199 +10003,240 @@
 
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
-  if (HasSmiCodeInStub()) {
+
+  if (ShouldGenerateSmiCode()) {
     GenerateSmiCode(masm, &call_runtime);
   } else if (op_ != Token::MOD) {
-    GenerateLoadArguments(masm);
+    if (!HasArgsInRegisters()) {
+      GenerateLoadArguments(masm);
+    }
   }
   // Floating point case.
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      // rax: y
-      // rdx: x
-      if (NumberInfo::IsNumber(operands_type_)) {
-        if (FLAG_debug_code) {
-          // Assert at runtime that inputs are only numbers.
-          __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
-          __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
+  if (ShouldGenerateFPCode()) {
+    switch (op_) {
+      case Token::ADD:
+      case Token::SUB:
+      case Token::MUL:
+      case Token::DIV: {
+        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+            HasSmiCodeInStub()) {
+          // Execution reaches this point when the first non-smi argument occurs
+          // (and only if smi code is generated). This is the right moment to
+          // patch to HEAP_NUMBERS state. The transition is attempted only for
+          // the four basic operations. The stub stays in the DEFAULT state
+          // forever for all other operations (also if smi code is skipped).
+          GenerateTypeTransition(masm);
         }
+
+        Label not_floats;
+        // rax: y
+        // rdx: x
+      if (static_operands_type_.IsNumber() && FLAG_debug_code) {
+        // Assert at runtime that inputs are only numbers.
+        __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
+        __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
       } else {
         FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
       }
-      // Fast-case: Both operands are numbers.
-      // xmm4 and xmm5 are volatile XMM registers.
-      FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
+        // Fast-case: Both operands are numbers.
+        // xmm4 and xmm5 are volatile XMM registers.
+        FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
 
-      switch (op_) {
-        case Token::ADD: __ addsd(xmm4, xmm5); break;
-        case Token::SUB: __ subsd(xmm4, xmm5); break;
-        case Token::MUL: __ mulsd(xmm4, xmm5); break;
-        case Token::DIV: __ divsd(xmm4, xmm5); break;
-        default: UNREACHABLE();
-      }
-      // Allocate a heap number, if needed.
-      Label skip_allocation;
-      OverwriteMode mode = mode_;
-      if (HasArgsReversed()) {
-        if (mode == OVERWRITE_RIGHT) {
-          mode = OVERWRITE_LEFT;
-        } else if (mode == OVERWRITE_LEFT) {
-          mode = OVERWRITE_RIGHT;
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm4, xmm5); break;
+          case Token::SUB: __ subsd(xmm4, xmm5); break;
+          case Token::MUL: __ mulsd(xmm4, xmm5); break;
+          case Token::DIV: __ divsd(xmm4, xmm5); break;
+          default: UNREACHABLE();
         }
-      }
-      switch (mode) {
-        case OVERWRITE_LEFT:
-          __ JumpIfNotSmi(rdx, &skip_allocation);
-          __ AllocateHeapNumber(rbx, rcx, &call_runtime);
-          __ movq(rdx, rbx);
-          __ bind(&skip_allocation);
-          __ movq(rax, rdx);
-          break;
-        case OVERWRITE_RIGHT:
-          // If the argument in rax is already an object, we skip the
-          // allocation of a heap number.
-          __ JumpIfNotSmi(rax, &skip_allocation);
-          // Fall through!
-        case NO_OVERWRITE:
-          // Allocate a heap number for the result. Keep rax and rdx intact
-          // for the possible runtime call.
-          __ AllocateHeapNumber(rbx, rcx, &call_runtime);
-          __ movq(rax, rbx);
-          __ bind(&skip_allocation);
-          break;
-        default: UNREACHABLE();
-      }
-      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
-      GenerateReturn(masm);
-    }
-    case Token::MOD: {
-      // For MOD we go directly to runtime in the non-smi case.
-      break;
-    }
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR: {
-      Label skip_allocation, non_smi_result;
-      FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
-      switch (op_) {
-        case Token::BIT_OR:  __ orl(rax, rcx); break;
-        case Token::BIT_AND: __ andl(rax, rcx); break;
-        case Token::BIT_XOR: __ xorl(rax, rcx); break;
-        case Token::SAR: __ sarl_cl(rax); break;
-        case Token::SHL: __ shll_cl(rax); break;
-        case Token::SHR: __ shrl_cl(rax); break;
-        default: UNREACHABLE();
-      }
-      if (op_ == Token::SHR) {
-        // Check if result is non-negative. This can only happen for a shift
-        // by zero, which also doesn't update the sign flag.
-        __ testl(rax, rax);
-        __ j(negative, &non_smi_result);
-      }
-      __ JumpIfNotValidSmiValue(rax, &non_smi_result);
-      // Tag smi result, if possible, and return.
-      __ Integer32ToSmi(rax, rax);
-      GenerateReturn(masm);
-
-      // All ops except SHR return a signed int32 that we load in a HeapNumber.
-      if (op_ != Token::SHR && non_smi_result.is_linked()) {
-        __ bind(&non_smi_result);
-        // Allocate a heap number if needed.
-        __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
-        switch (mode_) {
+        // Allocate a heap number, if needed.
+        Label skip_allocation;
+        OverwriteMode mode = mode_;
+        if (HasArgsReversed()) {
+          if (mode == OVERWRITE_RIGHT) {
+            mode = OVERWRITE_LEFT;
+          } else if (mode == OVERWRITE_LEFT) {
+            mode = OVERWRITE_RIGHT;
+          }
+        }
+        switch (mode) {
           case OVERWRITE_LEFT:
+            __ JumpIfNotSmi(rdx, &skip_allocation);
+            __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+            __ movq(rdx, rbx);
+            __ bind(&skip_allocation);
+            __ movq(rax, rdx);
+            break;
           case OVERWRITE_RIGHT:
-            // If the operand was an object, we skip the
+            // If the argument in rax is already an object, we skip the
             // allocation of a heap number.
-            __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
-                                 1 * kPointerSize : 2 * kPointerSize));
             __ JumpIfNotSmi(rax, &skip_allocation);
             // Fall through!
           case NO_OVERWRITE:
-            __ AllocateHeapNumber(rax, rcx, &call_runtime);
+            // Allocate a heap number for the result. Keep rax and rdx intact
+            // for the possible runtime call.
+            __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+            __ movq(rax, rbx);
             __ bind(&skip_allocation);
             break;
           default: UNREACHABLE();
         }
-        // Store the result in the HeapNumber and return.
-        __ movq(Operand(rsp, 1 * kPointerSize), rbx);
-        __ fild_s(Operand(rsp, 1 * kPointerSize));
-        __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
         GenerateReturn(masm);
+        __ bind(&not_floats);
+        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+            !HasSmiCodeInStub()) {
+            // Execution reaches this point when the first non-number argument
+            // occurs (and only if smi code is skipped from the stub, otherwise
+            // the patching has already been done earlier in this case branch).
+            // A perfect moment to try patching to STRINGS for ADD operation.
+            if (op_ == Token::ADD) {
+              GenerateTypeTransition(masm);
+            }
+        }
+        break;
       }
+      case Token::MOD: {
+        // For MOD we go directly to runtime in the non-smi case.
+        break;
+      }
+      case Token::BIT_OR:
+      case Token::BIT_AND:
+      case Token::BIT_XOR:
+      case Token::SAR:
+      case Token::SHL:
+      case Token::SHR: {
+        Label skip_allocation, non_smi_result;
+        FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
+        switch (op_) {
+          case Token::BIT_OR:  __ orl(rax, rcx); break;
+          case Token::BIT_AND: __ andl(rax, rcx); break;
+          case Token::BIT_XOR: __ xorl(rax, rcx); break;
+          case Token::SAR: __ sarl_cl(rax); break;
+          case Token::SHL: __ shll_cl(rax); break;
+          case Token::SHR: __ shrl_cl(rax); break;
+          default: UNREACHABLE();
+        }
+        if (op_ == Token::SHR) {
+          // Check if result is non-negative. This can only happen for a shift
+          // by zero, which also doesn't update the sign flag.
+          __ testl(rax, rax);
+          __ j(negative, &non_smi_result);
+        }
+        __ JumpIfNotValidSmiValue(rax, &non_smi_result);
+        // Tag smi result, if possible, and return.
+        __ Integer32ToSmi(rax, rax);
+        GenerateReturn(masm);
 
-      // SHR should return uint32 - go to runtime for non-smi/negative result.
-      if (op_ == Token::SHR) {
-        __ bind(&non_smi_result);
+        // All ops except SHR return a signed int32 that we load in
+        // a HeapNumber.
+        if (op_ != Token::SHR && non_smi_result.is_linked()) {
+          __ bind(&non_smi_result);
+          // Allocate a heap number if needed.
+          __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
+          switch (mode_) {
+            case OVERWRITE_LEFT:
+            case OVERWRITE_RIGHT:
+              // If the operand was an object, we skip the
+              // allocation of a heap number.
+              __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
+                                   1 * kPointerSize : 2 * kPointerSize));
+              __ JumpIfNotSmi(rax, &skip_allocation);
+              // Fall through!
+            case NO_OVERWRITE:
+              __ AllocateHeapNumber(rax, rcx, &call_runtime);
+              __ bind(&skip_allocation);
+              break;
+            default: UNREACHABLE();
+          }
+          // Store the result in the HeapNumber and return.
+          __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+          __ fild_s(Operand(rsp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+          GenerateReturn(masm);
+        }
+
+        // SHR should return uint32 - go to runtime for non-smi/negative result.
+        if (op_ == Token::SHR) {
+          __ bind(&non_smi_result);
+        }
+        break;
       }
-      break;
+      default: UNREACHABLE(); break;
     }
-    default: UNREACHABLE(); break;
   }
 
   // If all else fails, use the runtime system to get the correct
   // result. If arguments was passed in registers now place them on the
   // stack in the correct order below the return address.
   __ bind(&call_runtime);
+
   if (HasArgsInRegisters()) {
-    __ pop(rcx);
-    if (HasArgsReversed()) {
-      __ push(rax);
-      __ push(rdx);
-    } else {
-      __ push(rdx);
-      __ push(rax);
-    }
-    __ push(rcx);
+    GenerateRegisterArgsPush(masm);
   }
+
   switch (op_) {
     case Token::ADD: {
+      // Registers containing left and right operands respectively.
+      Register lhs, rhs;
+
+      if (HasArgsReversed()) {
+        lhs = rax;
+        rhs = rdx;
+      } else {
+        lhs = rdx;
+        rhs = rax;
+      }
+
       // Test for string arguments before calling runtime.
-      Label not_strings, both_strings, not_string1, string1;
+      Label not_strings, both_strings, not_string1, string1, string1_smi2;
+
+      // If this stub has already generated FP-specific code, then the
+      // arguments are already in rdx and rax.
+      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+        GenerateLoadArguments(masm);
+      }
+
       Condition is_smi;
-      Result answer;
-      is_smi = masm->CheckSmi(rdx);
+      is_smi = masm->CheckSmi(lhs);
       __ j(is_smi, &not_string1);
-      __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
+      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
       __ j(above_equal, &not_string1);
 
       // First argument is a string, test second.
-      is_smi = masm->CheckSmi(rax);
-      __ j(is_smi, &string1);
-      __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
+      is_smi = masm->CheckSmi(rhs);
+      __ j(is_smi, &string1_smi2);
+      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
       __ j(above_equal, &string1);
 
       // First and second argument are strings.
-      StringAddStub stub(NO_STRING_CHECK_IN_STUB);
-      __ TailCallStub(&stub);
+      StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+      __ TailCallStub(&string_add_stub);
+
+      __ bind(&string1_smi2);
+      // First argument is a string, second is a smi. Try to look up the
+      // number string for the smi in the number string cache.
+      NumberToStringStub::GenerateLookupNumberStringCache(
+          masm, rhs, rbx, rcx, r8, true, &string1);
+
+      // Replace second argument on stack and tailcall string add stub to make
+      // the result.
+      __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+      __ TailCallStub(&string_add_stub);
 
       // Only first argument is a string.
       __ bind(&string1);
-      __ InvokeBuiltin(
-          HasArgsReversed() ?
-              Builtins::STRING_ADD_RIGHT :
-              Builtins::STRING_ADD_LEFT,
-          JUMP_FUNCTION);
+      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
 
       // First argument was not a string, test second.
       __ bind(&not_string1);
-      is_smi = masm->CheckSmi(rax);
+      is_smi = masm->CheckSmi(rhs);
       __ j(is_smi, &not_strings);
-      __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
+      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
       __ j(above_equal, &not_strings);
 
       // Only second argument is a string.
-      __ InvokeBuiltin(
-          HasArgsReversed() ?
-              Builtins::STRING_ADD_LEFT :
-              Builtins::STRING_ADD_RIGHT,
-          JUMP_FUNCTION);
+      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
 
       __ bind(&not_strings);
       // Neither argument is a string.
@@ -8840,11 +10280,9 @@
 
 
 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
-  // If arguments are not passed in registers read them from the stack.
-  if (!HasArgsInRegisters()) {
-    __ movq(rax, Operand(rsp, 1 * kPointerSize));
-    __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-  }
+  ASSERT(!HasArgsInRegisters());
+  __ movq(rax, Operand(rsp, 1 * kPointerSize));
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
 }
 
 
@@ -8859,52 +10297,276 @@
 }
 
 
-int CompareStub::MinorKey() {
-  // Encode the three parameters in a unique 16 bit value.
-  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
-  int nnn_value = (never_nan_nan_ ? 2 : 0);
-  if (cc_ != equal) nnn_value = 0;  // Avoid duplicate stubs.
-  return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  ASSERT(HasArgsInRegisters());
+  __ pop(rcx);
+  if (HasArgsReversed()) {
+    __ push(rax);
+    __ push(rdx);
+  } else {
+    __ push(rdx);
+    __ push(rax);
+  }
+  __ push(rcx);
 }
 
 
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  // Keep a copy of operands on the stack and make sure they are also in
+  // rdx, rax.
+  if (HasArgsInRegisters()) {
+    GenerateRegisterArgsPush(masm);
+  } else {
+    GenerateLoadArguments(masm);
+  }
+
+  // An internal frame is necessary to handle exceptions properly.
+  __ EnterInternalFrame();
+
+  // Push arguments on stack if the stub expects them there.
+  if (!HasArgsInRegisters()) {
+    __ push(rdx);
+    __ push(rax);
+  }
+  // Call the stub proper to get the result in rax.
+  __ call(&get_result);
+  __ LeaveInternalFrame();
+
+  // Left and right arguments are already on stack.
+  __ pop(rcx);
+  // Push the operation result. The tail call to BinaryOp_Patch will
+  // return it to the original caller.
+  __ push(rax);
+
+  // Push this stub's key.
+  __ movq(rax, Immediate(MinorKey()));
+  __ Integer32ToSmi(rax, rax);
+  __ push(rax);
+
+  // Although the operation and the type info are encoded into the key,
+  // the encoding is opaque, so push them too.
+  __ movq(rax, Immediate(op_));
+  __ Integer32ToSmi(rax, rax);
+  __ push(rax);
+
+  __ movq(rax, Immediate(runtime_operands_type_));
+  __ Integer32ToSmi(rax, rax);
+  __ push(rax);
+
+  __ push(rcx);
+
+  // Perform patching to an appropriate fast case and return the result.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+      6,
+      1);
+
+  // The entry point for the result calculation is assumed to be immediately
+  // after this sequence.
+  __ bind(&get_result);
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  GenericBinaryOpStub stub(key, type_info);
+  return stub.GetCode();
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+  // stubs the never NaN NaN condition is only taken into account if the
+  // condition is equals.
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
+  return ConditionField::encode(static_cast<unsigned>(cc_))
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+         | IncludeNumberCompareField::encode(include_number_compare_);
+}
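
The rewritten MinorKey packs the four stub parameters through V8's BitField encoders instead of hand-rolled shifts. A plain-masks sketch of that packing (the field widths and positions here are illustrative assumptions, not the actual ConditionField/StrictField layout):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Assumed layout: bit 0 = strict, bit 1 = never-NaN-NaN, bit 2 =
    // include-number-compare, remaining bits = condition code. Mirrors
    // the ASSERT(cc < (1 << 13)) above but not the real bit positions.
    uint32_t EncodeMinorKey(unsigned cc, bool strict, bool never_nan_nan,
                            bool include_number_compare) {
      assert(cc < (1u << 13));
      return (cc << 3) |
             (include_number_compare ? 1u << 2 : 0) |
             (never_nan_nan ? 1u << 1 : 0) |
             (strict ? 1u : 0);
    }

    int main() {
      uint32_t key = EncodeMinorKey(5, true, false, true);
      std::printf("cc=%u strict=%u\n", key >> 3, key & 1);  // cc=5 strict=1
      return 0;
    }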
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
 const char* CompareStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
   switch (cc_) {
-    case less: return "CompareStub_LT";
-    case greater: return "CompareStub_GT";
-    case less_equal: return "CompareStub_LE";
-    case greater_equal: return "CompareStub_GE";
-    case not_equal: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_NE_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_NO_NAN";
-        } else {
-          return "CompareStub_NE";
-        }
-      }
+    case less: cc_name = "LT"; break;
+    case greater: cc_name = "GT"; break;
+    case less_equal: cc_name = "LE"; break;
+    case greater_equal: cc_name = "GE"; break;
+    case equal: cc_name = "EQ"; break;
+    case not_equal: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
+  }
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == equal || cc_ == not_equal)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s",
+               cc_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name);
+  return name_;
+}
+
+
+void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
+                                          Register object,
+                                          Register index,
+                                          Register scratch,
+                                          Register result,
+                                          Label* receiver_not_string,
+                                          Label* index_not_smi,
+                                          Label* index_out_of_range,
+                                          Label* slow_case) {
+  Label not_a_flat_string;
+  Label try_again_with_new_string;
+  Label ascii_string;
+  Label got_char_code;
+
+  // If the receiver is a smi, trigger the non-string case.
+  __ JumpIfSmi(object, receiver_not_string);
+
+  // Fetch the instance type of the receiver into result register.
+  __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
+  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+  // If the receiver is not a string, trigger the non-string case.
+  __ testb(result, Immediate(kIsNotStringMask));
+  __ j(not_zero, receiver_not_string);
+
+  // If the index is not a smi, trigger the non-smi case.
+  __ JumpIfNotSmi(index, index_not_smi);
+
+  // Check for index out of range.
+  __ SmiCompare(index, FieldOperand(object, String::kLengthOffset));
+  __ j(above_equal, index_out_of_range);
+
+  __ bind(&try_again_with_new_string);
+  // ----------- S t a t e -------------
+  //  -- object  : string to access
+  //  -- result  : instance type of the string
+  //  -- scratch : non-negative index < length
+  // -----------------------------------
+
+  // We need special handling for non-flat strings.
+  ASSERT_EQ(0, kSeqStringTag);
+  __ testb(result, Immediate(kStringRepresentationMask));
+  __ j(not_zero, &not_a_flat_string);
+
+  // Put untagged index into scratch register.
+  __ SmiToInteger32(scratch, index);
+
+  // Check for 1-byte or 2-byte string.
+  ASSERT_EQ(0, kTwoByteStringTag);
+  __ testb(result, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string);
+
+  // 2-byte string.
+  // Load the 2-byte character code into the result register.
+  __ movzxwl(result, FieldOperand(object,
+                                  scratch,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&got_char_code);
+
+  // Handle non-flat strings.
+  __ bind(&not_a_flat_string);
+  __ and_(result, Immediate(kStringRepresentationMask));
+  __ cmpb(result, Immediate(kConsStringTag));
+  __ j(not_equal, slow_case);
+
+  // ConsString.
+  // Check that the right-hand side is the empty string (i.e. if this is
+  // really a flat string in a cons string). If that is not the case, we
+  // would rather go to the runtime system now to flatten the string.
+  __ movq(result, FieldOperand(object, ConsString::kSecondOffset));
+  __ CompareRoot(result, Heap::kEmptyStringRootIndex);
+  __ j(not_equal, slow_case);
+  // Get the first of the two strings and load its instance type.
+  __ movq(object, FieldOperand(object, ConsString::kFirstOffset));
+  __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
+  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+  __ jmp(&try_again_with_new_string);
+
+  // ASCII string.
+  __ bind(&ascii_string);
+  // Load the byte into the result register.
+  __ movzxbl(result, FieldOperand(object,
+                                  scratch,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
+  __ bind(&got_char_code);
+  __ Integer32ToSmi(result, result);
+}
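
GenerateFastCharCodeAt above unwraps cons strings whose second half is the empty string, then indexes either a one-byte (ascii) or a two-byte flat string. A standalone sketch of just the final indexing step, with the representation dispatch the instance-type test performs (the struct is hypothetical; smi handling and the cons-string walk are omitted):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical flat string: one byte per character or two bytes (UC16),
    // selected the way the stub reads kStringEncodingMask from the map.
    struct FlatString {
      bool is_two_byte;
      const uint8_t* one_byte;    // valid when !is_two_byte
      const uint16_t* two_byte;   // valid when is_two_byte
      int length;
    };

    int CharCodeAt(const FlatString& s, int index) {
      if (index < 0 || index >= s.length) return -1;  // index_out_of_range
      return s.is_two_byte ? s.two_byte[index]        // movzxwl, times_2
                           : s.one_byte[index];       // movzxbl, times_1
    }

    int main() {
      const uint8_t text[] = {'a', 'b', 'c'};
      FlatString s = {false, text, nullptr, 3};
      std::printf("%d\n", CharCodeAt(s, 1));  // 98 ('b')
      return 0;
    }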
+
+
+void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
+                                        Register code,
+                                        Register result,
+                                        Register scratch,
+                                        InvokeFlag flag) {
+  ASSERT(!code.is(result));
+
+  Label slow_case;
+  Label exit;
+
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+  __ JumpIfNotSmi(code, &slow_case);
+  __ SmiToInteger32(scratch, code);
+  __ cmpl(scratch, Immediate(String::kMaxAsciiCharCode));
+  __ j(above, &slow_case);
+
+  __ Move(result, Factory::single_character_string_cache());
+  __ movq(result, FieldOperand(result,
+                               scratch,
+                               times_pointer_size,
+                               FixedArray::kHeaderSize));
+
+  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &slow_case);
+  __ jmp(&exit);
+
+  __ bind(&slow_case);
+  if (flag == CALL_FUNCTION) {
+    __ push(code);
+    __ CallRuntime(Runtime::kCharFromCode, 1);
+    if (!result.is(rax)) {
+      __ movq(result, rax);
     }
-    case equal: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_EQ_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_NO_NAN";
-        } else {
-          return "CompareStub_EQ";
-        }
-      }
-    }
-    default: return "CompareStub";
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    ASSERT(result.is(rax));
+    __ pop(rax);  // Save return address.
+    __ push(code);
+    __ push(rax);  // Restore return address.
+    __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
+  }
+
+  __ bind(&exit);
+  if (flag == JUMP_FUNCTION) {
+    ASSERT(result.is(rax));
+    __ ret(0);
   }
 }
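
The fast case above indexes Factory::single_character_string_cache() by character code and bails out when the slot holds undefined. A sketch of that lookup shape for one-byte codes (the cache type and fallback are assumptions; the stub's slow path calls Runtime::kCharFromCode instead):

    #include <cassert>
    #include <string>

    const int kMaxAsciiCharCode = 127;  // matches the bound tested above

    // Toy stand-in for the single character string cache: an empty slot
    // plays the role of the undefined value.
    std::string CharFromCode(int code) {
      static std::string cache[kMaxAsciiCharCode + 1];
      assert(code >= 0 && code <= kMaxAsciiCharCode);
      if (!cache[code].empty()) return cache[code];            // fast path
      cache[code] = std::string(1, static_cast<char>(code));   // slow path
      return cache[code];
    }

    int main() { return CharFromCode('A') == "A" ? 0 : 1; }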
 
@@ -8936,15 +10598,15 @@
   // rdx: second string
   // Check if either of the strings are empty. In that case return the other.
   Label second_not_zero_length, both_not_zero_length;
-  __ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
-  __ testl(rcx, rcx);
+  __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
+  __ SmiTest(rcx);
   __ j(not_zero, &second_not_zero_length);
   // Second string is empty, result is first string which is already in rax.
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
   __ bind(&second_not_zero_length);
-  __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
-  __ testl(rbx, rbx);
+  __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
+  __ SmiTest(rbx);
   __ j(not_zero, &both_not_zero_length);
   // First string is empty, result is second string which is in rdx.
   __ movq(rax, rdx);
@@ -8956,16 +10618,11 @@
   // rbx: length of first string
   // rcx: length of second string
   // rdx: second string
-  // r8: instance type of first string if string check was performed above
-  // r9: instance type of first string if string check was performed above
-  Label string_add_flat_result;
+  // r8: map of first string if string check was performed above
+  // r9: map of second string if string check was performed above
+  Label string_add_flat_result, longer_than_two;
   __ bind(&both_not_zero_length);
-  // Look at the length of the result of adding the two strings.
-  __ addl(rbx, rcx);
-  // Use the runtime system when adding two one character strings, as it
-  // contains optimizations for this specific case using the symbol table.
-  __ cmpl(rbx, Immediate(2));
-  __ j(equal, &string_add_runtime);
+
   // If arguments were known to be strings, maps are not loaded into r8 and r9
   // by the code above.
   if (!string_check_) {
@@ -8975,12 +10632,42 @@
   // Get the instance types of the two strings as they will be needed soon.
   __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
   __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
+
+  // Look at the length of the result of adding the two strings.
+  ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
+  __ SmiAdd(rbx, rbx, rcx, NULL);
+  // Use the runtime system when adding two one-character strings, as it
+  // contains optimizations for this specific case using the symbol table.
+  __ SmiCompare(rbx, Smi::FromInt(2));
+  __ j(not_equal, &longer_than_two);
+
+  // Check that both strings are non-external ascii strings.
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
+                                                 &string_add_runtime);
+
+  // Get the two characters forming the new string.
+  __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+  __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+
+  // Try to look up the two character string in the symbol table. If it is
+  // not found, just allocate a new one.
+  Label make_two_character_string, make_flat_ascii_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, rbx, rcx, r14, r12, rdi, r15, &make_two_character_string);
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&make_two_character_string);
+  __ Set(rbx, 2);
+  __ jmp(&make_flat_ascii_string);
+
+  __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ cmpl(rbx, Immediate(String::kMinNonFlatLength));
+  __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
   __ j(below, &string_add_flat_result);
   // Handle exceptionally long strings in the runtime system.
   ASSERT((String::kMaxLength & 0x80000000) == 0);
-  __ cmpl(rbx, Immediate(String::kMaxLength));
+  __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
   __ j(above, &string_add_runtime);
 
   // If result is not supposed to be flat, allocate a cons string object. If
@@ -9000,7 +10687,7 @@
   __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
-  __ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
+  __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
   __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
           Immediate(String::kEmptyHashField));
   __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
@@ -9016,11 +10703,12 @@
   // Handle creating a flat result. First check that both strings are not
   // external strings.
   // rax: first string
-  // ebx: length of resulting flat string
+  // rbx: length of resulting flat string as smi
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of second string
   __ bind(&string_add_flat_result);
+  __ SmiToInteger32(rbx, rbx);
   __ movl(rcx, r8);
   __ and_(rcx, Immediate(kStringRepresentationMask));
   __ cmpl(rcx, Immediate(kExternalStringTag));
@@ -9041,6 +10729,8 @@
   __ j(zero, &non_ascii_string_add_flat_result);
   __ testl(r9, Immediate(kAsciiStringTag));
   __ j(zero, &string_add_runtime);
+
+  __ bind(&make_flat_ascii_string);
   // Both strings are ascii strings. As they are short they are both flat.
   __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
   // rcx: result string
@@ -9048,22 +10738,24 @@
   // Locate first character of result.
   __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // Locate first character of first argument
-  __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ SmiToInteger32(rdi, rdi);
   __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // rax: first char of first argument
   // rbx: result string
   // rcx: first character of result
   // rdx: second string
   // rdi: length of first argument
-  GenerateCopyCharacters(masm, rcx, rax, rdi, true);
+  StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
   // Locate first character of second argument.
-  __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
+  __ movq(rdi, FieldOperand(rdx, String::kLengthOffset));
+  __ SmiToInteger32(rdi, rdi);
   __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // rbx: result string
   // rcx: next character of result
   // rdx: first char of second argument
   // rdi: length of second argument
-  GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
+  StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
   __ movq(rax, rbx);
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
@@ -9085,37 +10777,39 @@
   // Locate first character of result.
   __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Locate first character of first argument.
-  __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ SmiToInteger32(rdi, rdi);
   __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // rax: first char of first argument
   // rbx: result string
   // rcx: first character of result
   // rdx: second argument
   // rdi: length of first argument
-  GenerateCopyCharacters(masm, rcx, rax, rdi, false);
+  StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
   // Locate first character of second argument.
-  __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
+  __ movq(rdi, FieldOperand(rdx, String::kLengthOffset));
+  __ SmiToInteger32(rdi, rdi);
   __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // rbx: result string
   // rcx: next character of result
   // rdx: first char of second argument
   // rdi: length of second argument
-  GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
+  StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
   __ movq(rax, rbx);
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
 
   // Just jump to runtime to add the two strings.
   __ bind(&string_add_runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 }
 
 
-void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
-                                            Register dest,
-                                            Register src,
-                                            Register count,
-                                            bool ascii) {
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+                                          Register dest,
+                                          Register src,
+                                          Register count,
+                                          bool ascii) {
   Label loop;
   __ bind(&loop);
   // This loop just copies one character at a time, as it is only used for very
@@ -9136,11 +10830,11 @@
 }
 
 
-void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
-                                               Register dest,
-                                               Register src,
-                                               Register count,
-                                               bool ascii) {
+void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
+                                             Register dest,
+                                             Register src,
+                                             Register count,
+                                             bool ascii) {
   // Copy characters using rep movs of doublewords. Align destination on 4 byte
   // boundary before starting rep movs. Copy remaining characters after running
   // rep movs.
@@ -9191,6 +10885,180 @@
   __ bind(&done);
 }
 
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                        Register c1,
+                                                        Register c2,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Register scratch4,
+                                                        Label* not_found) {
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that both characters are not digits, as such strings have a
+  // different hash algorithm. Don't try to look these up in the symbol table.
+  Label not_array_index;
+  __ movq(scratch, c1);
+  __ subq(scratch, Immediate(static_cast<int>('0')));
+  __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
+  __ j(above, &not_array_index);
+  __ movq(scratch, c2);
+  __ subq(scratch, Immediate(static_cast<int>('0')));
+  __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
+  __ j(below_equal, not_found);
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  GenerateHashInit(masm, hash, c1, scratch);
+  GenerateHashAddCharacter(masm, hash, c2, scratch);
+  GenerateHashGetHash(masm, hash, scratch);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ shl(c2, Immediate(kBitsPerByte));
+  __ orl(chars, c2);
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string.
+
+  // Load the symbol table.
+  Register symbol_table = c2;
+  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  __ movq(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+  __ SmiToInteger32(mask, mask);
+  __ decl(mask);
+
+  Register undefined = scratch4;
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  // Registers
+  // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:         hash of two character string (32-bit int)
+  // symbol_table: symbol table
+  // mask:         capacity mask (32-bit int)
+  // undefined:    undefined value
+  // scratch:      -
+
+  // Perform a number of probes in the symbol table.
+  static const int kProbes = 4;
+  Label found_in_symbol_table;
+  Label next_probe[kProbes];
+  for (int i = 0; i < kProbes; i++) {
+    // Calculate entry in symbol table.
+    __ movl(scratch, hash);
+    if (i > 0) {
+      __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+    }
+    __ andl(scratch, mask);
+
+    // Load the entry from the symbol table.
+    Register candidate = scratch;  // Scratch register contains candidate.
+    ASSERT_EQ(1, SymbolTable::kEntrySize);
+    __ movq(candidate,
+           FieldOperand(symbol_table,
+                        scratch,
+                        times_pointer_size,
+                        SymbolTable::kElementsStartOffset));
+
+    // If entry is undefined no string with this hash can be found.
+    __ cmpq(candidate, undefined);
+    __ j(equal, not_found);
+
+    // If length is not 2 the string is not a candidate.
+    __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
+                  Smi::FromInt(2));
+    __ j(not_equal, &next_probe[i]);
+
+    // We use kScratchRegister as a temporary register on the assumption that
+    // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
+    Register temp = kScratchRegister;
+
+    // Check that the candidate is a non-external ascii string.
+    __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
+    __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ JumpIfInstanceTypeIsNotSequentialAscii(
+        temp, temp, &next_probe[i]);
+
+    // Check if the two characters match.
+    __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+    __ andl(temp, Immediate(0x0000ffff));
+    __ cmpl(chars, temp);
+    __ j(equal, &found_in_symbol_table);
+    __ bind(&next_probe[i]);
+  }
+
+  // No matching 2 character string found by probing.
+  __ jmp(not_found);
+
+  // Scratch register contains result when we fall through to here.
+  Register result = scratch;
+  __ bind(&found_in_symbol_table);
+  if (!result.is(rax)) {
+    __ movq(rax, result);
+  }
+}
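
The loop above is open addressing over the symbol table: start at hash & mask, retry at most kProbes times at growing offsets, and reject candidates that are undefined, not of length 2, not sequential ascii, or whose two characters differ. A compact sketch over a plain array (the probe offsets n(n+1)/2 are an assumption standing in for SymbolTable::GetProbeOffset):

    #include <cstdint>
    #include <string>
    #include <vector>

    // Toy symbol table: empty slots stand in for undefined entries, and
    // the table size is assumed to be a power of two so `& mask` works.
    const std::string* ProbeTwoCharString(const std::vector<std::string>& table,
                                          uint32_t hash, char c1, char c2) {
      const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
      const int kProbes = 4;
      for (int i = 0; i < kProbes; i++) {
        uint32_t entry = (hash + i * (i + 1) / 2) & mask;  // assumed offsets
        const std::string& candidate = table[entry];
        if (candidate.empty()) return nullptr;   // undefined: not found
        if (candidate.size() != 2) continue;     // wrong length: next probe
        if (candidate[0] == c1 && candidate[1] == c2) return &candidate;
      }
      return nullptr;  // no match within kProbes attempts
    }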
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+                                    Register hash,
+                                    Register character,
+                                    Register scratch) {
+  // hash = character + (character << 10);
+  __ movl(hash, character);
+  __ shll(hash, Immediate(10));
+  __ addl(hash, character);
+  // hash ^= hash >> 6;
+  __ movl(scratch, hash);
+  __ sarl(scratch, Immediate(6));
+  __ xorl(hash, scratch);
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+                                            Register hash,
+                                            Register character,
+                                            Register scratch) {
+  // hash += character;
+  __ addl(hash, character);
+  // hash += hash << 10;
+  __ movl(scratch, hash);
+  __ shll(scratch, Immediate(10));
+  __ addl(hash, scratch);
+  // hash ^= hash >> 6;
+  __ movl(scratch, hash);
+  __ sarl(scratch, Immediate(6));
+  __ xorl(hash, scratch);
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+                                       Register hash,
+                                       Register scratch) {
+  // hash += hash << 3;
+  __ movl(scratch, hash);
+  __ shll(scratch, Immediate(3));
+  __ addl(hash, scratch);
+  // hash ^= hash >> 11;
+  __ movl(scratch, hash);
+  __ sarl(scratch, Immediate(11));
+  __ xorl(hash, scratch);
+  // hash += hash << 15;
+  __ movl(scratch, hash);
+  __ shll(scratch, Immediate(15));
+  __ addl(hash, scratch);
+
+  // if (hash == 0) hash = 27;
+  Label hash_not_zero;
+  __ testl(hash, hash);
+  __ j(not_zero, &hash_not_zero);
+  __ movl(hash, Immediate(27));
+  __ bind(&hash_not_zero);
+}
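
The three helpers implement the incremental string hash spelled out in their comments (a Jenkins-style one-at-a-time hash). Collected into a single standalone function, following those comments step for step:

    #include <cstdint>
    #include <cstdio>

    uint32_t StringHash(const char* chars, int length) {
      uint32_t hash = 0;
      for (int i = 0; i < length; i++) {
        // GenerateHashInit / GenerateHashAddCharacter per character.
        hash += static_cast<uint8_t>(chars[i]);
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      // GenerateHashGetHash finalization.
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      if (hash == 0) hash = 27;  // reserve zero, as in the stub
      return hash;
    }

    int main() {
      std::printf("%u\n", StringHash("ab", 2));
      return 0;
    }

One caveat: the assembly uses sarl (an arithmetic shift) for the right shifts, while this sketch shifts an unsigned value; the comments above do not pin down which is intended.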
 
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
@@ -9217,25 +11085,55 @@
   // rax: string
   // rbx: instance type
   // Calculate length of sub string using the smi values.
+  Label result_longer_than_two;
   __ movq(rcx, Operand(rsp, kToOffset));
   __ movq(rdx, Operand(rsp, kFromOffset));
   __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
 
   __ SmiSub(rcx, rcx, rdx, NULL);  // Overflow doesn't happen.
   __ j(negative, &runtime);
-  // Handle sub-strings of length 2 and less in the runtime system.
+  // Special handling of sub-strings of length 1 and 2. One-character strings
+  // are handled in the runtime system (looked up in the single character
+  // string cache). Two-character strings are looked up in the symbol table.
   __ SmiToInteger32(rcx, rcx);
   __ cmpl(rcx, Immediate(2));
-  __ j(below_equal, &runtime);
+  __ j(greater, &result_longer_than_two);
+  __ j(less, &runtime);
+
+  // Sub string of length 2 requested.
+  // rax: string
+  // rbx: instance type
+  // rcx: sub string length (value is 2)
+  // rdx: from index (smi)
+  __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
+
+  // Get the two characters forming the sub string.
+  __ SmiToInteger32(rdx, rdx);  // From index is no longer smi.
+  __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
+  __ movzxbq(rcx,
+             FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+  // Try to look up the two character string in the symbol table.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
+  __ ret(3 * kPointerSize);
+
+  __ bind(&make_two_character_string);
+  // Set up registers for allocating the two character string.
+  __ movq(rax, Operand(rsp, kStringOffset));
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+  __ Set(rcx, 2);
+
+  __ bind(&result_longer_than_two);
 
   // rax: string
   // rbx: instance type
   // rcx: result string length
   // Check for flat ascii string
   Label non_ascii_flat;
-  __ and_(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
-  __ cmpb(rbx, Immediate(kSeqStringTag | kAsciiStringTag));
-  __ j(not_equal, &non_ascii_flat);
+  __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
 
   // Allocate the result.
   __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
@@ -9259,7 +11157,7 @@
   // rdx: original value of rsi
   // rdi: first character of result
   // rsi: character of sub string start
-  GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
+  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
   __ movq(rsi, rdx);  // Restore rsi.
   __ IncrementCounter(&Counters::sub_string_native, 1);
   __ ret(kArgumentsSize);
@@ -9294,14 +11192,14 @@
   // rdx: original value of rsi
   // rdi: first character of result
   // rsi: character of sub string start
-  GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
+  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
   __ movq(rsi, rdx);  // Restore rsi.
   __ IncrementCounter(&Counters::sub_string_native, 1);
   __ ret(kArgumentsSize);
 
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
 }
 
 
@@ -9317,9 +11215,12 @@
   ASSERT(String::kMaxLength < 0x7fffffff);
 
   // Find minimum length and length difference.
-  __ movl(scratch1, FieldOperand(left, String::kLengthOffset));
-  __ movl(scratch4, scratch1);
-  __ subl(scratch4, FieldOperand(right, String::kLengthOffset));
+  __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
+  __ movq(scratch4, scratch1);
+  __ SmiSub(scratch4,
+            scratch4,
+            FieldOperand(right, String::kLengthOffset),
+            NULL);
   // Register scratch4 now holds left.length - right.length.
   const Register length_difference = scratch4;
   Label left_shorter;
@@ -9327,16 +11228,18 @@
   // The right string isn't longer than the left one.
   // Get the right string's length by subtracting the (non-negative) difference
   // from the left string's length.
-  __ subl(scratch1, length_difference);
+  __ SmiSub(scratch1, scratch1, length_difference, NULL);
   __ bind(&left_shorter);
   // Register scratch1 now holds Min(left.length, right.length).
   const Register min_length = scratch1;
 
   Label compare_lengths;
   // If min-length is zero, go directly to comparing lengths.
-  __ testl(min_length, min_length);
+  __ SmiTest(min_length);
   __ j(zero, &compare_lengths);
 
+  __ SmiToInteger32(min_length, min_length);
+
   // Registers scratch2 and scratch3 are free.
   Label result_not_equal;
   Label loop;
@@ -9367,7 +11270,7 @@
   // Completed loop without finding different characters.
   // Compare lengths (precomputed).
   __ bind(&compare_lengths);
-  __ testl(length_difference, length_difference);
+  __ SmiTest(length_difference);
   __ j(not_zero, &result_not_equal);
 
   // Result is EQUAL.
@@ -9421,7 +11324,7 @@
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
-  __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
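
The flat-string comparison above walks min(left.length, right.length) characters and, if no pair differs, falls back to the precomputed length difference. The same control flow in plain C++:

    #include <algorithm>
    #include <string>

    // Returns -1 (LESS), 0 (EQUAL), or 1 (GREATER), matching the smi
    // results the stub and Runtime::kStringCompare hand back.
    int CompareFlatStrings(const std::string& left, const std::string& right) {
      size_t min_length = std::min(left.size(), right.size());
      for (size_t i = 0; i < min_length; i++) {
        unsigned char l = left[i], r = right[i];
        if (l != r) return l < r ? -1 : 1;  // result_not_equal
      }
      // Completed loop without finding different characters: the sign of
      // the (precomputed) length difference decides.
      if (left.size() == right.size()) return 0;
      return left.size() < right.size() ? -1 : 1;
    }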
 
 #undef __
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 4b0c77d..5d9861b 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -28,6 +28,8 @@
 #ifndef V8_X64_CODEGEN_X64_H_
 #define V8_X64_CODEGEN_X64_H_
 
+#include "ic-inl.h"
+
 namespace v8 {
 namespace internal {
 
@@ -337,13 +339,17 @@
   bool in_spilled_code() const { return in_spilled_code_; }
   void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
 
+  // If the name is an inline runtime function call, return the number of
+  // expected arguments. Otherwise return -1.
+  static int InlineRuntimeCallArgumentsCount(Handle<String> name);
+
  private:
   // Construction/Destruction
   explicit CodeGenerator(MacroAssembler* masm);
 
   // Accessors
   inline bool is_eval();
-  Scope* scope();
+  inline Scope* scope();
 
   // Generating deferred code.
   void ProcessDeferred();
@@ -448,10 +454,11 @@
   // control destination.
   void ToBoolean(ControlDestination* destination);
 
-  void GenericBinaryOperation(
-      Token::Value op,
-      StaticType* type,
-      OverwriteMode overwrite_mode);
+  // Generate code that computes a shortcutting logical operation.
+  void GenerateLogicalBooleanOperation(BinaryOperation* node);
+
+  void GenericBinaryOperation(BinaryOperation* expr,
+                              OverwriteMode overwrite_mode);
 
   // If possible, combine two constant smi values using op to produce
   // a smi result, and push it on the virtual frame, all at compile time.
@@ -460,17 +467,16 @@
 
   // Emit code to perform a binary operation on a constant
   // smi and a likely smi.  Consumes the Result *operand.
-  Result ConstantSmiBinaryOperation(Token::Value op,
+  Result ConstantSmiBinaryOperation(BinaryOperation* expr,
                                     Result* operand,
                                     Handle<Object> constant_operand,
-                                    StaticType* type,
                                     bool reversed,
                                     OverwriteMode overwrite_mode);
 
   // Emit code to perform a binary operation on two likely smis.
   // The code to handle smi arguments is produced inline.
   // Consumes the Results *left and *right.
-  Result LikelySmiBinaryOperation(Token::Value op,
+  Result LikelySmiBinaryOperation(BinaryOperation* expr,
                                   Result* left,
                                   Result* right,
                                   OverwriteMode overwrite_mode);
@@ -479,6 +485,10 @@
                   Condition cc,
                   bool strict,
                   ControlDestination* destination);
+  void GenerateInlineNumberComparison(Result* left_side,
+                                      Result* right_side,
+                                      Condition cc,
+                                      ControlDestination* dest);
 
   // To prevent long attacker-controlled byte sequences, integer constants
   // from the JavaScript source are loaded in two parts if they are larger
@@ -506,6 +516,7 @@
   struct InlineRuntimeLUT {
     void (CodeGenerator::*method)(ZoneList<Expression*>*);
     const char* name;
+    int nargs;
   };
   static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
   bool CheckForInlineRuntimeCall(CallRuntime* node);
@@ -520,8 +531,8 @@
   // name/value pairs.
   void DeclareGlobals(Handle<FixedArray> pairs);
 
-  // Instantiate the function boilerplate.
-  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+  // Instantiate the function based on the shared function info.
+  void InstantiateFunction(Handle<SharedFunctionInfo> function_info);
 
   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);
@@ -537,7 +548,7 @@
 
   // Support for arguments.length and arguments[?].
   void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+  void GenerateArguments(ZoneList<Expression*>* args);
 
   // Support for accessing the class and value fields of an object.
   void GenerateClassOf(ZoneList<Expression*>* args);
@@ -547,6 +558,9 @@
   // Fast support for charCodeAt(n).
   void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
 
+  // Fast support for string.charAt(n) and string[n].
+  void GenerateCharFromCode(ZoneList<Expression*>* args);
+
   // Fast support for object equality testing.
   void GenerateObjectEquals(ZoneList<Expression*>* args);
 
@@ -555,7 +569,7 @@
   void GenerateGetFramePointer(ZoneList<Expression*>* args);
 
   // Fast support for Math.random().
-  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
 
   // Fast support for StringAdd.
   void GenerateStringAdd(ZoneList<Expression*>* args);
@@ -569,12 +583,27 @@
   // Support for direct calls from JavaScript to native RegExp code.
   void GenerateRegExpExec(ZoneList<Expression*>* args);
 
+  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+
+  // Support for fast native caches.
+  void GenerateGetFromCache(ZoneList<Expression*>* args);
+
   // Fast support for number to string.
   void GenerateNumberToString(ZoneList<Expression*>* args);
 
+  // Fast swapping of elements. Takes three expressions, the object and two
+  // indices. This should only be used if the indices are known to be
+  // non-negative and within bounds of the elements array at the call site.
+  void GenerateSwapElements(ZoneList<Expression*>* args);
+
+  // Fast call for custom callbacks.
+  void GenerateCallFunction(ZoneList<Expression*>* args);
+
   // Fast call to math functions.
+  void GenerateMathPow(ZoneList<Expression*>* args);
   void GenerateMathSin(ZoneList<Expression*>* args);
   void GenerateMathCos(ZoneList<Expression*>* args);
+  void GenerateMathSqrt(ZoneList<Expression*>* args);
 
 // Simple condition analysis.
   enum ConditionAnalysis {
@@ -593,6 +622,8 @@
   void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
   void CodeForSourcePosition(int pos);
 
+  void SetTypeForStackSlot(Slot* slot, TypeInfo info);
+
 #ifdef DEBUG
   // True if the registers are valid for entry to a block.  There should
   // be no frame-external references to (non-reserved) registers.
@@ -643,6 +674,22 @@
 };
 
 
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+  explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+      : type_(type) {}
+  void Generate(MacroAssembler* masm);
+ private:
+  TranscendentalCache::Type type_;
+  Major MajorKey() { return TranscendentalCache; }
+  int MinorKey() { return type_; }
+  Runtime::FunctionId RuntimeFunction();
+  void GenerateOperation(MacroAssembler* masm, Label* on_nan_result);
+};
+
+
 // Flag that indicates how to generate code for the stub GenericBinaryOpStub.
 enum GenericBinaryFlags {
   NO_GENERIC_BINARY_FLAGS = 0,
@@ -655,18 +702,32 @@
   GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
                       GenericBinaryFlags flags,
-                      NumberInfo::Type operands_type = NumberInfo::kUnknown)
+                      TypeInfo operands_type = TypeInfo::Unknown())
       : op_(op),
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),
         args_reversed_(false),
-        name_(NULL),
-        operands_type_(operands_type) {
+        static_operands_type_(operands_type),
+        runtime_operands_type_(BinaryOpIC::DEFAULT),
+        name_(NULL) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
+  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        flags_(FlagBits::decode(key)),
+        args_in_registers_(ArgsInRegistersBits::decode(key)),
+        args_reversed_(ArgsReversedBits::decode(key)),
+        use_sse3_(SSE3Bits::decode(key)),
+        static_operands_type_(TypeInfo::ExpandedRepresentation(
+            StaticTypeInfoBits::decode(key))),
+        runtime_operands_type_(type_info),
+        name_(NULL) {
+  }
+
   // Generate code to call the stub with the supplied arguments. This will add
   // code at the call site to prepare arguments either in registers or on the
   // stack together with the actual call.
@@ -686,8 +747,14 @@
   bool args_in_registers_;  // Arguments passed in registers not on the stack.
   bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;
+
+  // Number type information of operands, determined by code generator.
+  TypeInfo static_operands_type_;
+
+  // Operand type information determined at runtime.
+  BinaryOpIC::TypeInfo runtime_operands_type_;
+
   char* name_;
-  NumberInfo::Type operands_type_;
 
   const char* GetName();
 
@@ -701,35 +768,40 @@
            static_cast<int>(flags_),
            static_cast<int>(args_in_registers_),
            static_cast<int>(args_reversed_),
-           NumberInfo::ToString(operands_type_));
+           static_operands_type_.ToString());
   }
 #endif
 
-  // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
+  // Minor key encoding in 18 bits TTNNNFRASOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class SSE3Bits: public BitField<bool, 9, 1> {};
   class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
   class ArgsReversedBits: public BitField<bool, 11, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
-  class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
+  class StaticTypeInfoBits: public BitField<int, 13, 3> {};
+  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
+    // Encode the parameters in a unique 18 bit value.
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
            | FlagBits::encode(flags_)
            | SSE3Bits::encode(use_sse3_)
            | ArgsInRegistersBits::encode(args_in_registers_)
            | ArgsReversedBits::encode(args_reversed_)
-           | NumberInfoBits::encode(operands_type_);
+           | StaticTypeInfoBits::encode(
+               static_operands_type_.ThreeBitRepresentation())
+           | RuntimeTypeInfoBits::encode(runtime_operands_type_);
   }
 
   void Generate(MacroAssembler* masm);
   void GenerateSmiCode(MacroAssembler* masm, Label* slow);
   void GenerateLoadArguments(MacroAssembler* masm);
   void GenerateReturn(MacroAssembler* masm);
+  void GenerateRegisterArgsPush(MacroAssembler* masm);
+  void GenerateTypeTransition(MacroAssembler* masm);
 
   bool ArgsInRegistersSupported() {
     return (op_ == Token::ADD) || (op_ == Token::SUB)
@@ -744,29 +816,106 @@
   bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
   bool HasArgsInRegisters() { return args_in_registers_; }
   bool HasArgsReversed() { return args_reversed_; }
+
+  bool ShouldGenerateSmiCode() {
+    return HasSmiCodeInStub() &&
+        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+        runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  bool ShouldGenerateFPCode() {
+    return runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return BinaryOpIC::ToState(runtime_operands_type_);
+  }
 };
 
-
-class StringStubBase: public CodeStub {
+class StringHelper : public AllStatic {
  public:
+  // Generates fast code for getting a char code out of a string
+  // object at the given index. May bail out for four reasons (in the
+  // listed order):
+  //   * Receiver is not a string (receiver_not_string label).
+  //   * Index is not a smi (index_not_smi label).
+  //   * Index is out of range (index_out_of_range).
+  //   * Some other reason (slow_case label). In this case it's
+  //     guaranteed that the above conditions are not violated,
+  //     e.g. it's safe to assume the receiver is a string and the
+  //     index is a non-negative smi < length.
+  // When successful, object, index, and scratch are clobbered.
+  // Otherwise, scratch and result are clobbered.
+  static void GenerateFastCharCodeAt(MacroAssembler* masm,
+                                     Register object,
+                                     Register index,
+                                     Register scratch,
+                                     Register result,
+                                     Label* receiver_not_string,
+                                     Label* index_not_smi,
+                                     Label* index_out_of_range,
+                                     Label* slow_case);
+
+  // Generates code for creating a one-char string from the given char
+  // code. May do a runtime call, so any register can be clobbered
+  // and, if the given invoke flag specifies a call, an internal frame
+  // is required. In tail call mode the result must be in the rax register.
+  static void GenerateCharFromCode(MacroAssembler* masm,
+                                   Register code,
+                                   Register result,
+                                   Register scratch,
+                                   InvokeFlag flag);
+
   // Generate code for copying characters using a simple loop. This should only
   // be used in places where the number of characters is small and the
   // additional setup and checking in GenerateCopyCharactersREP adds too much
   // overhead. Copying of overlapping regions is not supported.
-  void GenerateCopyCharacters(MacroAssembler* masm,
-                              Register dest,
-                              Register src,
-                              Register count,
-                              bool ascii);
+  static void GenerateCopyCharacters(MacroAssembler* masm,
+                                     Register dest,
+                                     Register src,
+                                     Register count,
+                                     bool ascii);
 
   // Generate code for copying characters using the rep movs instruction.
   // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
   // not supported.
-  void GenerateCopyCharactersREP(MacroAssembler* masm,
-                                 Register dest,     // Must be rdi.
-                                 Register src,      // Must be rsi.
-                                 Register count,    // Must be rcx.
-                                 bool ascii);
+  static void GenerateCopyCharactersREP(MacroAssembler* masm,
+                                        Register dest,     // Must be rdi.
+                                        Register src,      // Must be rsi.
+                                        Register count,    // Must be rcx.
+                                        bool ascii);
+
+
+  // Probe the symbol table for a two character string. If the string is
+  // not found by probing, a jump to the label not_found is performed. This jump
+  // does not guarantee that the string is not in the symbol table. If the
+  // string is found the code falls through with the string in register rax.
+  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                   Register c1,
+                                                   Register c2,
+                                                   Register scratch1,
+                                                   Register scratch2,
+                                                   Register scratch3,
+                                                   Register scratch4,
+                                                   Label* not_found);
+
+  // Generate string hash.
+  static void GenerateHashInit(MacroAssembler* masm,
+                               Register hash,
+                               Register character,
+                               Register scratch);
+  static void GenerateHashAddCharacter(MacroAssembler* masm,
+                                       Register hash,
+                                       Register character,
+                                       Register scratch);
+  static void GenerateHashGetHash(MacroAssembler* masm,
+                                  Register hash,
+                                  Register scratch);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
 };
 
 
@@ -777,7 +926,7 @@
 };
 
 
-class StringAddStub: public StringStubBase {
+class StringAddStub: public CodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) {
     string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -794,7 +943,7 @@
 };
 
 
-class SubStringStub: public StringStubBase {
+class SubStringStub: public CodeStub {
  public:
   SubStringStub() {}
 
@@ -828,6 +977,79 @@
 };
 
 
+class NumberToStringStub: public CodeStub {
+ public:
+  NumberToStringStub() { }
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the register object is found in the cache the generated code falls through
+  // with the result in the result register. The object and the result register
+  // can be the same. If the number is not found in the cache, the code jumps
+  // to the label not_found; only the content of register object is unchanged.
+  static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                              Register object,
+                                              Register result,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              bool object_is_smi,
+                                              Label* not_found);
+
+ private:
+  static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+                                             Register hash,
+                                             Register mask);
+
+  Major MajorKey() { return NumberToString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("NumberToStringStub\n");
+  }
+#endif
+};
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+  RecordWriteStub(Register object, Register addr, Register scratch)
+      : object_(object), addr_(addr), scratch_(scratch) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register object_;
+  Register addr_;
+  Register scratch_;
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+           object_.code(), addr_.code(), scratch_.code());
+  }
+#endif
+
+  // Minor key encoding in 12 bits: 4 bits for each of the three
+  // registers (object, address and scratch), OOOOAAAASSSS.
+  class ScratchBits : public BitField<uint32_t, 0, 4> {};
+  class AddressBits : public BitField<uint32_t, 4, 4> {};
+  class ObjectBits : public BitField<uint32_t, 8, 4> {};
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    // Encode the registers.
+    return ObjectBits::encode(object_.code()) |
+           AddressBits::encode(addr_.code()) |
+           ScratchBits::encode(scratch_.code());
+  }
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODEGEN_X64_H_
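
The minor key above packs eight fields into 18 bits via the BitField templates. A minimal sketch of the packing idea (BitFieldSketch is an illustrative name, not V8's exact template):

    #include <cstdint>

    // Each field owns 'size' bits starting at bit 'shift'.  encode()
    // shifts a value into place; decode() extracts it again.  MinorKey()
    // above ORs the encoded fields together, and the (key, type_info)
    // constructor decodes them back out.
    template <class T, int shift, int size>
    struct BitFieldSketch {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key >> shift) & ((1u << size) - 1));
      }
    };

In the TTNNNFRASOOOOOOOMM layout this change widens the key from 16 to 18 bits: StaticTypeInfoBits keeps bits 13-15 and the new RuntimeTypeInfoBits takes bits 16-17.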
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 261b16c..5470912 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -177,9 +177,24 @@
 }
 
 
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+  masm->Abort("LiveEdit frame dropping is not supported on x64");
+}
+
+void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+  masm->Abort("LiveEdit frame dropping is not supported on x64");
+}
+
 #undef __
 
 
+void Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
+                                   Handle<Code> code) {
+  UNREACHABLE();
+}
+const int Debug::kFrameDropperFrameSize = -1;
+
+
 void BreakLocationIterator::ClearDebugBreakAtReturn() {
   rinfo()->PatchCode(original_rinfo()->pc(),
                      Assembler::kJSReturnSequenceLength);
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 547daee..bd912cd 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -996,19 +996,45 @@
   if (operand_size_ == 0x66) {
     // 0x66 0x0F prefix.
     int mod, regop, rm;
-    get_modrm(*current, &mod, &regop, &rm);
-    const char* mnemonic = "?";
-    if (opcode == 0x57) {
-      mnemonic = "xorpd";
-    } else if (opcode == 0x2E) {
-      mnemonic = "comisd";
-    } else if (opcode == 0x2F) {
-      mnemonic = "ucomisd";
+    if (opcode == 0x3A) {
+      byte third_byte = *current;
+      current = data + 3;
+      if (third_byte == 0x17) {
+        get_modrm(*current, &mod, &regop, &rm);
+        AppendToBuffer("extractps ");  // reg/m32, xmm, imm8
+        current += PrintRightOperand(current);
+        AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
+        current += 1;
+      } else {
+        UnimplementedInstruction();
+      }
     } else {
-      UnimplementedInstruction();
+      get_modrm(*current, &mod, &regop, &rm);
+      if (opcode == 0x6E) {
+        AppendToBuffer("mov%c %s,",
+                       rex_w() ? 'q' : 'd',
+                       NameOfXMMRegister(regop));
+        current += PrintRightOperand(current);
+      } else if (opcode == 0x7E) {
+        AppendToBuffer("mov%c %s,",
+                       rex_w() ? 'q' : 'd',
+                       NameOfCPURegister(regop));
+        current += PrintRightXMMOperand(current);
+      } else {
+        const char* mnemonic = "?";
+        if (opcode == 0x57) {
+          mnemonic = "xorpd";
+        } else if (opcode == 0x2E) {
+          mnemonic = "comisd";
+        } else if (opcode == 0x2F) {
+          mnemonic = "ucomisd";
+        } else {
+          UnimplementedInstruction();
+        }
+        AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+      }
     }
-    AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
-    current += PrintRightXMMOperand(current);
   } else if (group_1_prefix_ == 0xF2) {
     // Beginning of instructions with prefix 0xF2.
 
@@ -1030,7 +1056,7 @@
       get_modrm(*current, &mod, &regop, &rm);
       AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
       current += PrintRightOperand(current);
-    } else if ((opcode & 0xF8) == 0x58) {
+    } else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
       // XMM arithmetic. Mnemonic was retrieved at the start of this function.
       int mod, regop, rm;
       get_modrm(*current, &mod, &regop, &rm);
@@ -1039,13 +1065,21 @@
     } else {
       UnimplementedInstruction();
     }
-  } else if (opcode == 0x2C && group_1_prefix_ == 0xF3) {
-    // Instruction with prefix 0xF3.
-
-    // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
-    // Assert that mod is not 3, so source is memory, not an XMM register.
-    ASSERT_NE(0xC0, *current & 0xC0);
-    current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
+  } else if (group_1_prefix_ == 0xF3) {
+    // Instructions with prefix 0xF3.
+    if (opcode == 0x2C) {
+      // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
+      // Assert that mod is not 3, so source is memory, not an XMM register.
+      ASSERT_NE(0xC0, *current & 0xC0);
+      current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
+    } else if (opcode == 0x5A) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
+    } else {
+      UnimplementedInstruction();
+    }
   } else if (opcode == 0x1F) {
     // NOP
     int mod, regop, rm;
@@ -1113,6 +1147,8 @@
       return "cvtsi2sd";
     case 0x31:
       return "rdtsc";
+    case 0x51:  // F2 prefix.
+      return "sqrtsd";
     case 0x58:  // F2 prefix.
       return "addsd";
     case 0x59:  // F2 prefix.
@@ -1273,7 +1309,9 @@
         get_modrm(*(data + 1), &mod, &regop, &rm);
         int32_t imm = *data == 0x6B ? *(data + 2)
             : *reinterpret_cast<int32_t*>(data + 2);
-        AppendToBuffer("imul %s,%s,0x%x", NameOfCPURegister(regop),
+        AppendToBuffer("imul%c %s,%s,0x%x",
+                       operand_size_code(),
+                       NameOfCPURegister(regop),
                        NameOfCPURegister(rm), imm);
         data += 2 + (*data == 0x6B ? 1 : 4);
         break;
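
The disassembler branches above all begin by splitting a ModRM byte with get_modrm. That helper follows the standard x86/x86-64 ModRM layout; a self-contained sketch (GetModRM is an illustrative name):

    #include <cstdint>

    // ModRM byte: mod in bits 7..6, reg/opcode in bits 5..3, r/m in
    // bits 2..0 (REX prefix bits extend reg and r/m separately).
    static void GetModRM(uint8_t data, int* mod, int* regop, int* rm) {
      *mod = (data >> 6) & 0x3;
      *regop = (data >> 3) & 0x7;
      *rm = data & 0x7;
    }

For the new 0x66 0x0F 0x3A 0x17 case, the imm8 selecting the extracted lane follows the ModRM-encoded operand, which is why the code prints the operand before reading the immediate.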
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
index 4dbf26a..5e76901 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -29,6 +29,7 @@
 
 #include "codegen-inl.h"
 #include "fast-codegen.h"
+#include "scopes.h"
 
 namespace v8 {
 namespace internal {
@@ -188,6 +189,7 @@
 void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
   info_ = compilation_info;
+  Comment cmnt(masm_, "[ function compiled by fast code generator");
 
   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 30db660..a34a94e 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -32,6 +32,7 @@
 #include "debug.h"
 #include "full-codegen.h"
 #include "parser.h"
+#include "scopes.h"
 
 namespace v8 {
 namespace internal {
@@ -55,6 +56,7 @@
   ASSERT(info_ == NULL);
   info_ = info;
   SetFunctionPosition(function());
+  Comment cmnt(masm_, "[ function compiled by full code generator");
 
   if (mode == PRIMARY) {
     __ push(rbp);  // Caller's frame pointer.
@@ -778,16 +780,15 @@
 void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
-  // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script(), this);
+  // Build the shared function info and instantiate the function based
+  // on it.
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(expr, script(), this);
   if (HasStackOverflow()) return;
 
-  ASSERT(boilerplate->IsBoilerplate());
-
   // Create a new closure.
   __ push(rsi);
-  __ Push(boilerplate);
+  __ Push(function_info);
   __ CallRuntime(Runtime::kNewClosure, 2);
   Apply(context_, rax);
 }
@@ -901,10 +902,11 @@
   __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
   __ Push(expr->constant_properties());
+  __ Push(Smi::FromInt(expr->fast_elements() ? 1 : 0));
   if (expr->depth() > 1) {
-    __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
-    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   }
 
   // If result_saved is true the result is on top of the stack.  If
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 8d43332..88fcfd1 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -72,11 +72,10 @@
   // Check for the absence of an interceptor.
   // Load the map into r0.
   __ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
-  // Test the has_named_interceptor bit in the map.
-  __ testl(FieldOperand(r0, Map::kInstanceAttributesOffset),
-          Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
 
-  // Jump to miss if the interceptor bit is set.
+  // Bail out if the receiver has a named interceptor.
+  __ testl(FieldOperand(r0, Map::kBitFieldOffset),
+           Immediate(1 << Map::kHasNamedInterceptor));
   __ j(not_zero, miss_label);
 
   // Bail out if we have a JS global proxy object.
@@ -151,6 +150,103 @@
 }
 
 
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+                                         Label* miss,
+                                         Register elements,
+                                         Register key,
+                                         Register r0,
+                                         Register r1,
+                                         Register r2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver and is unchanged.
+  //
+  // key      - holds the smi key on entry and is unchanged if a branch is
+  //            performed to the miss label.
+  //
+  // Scratch registers:
+  //
+  // r0 - holds the untagged key on entry and holds the hash once computed.
+  //      Holds the result on exit if the load succeeded.
+  //
+  // r1 - used to hold the capacity mask of the dictionary.
+  //
+  // r2 - used for the index into the dictionary.
+  Label done;
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  __ movl(r1, r0);
+  __ notl(r0);
+  __ shll(r1, Immediate(15));
+  __ addl(r0, r1);
+  // hash = hash ^ (hash >> 12);
+  __ movl(r1, r0);
+  __ shrl(r1, Immediate(12));
+  __ xorl(r0, r1);
+  // hash = hash + (hash << 2);
+  __ leal(r0, Operand(r0, r0, times_4, 0));
+  // hash = hash ^ (hash >> 4);
+  __ movl(r1, r0);
+  __ shrl(r1, Immediate(4));
+  __ xorl(r0, r1);
+  // hash = hash * 2057;
+  __ imull(r0, r0, Immediate(2057));
+  // hash = hash ^ (hash >> 16);
+  __ movl(r1, r0);
+  __ shrl(r1, Immediate(16));
+  __ xorl(r0, r1);
+
+  // Compute capacity mask.
+  __ movq(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
+  __ SmiToInteger32(r1, r1);
+  __ decl(r1);
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  const int kProbes = 4;
+  for (int i = 0; i < kProbes; i++) {
+    // Use r2 for index calculations and keep the hash intact in r0.
+    __ movq(r2, r0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      __ addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
+    }
+    __ and_(r2, r1);
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(NumberDictionary::kEntrySize == 3);
+    __ lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
+
+    // Check if the key matches.
+    __ cmpq(key, FieldOperand(elements,
+                              r2,
+                              times_pointer_size,
+                              NumberDictionary::kElementsStartOffset));
+    if (i != (kProbes - 1)) {
+      __ j(equal, &done);
+    } else {
+      __ j(not_equal, miss);
+    }
+  }
+
+  __ bind(&done);
+  // Check that the value is a normal property.
+  const int kDetailsOffset =
+      NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  ASSERT_EQ(NORMAL, 0);
+  __ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+          Smi::FromInt(PropertyDetails::TypeField::mask()));
+  __ j(not_zero, miss);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset =
+      NumberDictionary::kElementsStartOffset + kPointerSize;
+  __ movq(r0, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
+
+
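
The shift/xor sequence in GenerateNumberDictionaryLoad must stay in sync with ComputeIntegerHash in utils.h, as its comment says. For reference, the same computation in plain C++ (a sketch mirroring the assembly; the function name is illustrative):

    #include <cstdint>

    static uint32_t ComputeIntegerHashSketch(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);  // i.e. (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // i.e. hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }

Probe i of the unrolled loop then inspects entry (hash + i + i * i) & mask, per the comment above, so up to four dictionary slots are checked before branching to the miss label.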
 // One byte opcode for test eax,0xXXXXXXXX.
 static const byte kTestEaxByte = 0xA9;
 
@@ -225,7 +321,8 @@
   __ push(rbx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
@@ -242,7 +339,7 @@
   __ push(rbx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(Runtime::kKeyedGetProperty), 2, 1);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 }
 
 
@@ -254,6 +351,7 @@
   // -----------------------------------
   Label slow, check_string, index_int, index_string;
   Label check_pixel_array, probe_dictionary;
+  Label check_number_dictionary;
 
   // Load name and receiver.
   __ movq(rax, Operand(rsp, kPointerSize));
@@ -277,6 +375,9 @@
 
   // Check that the key is a smi.
   __ JumpIfNotSmi(rax, &check_string);
+  // Save key in rbx in case we want it for the number dictionary
+  // case.
+  __ movq(rbx, rax);
   __ SmiToInteger32(rax, rax);
   // Get the elements array of the object.
   __ bind(&index_int);
@@ -304,7 +405,7 @@
   __ bind(&check_pixel_array);
   __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
                  Heap::kPixelArrayMapRootIndex);
-  __ j(not_equal, &slow);
+  __ j(not_equal, &check_number_dictionary);
   __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
   __ j(above_equal, &slow);
   __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
@@ -312,6 +413,17 @@
   __ Integer32ToSmi(rax, rax);
   __ ret(0);
 
+  __ bind(&check_number_dictionary);
+  // Check whether the elements object is a number dictionary.
+  // rax: untagged index
+  // rbx: key
+  // rcx: elements
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kHashTableMapRootIndex);
+  __ j(not_equal, &slow);
+  GenerateNumberDictionaryLoad(masm, &slow, rcx, rbx, rax, rdx, rdi);
+  __ ret(0);
+
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
@@ -411,8 +523,72 @@
   //  -- rsp[8] : name
   //  -- rsp[16] : receiver
   // -----------------------------------
+  Label miss;
+  Label index_not_smi;
+  Label index_out_of_range;
+  Label slow_char_code;
+  Label got_char_code;
 
-  GenerateGeneric(masm);
+  Register receiver = rdx;
+  Register index = rax;
+  Register code = rbx;
+  Register scratch = rcx;
+
+  __ movq(index, Operand(rsp, 1 * kPointerSize));
+  __ movq(receiver, Operand(rsp, 2 * kPointerSize));
+
+  StringHelper::GenerateFastCharCodeAt(masm,
+                                       receiver,
+                                       index,
+                                       scratch,
+                                       code,
+                                       &miss,  // When not a string.
+                                       &index_not_smi,
+                                       &index_out_of_range,
+                                       &slow_char_code);
+  // If we didn't bail out, code register contains smi tagged char
+  // code.
+  __ bind(&got_char_code);
+  StringHelper::GenerateCharFromCode(masm, code, rax, scratch, JUMP_FUNCTION);
+#ifdef DEBUG
+  __ Abort("Unexpected fall-through from char from code tail call");
+#endif
+
+  // Check if key is a heap number.
+  __ bind(&index_not_smi);
+  __ CompareRoot(FieldOperand(index, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &miss);
+
+  // Push receiver and key on the stack (now that we know they are a
+  // string and a number), and call runtime.
+  __ bind(&slow_char_code);
+  __ EnterInternalFrame();
+  __ push(receiver);
+  __ push(index);
+  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+  ASSERT(!code.is(rax));
+  __ movq(code, rax);
+  __ LeaveInternalFrame();
+
+  // Check if the runtime call returned NaN char code. If yes, return
+  // undefined. Otherwise, we can continue.
+  if (FLAG_debug_code) {
+    ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(code, &got_char_code);
+    __ CompareRoot(FieldOperand(code, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ Assert(equal, "StringCharCodeAt must return smi or heap number");
+  }
+  __ CompareRoot(code, Heap::kNanValueRootIndex);
+  __ j(not_equal, &got_char_code);
+
+  __ bind(&index_out_of_range);
+  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
@@ -591,7 +767,7 @@
   __ push(rdx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(
+  __ TailCallExternalReference(ExternalReference(
         IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
 
   __ bind(&slow);
@@ -614,7 +790,8 @@
   __ push(rcx);  // return address
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
+  __ TailCallExternalReference(ref, 3, 1);
 }
 
 
@@ -633,7 +810,7 @@
   __ push(rcx);  // return address
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
 }
 
 
@@ -1202,7 +1379,8 @@
   __ push(rbx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
+  __ TailCallExternalReference(ref, 2, 1);
 }
 
 
@@ -1284,7 +1462,7 @@
 
   // Check for non-global object that requires access check.
   __ testl(FieldOperand(rbx, Map::kBitFieldOffset),
-          Immediate(1 << Map::kIsAccessCheckNeeded));
+           Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &miss);
 
   // Search the dictionary placing the result in rax.
@@ -1363,7 +1541,8 @@
   __ push(rbx);  // return address
 
   // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
+  __ TailCallExternalReference(ref, 3, 1);
 }
 
 
@@ -1386,6 +1565,56 @@
 }
 
 
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except for external and pixel arrays, which means
+  // anything with elements of FixedArray type), but is currently restricted
+  // to JSArray.
+  // The value must be a number, but only smis are accepted as they are
+  // the most common case.
+
+  Label miss;
+
+  Register receiver = rdx;
+  Register value = rax;
+  Register scratch = rbx;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Check that the object is a JS array.
+  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+  __ j(not_equal, &miss);
+
+  // Check that elements are FixedArray.
+  __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+  __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+  __ j(not_equal, &miss);
+
+  // Check that value is a smi.
+  __ JumpIfNotSmi(value, &miss);
+
+  // Prepare tail call to StoreIC_ArrayLength.
+  __ pop(scratch);
+  __ push(receiver);
+  __ push(value);
+  __ push(scratch);  // return address
+
+  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
+  __ TailCallExternalReference(ref, 2, 1);
+
+  __ bind(&miss);
+
+  GenerateMiss(masm);
+}
+
+
 #undef __
 
 
diff --git a/src/x64/jump-target-x64.cc b/src/x64/jump-target-x64.cc
index dd2f6d6..9b08c1f 100644
--- a/src/x64/jump-target-x64.cc
+++ b/src/x64/jump-target-x64.cc
@@ -30,6 +30,7 @@
 #include "codegen-inl.h"
 #include "jump-target-inl.h"
 #include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 90a9c75..a1976ec 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -46,17 +46,17 @@
 
 
 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
-  movq(destination, Operand(r13, index << kPointerSizeLog2));
+  movq(destination, Operand(kRootRegister, index << kPointerSizeLog2));
 }
 
 
 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
-  push(Operand(r13, index << kPointerSizeLog2));
+  push(Operand(kRootRegister, index << kPointerSizeLog2));
 }
 
 
 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
-  cmpq(with, Operand(r13, index << kPointerSizeLog2));
+  cmpq(with, Operand(kRootRegister, index << kPointerSizeLog2));
 }
 
 
@@ -72,37 +72,46 @@
 }
 
 
-static void RecordWriteHelper(MacroAssembler* masm,
-                              Register object,
-                              Register addr,
-                              Register scratch) {
+void MacroAssembler::RecordWriteHelper(Register object,
+                                       Register addr,
+                                       Register scratch) {
+  if (FLAG_debug_code) {
+    // Check that the object is not in new space.
+    Label not_in_new_space;
+    InNewSpace(object, scratch, not_equal, &not_in_new_space);
+    Abort("new-space object passed to RecordWriteHelper");
+    bind(&not_in_new_space);
+  }
+
   Label fast;
 
   // Compute the page start address from the heap object pointer, and reuse
   // the 'object' register for it.
   ASSERT(is_int32(~Page::kPageAlignmentMask));
-  masm->and_(object,
-             Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
+  and_(object,
+       Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
   Register page_start = object;
 
   // Compute the bit addr in the remembered set/index of the pointer in the
   // page. Reuse 'addr' as pointer_offset.
-  masm->subq(addr, page_start);
-  masm->shr(addr, Immediate(kPointerSizeLog2));
+  subq(addr, page_start);
+  shr(addr, Immediate(kPointerSizeLog2));
   Register pointer_offset = addr;
 
   // If the bit offset lies beyond the normal remembered set range, it is in
   // the extra remembered set area of a large object.
-  masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
-  masm->j(less, &fast);
+  cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
+  j(below, &fast);
+
+  // We have a large object containing pointers. It must be a FixedArray.
 
   // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
   // extra remembered set after the large object.
 
   // Load the array length into 'scratch'.
-  masm->movl(scratch,
-             Operand(page_start,
-                     Page::kObjectStartOffset + FixedArray::kLengthOffset));
+  movl(scratch,
+       Operand(page_start,
+               Page::kObjectStartOffset + FixedArray::kLengthOffset));
   Register array_length = scratch;
 
   // Extra remembered set starts right after the large object (a FixedArray), at
@@ -111,59 +120,17 @@
   // Add the delta between the end of the normal RSet and the start of the
   // extra RSet to 'page_start', so that addressing the bit using
   // 'pointer_offset' hits the extra RSet words.
-  masm->lea(page_start,
-            Operand(page_start, array_length, times_pointer_size,
-                    Page::kObjectStartOffset + FixedArray::kHeaderSize
-                        - Page::kRSetEndOffset));
+  lea(page_start,
+      Operand(page_start, array_length, times_pointer_size,
+              Page::kObjectStartOffset + FixedArray::kHeaderSize
+                  - Page::kRSetEndOffset));
 
   // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
   // to limit code size. We should probably evaluate this decision by
   // measuring the performance of an equivalent implementation using
   // "simpler" instructions
-  masm->bind(&fast);
-  masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
-}
-
-
-class RecordWriteStub : public CodeStub {
- public:
-  RecordWriteStub(Register object, Register addr, Register scratch)
-      : object_(object), addr_(addr), scratch_(scratch) { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Register object_;
-  Register addr_;
-  Register scratch_;
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
-           object_.code(), addr_.code(), scratch_.code());
-  }
-#endif
-
-  // Minor key encoding in 12 bits of three registers (object, address and
-  // scratch) OOOOAAAASSSS.
-  class ScratchBits : public BitField<uint32_t, 0, 4> {};
-  class AddressBits : public BitField<uint32_t, 4, 4> {};
-  class ObjectBits : public BitField<uint32_t, 8, 4> {};
-
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    // Encode the registers.
-    return ObjectBits::encode(object_.code()) |
-           AddressBits::encode(addr_.code()) |
-           ScratchBits::encode(scratch_.code());
-  }
-};
-
-
-void RecordWriteStub::Generate(MacroAssembler* masm) {
-  RecordWriteHelper(masm, object_, addr_, scratch_);
-  masm->ret(0);
+  bind(&fast);
+  bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
 }
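
The fast path of RecordWriteHelper is just pointer arithmetic: mask off the low bits of the object pointer to find the page start, then count words from the page start to get the bit to set in the page's remembered set. A sketch of the normal (non-large-object) case; in the assembly the page start comes from 'object' and the offset from 'addr', but both live on the same page here, so one address suffices:

    #include <cstddef>
    #include <cstdint>

    // Bit index of a slot in its page's remembered set: one bit per
    // word, counted from the page start.  The bts above sets exactly
    // this bit in the bitmap at page_start + Page::kRSetOffset.
    static size_t RSetBitIndex(uintptr_t slot_addr,
                               uintptr_t page_alignment_mask,
                               int pointer_size_log2) {
      uintptr_t page_start = slot_addr & ~page_alignment_mask;
      return (slot_addr - page_start) >> pointer_size_log2;
    }

Note the change from j(less, ...) to j(below, ...): the word offset is an unsigned quantity, so the unsigned comparison is the correct one.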
 
 
@@ -184,7 +151,7 @@
 
   // First, check if a remembered set write is even needed. The tests below
   // catch stores of Smis and stores into young gen (which does not have space
-  // for the remembered set bits.
+  // for the remembered set bits).
   Label done;
   JumpIfSmi(value, &done);
 
@@ -197,9 +164,9 @@
   // avoid having the fast case for smis leave the registers
   // unchanged.
   if (FLAG_debug_code) {
-    movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(value, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
 
@@ -219,19 +186,24 @@
 
   // Test that the object address is not in the new space.  We cannot
   // set remembered set bits in the new space.
-  movq(scratch, object);
-  ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
-  and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
-  movq(kScratchRegister, ExternalReference::new_space_start());
-  cmpq(scratch, kScratchRegister);
-  j(equal, &done);
+  InNewSpace(object, scratch, equal, &done);
 
-  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
-    // Compute the bit offset in the remembered set, leave it in 'value'.
+  // The offset is relative to a tagged or untagged HeapObject pointer,
+  // so either offset or offset + kHeapObjectTag must be a
+  // multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize) ||
+         IsAligned(offset + kHeapObjectTag, kPointerSize));
+
+  // We use optimized write barrier code if the word being written to is not in
+  // a large object page, or is in the first "page" of a large object page.
+  // We make sure that the offset is inside the right limits whether it
+  // is relative to a tagged or an untagged pointer.
+  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
+    // Compute the bit offset in the remembered set, leave it in 'scratch'.
     lea(scratch, Operand(object, offset));
     ASSERT(is_int32(Page::kPageAlignmentMask));
     and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
-    shr(scratch, Immediate(kObjectAlignmentBits));
+    shr(scratch, Immediate(kPointerSizeLog2));
 
     // Compute the page address from the heap object pointer, leave it in
     // 'object' (immediate value is sign extended).
@@ -250,15 +222,15 @@
       // array access: calculate the destination address in the same manner as
       // KeyedStoreIC::GenerateGeneric.
       SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
-      lea(dst, Operand(object,
-                       index.reg,
-                       index.scale,
-                       FixedArray::kHeaderSize - kHeapObjectTag));
+      lea(dst, FieldOperand(object,
+                            index.reg,
+                            index.scale,
+                            FixedArray::kHeaderSize));
     }
     // If we are already generating a shared stub, not inlining the
     // record write code isn't going to save us any memory.
     if (generating_stub()) {
-      RecordWriteHelper(this, object, dst, scratch);
+      RecordWriteHelper(object, dst, scratch);
     } else {
       RecordWriteStub stub(object, dst, scratch);
       CallStub(&stub);
@@ -270,9 +242,44 @@
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (FLAG_debug_code) {
-    movq(object, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(scratch, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(smi_index, bit_cast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+  }
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch) {
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    if (scratch.is(object)) {
+      movq(kScratchRegister, ExternalReference::new_space_mask());
+      and_(scratch, kScratchRegister);
+    } else {
+      movq(scratch, ExternalReference::new_space_mask());
+      and_(scratch, object);
+    }
+    movq(kScratchRegister, ExternalReference::new_space_start());
+    cmpq(scratch, kScratchRegister);
+    j(cc, branch);
+  } else {
+    ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+    intptr_t new_space_start =
+        reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
+    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+    if (scratch.is(object)) {
+      addq(scratch, kScratchRegister);
+    } else {
+      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+    }
+    and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+    j(cc, branch);
   }
 }
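
Both arms of InNewSpace test the same predicate and differ only in whether the new-space constants may be baked into the code. In C++ terms (a sketch; the name is illustrative, and new space is assumed to be a single mask-aligned region):

    #include <cstdint>

    // True iff 'object' lies in new space.  The serializer-safe path
    // compares (object & mask) against the start address; the direct
    // path tests ((object - start) & mask) == 0.  The two agree when
    // 'start' is aligned to the mask.
    static bool InNewSpaceSketch(uintptr_t object,
                                 uintptr_t new_space_start,
                                 uintptr_t new_space_mask) {
      return ((object - new_space_start) & new_space_mask) == 0;
    }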
 
@@ -291,6 +298,21 @@
 }
 
 
+void MacroAssembler::CheckStackAlignment() {
+  int frame_alignment = OS::ActivationFrameAlignment();
+  int frame_alignment_mask = frame_alignment - 1;
+  if (frame_alignment > kPointerSize) {
+    ASSERT(IsPowerOf2(frame_alignment));
+    Label alignment_as_expected;
+    testq(rsp, Immediate(frame_alignment_mask));
+    j(zero, &alignment_as_expected);
+    // Abort if stack is not aligned.
+    int3();
+    bind(&alignment_as_expected);
+  }
+}
+
+
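
CheckStackAlignment is debug-only; the int3 fires exactly when this predicate is false (a sketch, with frame_alignment a power of two as the ASSERT requires):

    #include <cstdint>

    static bool StackAligned(uintptr_t rsp, int frame_alignment) {
      return (rsp & (frame_alignment - 1)) == 0;
    }

It is wired into CallCFunction below so that misaligned C calls trap immediately in debug builds.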
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
@@ -396,9 +418,9 @@
 }
 
 
-void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
-                                     int num_arguments,
-                                     int result_size) {
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+                                               int num_arguments,
+                                               int result_size) {
   // ----------- S t a t e -------------
   //  -- rsp[0] : return address
   //  -- rsp[8] : argument num_arguments - 1
@@ -411,12 +433,19 @@
   // should remove this need and make the runtime routine entry code
   // smarter.
   movq(rax, Immediate(num_arguments));
-  JumpToRuntime(ext, result_size);
+  JumpToExternalReference(ext, result_size);
 }
 
 
-void MacroAssembler::JumpToRuntime(const ExternalReference& ext,
-                                   int result_size) {
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+                                     int num_arguments,
+                                     int result_size) {
+  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
+                                             int result_size) {
   // Set the entry point and jump to the C entry runtime stub.
   movq(rbx, ext);
   CEntryStub ces(result_size);
@@ -438,16 +467,28 @@
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  ASSERT(!target.is(rdi));
+
+  // Load the builtins object into target register.
+  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+
   // Load the JavaScript builtin function from the builtins object.
-  movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  movq(rdi, FieldOperand(rdi, GlobalObject::kBuiltinsOffset));
-  int builtins_offset =
-      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
-  movq(rdi, FieldOperand(rdi, builtins_offset));
-  // Load the code entry point from the function into the target register.
-  movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
-  addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  movq(rdi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+
+  // Load the code entry point from the builtins object.
+  movq(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id)));
+  if (FLAG_debug_code) {
+    // Make sure the code objects in the builtins object and in the
+    // builtin function are the same.
+    push(target);
+    movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+    cmpq(target, Operand(rsp, 0));
+    Assert(equal, "Builtin code object changed");
+    pop(target);
+  }
+  lea(target, FieldOperand(target, Code::kHeaderSize));
 }
 
 
@@ -555,6 +596,11 @@
 }
 
 
+void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
+  cmpq(dst, src);
+}
+
+
 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
   cmpq(dst, src);
 }
@@ -690,7 +736,17 @@
                             Register src2,
                             Label* on_not_smi_result) {
   ASSERT(!dst.is(src2));
-  if (dst.is(src1)) {
+  if (on_not_smi_result == NULL) {
+    // No overflow checking. Use only when it's known that
+    // overflowing is impossible.
+    if (dst.is(src1)) {
+      addq(dst, src2);
+    } else {
+      movq(dst, src1);
+      addq(dst, src2);
+    }
+    Assert(no_overflow, "Smi addition onverflow");
+  } else if (dst.is(src1)) {
     addq(dst, src2);
     Label smi_result;
     j(no_overflow, &smi_result);
@@ -737,6 +793,35 @@
 }
 
 
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            Operand const& src2,
+                            Label* on_not_smi_result) {
+  if (on_not_smi_result == NULL) {
+    // No overflow checking. Use only when it's known that
+    // overflowing is impossible (e.g., subtracting two positive smis).
+    if (dst.is(src1)) {
+      subq(dst, src2);
+    } else {
+      movq(dst, src1);
+      subq(dst, src2);
+    }
+    Assert(no_overflow, "Smi substraction onverflow");
+  } else if (dst.is(src1)) {
+    subq(dst, src2);
+    Label smi_result;
+    j(no_overflow, &smi_result);
+    // Restore src1.
+    addq(src1, src2);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result);
+  }
+}
+
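
In the dst.is(src1) case above, the subtraction is performed destructively and then undone (the addq of src2) before jumping to on_not_smi_result, so the bail-out path still sees the original operands. The same observable contract in C++, using GCC/Clang's __builtin_sub_overflow rather than the subtract-then-restore trick (a sketch; the name is illustrative):

    #include <cstdint>

    // Returns true and updates *src1 on success; on overflow returns
    // false and leaves *src1 unchanged, matching the restored-register
    // contract of the assembly above.
    static bool SmiSubSketch(int64_t* src1, int64_t src2) {
      int64_t result;
      if (__builtin_sub_overflow(*src1, src2, &result)) return false;
      *src1 = result;
      return true;
    }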
 void MacroAssembler::SmiMul(Register dst,
                             Register src1,
                             Register src2,
@@ -1393,6 +1478,50 @@
 }
 
 
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+    Register instance_type,
+    Register scratch,
+    Label* failure) {
+  if (!scratch.is(instance_type)) {
+    movl(scratch, instance_type);
+  }
+
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+  andl(scratch, Immediate(kFlatAsciiStringMask));
+  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+  j(not_equal, failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+    Register first_object_instance_type,
+    Register second_object_instance_type,
+    Register scratch1,
+    Register scratch2,
+    Label* on_fail) {
+  // Load instance type for both strings.
+  movq(scratch1, first_object_instance_type);
+  movq(scratch2, second_object_instance_type);
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail);
+}
+
+
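
The lea in JumpIfBothInstanceTypesAreNotSequentialAscii packs both masked type words into one register so a single compare checks both strings at once. This is sound only because the ASSERT_EQ guarantees mask & (mask << 3) == 0: the shifted copy occupies disjoint bit positions, so the addition can never carry between the two fields. A sketch (illustrative name):

    #include <cstdint>

    // Combined flat-ASCII check for two instance types.
    // Precondition: (mask & (mask << 3)) == 0, so a + (b << 3) is
    // carry-free and equals a | (b << 3).
    static bool BothFlatAscii(uint32_t type1, uint32_t type2,
                              uint32_t mask, uint32_t expected_tag) {
      uint32_t combined = (type1 & mask) + ((type2 & mask) << 3);
      return combined == (expected_tag + (expected_tag << 3));
    }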
 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   ASSERT(!source->IsFailure());
   if (source->IsSmi()) {
@@ -1609,6 +1738,15 @@
 }
 
 
+void MacroAssembler::AbortIfNotSmi(Register object, const char* msg) {
+  Label ok;
+  Condition is_smi = CheckSmi(object);
+  j(is_smi, &ok);
+  Assert(equal, msg);
+  bind(&ok);
+}
+
+
 Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                              Register map,
                                              Register instance_type) {
@@ -2075,6 +2213,7 @@
                                    JSObject* holder,
                                    Register holder_reg,
                                    Register scratch,
+                                   int save_at_depth,
                                    Label* miss) {
   // Make sure there's no overlap between scratch and the other
   // registers.
@@ -2084,7 +2223,11 @@
   // iteration, reg is an alias for object_reg, on later iterations,
   // it is an alias for holder_reg.
   Register reg = object_reg;
-  int depth = 1;
+  int depth = 0;
+
+  if (save_at_depth == depth) {
+    movq(Operand(rsp, kPointerSize), object_reg);
+  }
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
@@ -2134,6 +2277,10 @@
       Move(reg, Handle<JSObject>(prototype));
     }
 
+    if (save_at_depth == depth) {
+      movq(Operand(rsp, kPointerSize), reg);
+    }
+
     // Go to the next object in the prototype chain.
     object = prototype;
   }
@@ -2143,7 +2290,7 @@
   j(not_equal, miss);
 
   // Log the check depth.
-  LOG(IntEvent("check-maps-depth", depth));
+  LOG(IntEvent("check-maps-depth", depth + 1));
 
   // Perform security check for access to the global object and return
   // the holder register.
@@ -2229,7 +2376,7 @@
   // Just return if allocation top is already known.
   if ((flags & RESULT_CONTAINS_TOP) != 0) {
     // No use of scratch if allocation top is provided.
-    ASSERT(scratch.is(no_reg));
+    ASSERT(!scratch.is_valid());
 #ifdef DEBUG
     // Assert that result actually contains top on entry.
     movq(kScratchRegister, new_space_allocation_top);
@@ -2239,14 +2386,17 @@
     return;
   }
 
-  // Move address of new object to result. Use scratch register if available.
-  if (scratch.is(no_reg)) {
-    movq(kScratchRegister, new_space_allocation_top);
-    movq(result, Operand(kScratchRegister, 0));
-  } else {
+  // Move address of new object to result. Use scratch register if available,
+  // and keep address in scratch until call to UpdateAllocationTopHelper.
+  if (scratch.is_valid()) {
     ASSERT(!scratch.is(result_end));
     movq(scratch, new_space_allocation_top);
     movq(result, Operand(scratch, 0));
+  } else if (result.is(rax)) {
+    load_rax(new_space_allocation_top);
+  } else {
+    movq(kScratchRegister, new_space_allocation_top);
+    movq(result, Operand(kScratchRegister, 0));
   }
 }
 
@@ -2267,11 +2417,11 @@
     store_rax(new_space_allocation_top);
   } else {
     // Register required - use scratch provided if available.
-    if (scratch.is(no_reg)) {
+    if (scratch.is_valid()) {
+      movq(Operand(scratch, 0), result_end);
+    } else {
       movq(kScratchRegister, new_space_allocation_top);
       movq(Operand(kScratchRegister, 0), result_end);
-    } else {
-      movq(Operand(scratch, 0), result_end);
     }
   }
 }
@@ -2291,16 +2441,29 @@
   // Calculate new top and bail out if new space is exhausted.
   ExternalReference new_space_allocation_limit =
       ExternalReference::new_space_allocation_limit_address();
-  lea(result_end, Operand(result, object_size));
+
+  Register top_reg = result_end.is_valid() ? result_end : result;
+
+  if (top_reg.is(result)) {
+    addq(top_reg, Immediate(object_size));
+  } else {
+    lea(top_reg, Operand(result, object_size));
+  }
   movq(kScratchRegister, new_space_allocation_limit);
-  cmpq(result_end, Operand(kScratchRegister, 0));
+  cmpq(top_reg, Operand(kScratchRegister, 0));
   j(above, gc_required);
 
   // Update allocation top.
-  UpdateAllocationTopHelper(result_end, scratch);
+  UpdateAllocationTopHelper(top_reg, scratch);
 
-  // Tag the result if requested.
-  if ((flags & TAG_OBJECT) != 0) {
+  if (top_reg.is(result)) {
+    if ((flags & TAG_OBJECT) != 0) {
+      subq(result, Immediate(object_size - kHeapObjectTag));
+    } else {
+      subq(result, Immediate(object_size));
+    }
+  } else if ((flags & TAG_OBJECT) != 0) {
+    // Tag the result if requested.
     addq(result, Immediate(kHeapObjectTag));
   }
 }
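With result_end now optional, the allocator can bump the result register itself and recover the object start afterwards. A plain-C++ sketch of that path, assuming kHeapObjectTag == 1 as on all V8 ports; returning 0 stands in for the jump to gc_required:

#include <cstdint>

const intptr_t kHeapObjectTag = 1;  // assumed, as on all V8 ports

// top/limit model the new-space allocation pointer and limit.
intptr_t AllocateNoResultEnd(intptr_t* top, intptr_t limit,
                             int object_size, bool tag_object) {
  intptr_t result = *top;                 // load allocation top into result
  result += object_size;                  // addq top_reg, Immediate(size)
  if (result > limit) return 0;           // j(above, gc_required)
  *top = result;                          // UpdateAllocationTopHelper
  // Undo the addition to recover the object start, folding in the tag.
  return tag_object ? result - (object_size - kHeapObjectTag)
                    : result - object_size;
}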
@@ -2407,11 +2570,16 @@
                                            Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
-  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
+                               kObjectAlignmentMask;
   ASSERT(kShortSize == 2);
   // scratch1 = length * 2 + kObjectAlignmentMask.
-  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
+  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
+                kHeaderAlignment));
   and_(scratch1, Immediate(~kObjectAlignmentMask));
+  if (kHeaderAlignment > 0) {
+    subq(scratch1, Immediate(kHeaderAlignment));
+  }
 
   // Allocate two byte string in new space.
   AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
@@ -2426,7 +2594,8 @@
   // Set the map, length and hash field.
   LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-  movl(FieldOperand(result, String::kLengthOffset), length);
+  Integer32ToSmi(scratch1, length);
+  movq(FieldOperand(result, String::kLengthOffset), scratch1);
   movl(FieldOperand(result, String::kHashFieldOffset),
        Immediate(String::kEmptyHashField));
 }
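The kHeaderAlignment term compensates for a header size that is no longer guaranteed to be object-aligned now that the length field is a smi. A runnable restatement of the arithmetic, with the header size passed in as an assumption:

#include <cassert>

// Byte count for the character payload such that header + body stays
// object-aligned. kObjectAlignment is 8 on x64.
int TwoByteBodySize(int length, int header_size) {
  const int kObjectAlignmentMask = 8 - 1;
  const int kHeaderAlignment = header_size & kObjectAlignmentMask;
  int scratch1 = (length * 2 + kObjectAlignmentMask + kHeaderAlignment)
                 & ~kObjectAlignmentMask;                 // the lea + and_
  if (kHeaderAlignment > 0) scratch1 -= kHeaderAlignment; // the subq
  assert(((header_size + scratch1) & kObjectAlignmentMask) == 0);
  assert(scratch1 >= length * 2);  // room for every character
  return scratch1;
}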
@@ -2440,11 +2609,15 @@
                                          Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
-  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
+                               kObjectAlignmentMask;
   movl(scratch1, length);
   ASSERT(kCharSize == 1);
-  addq(scratch1, Immediate(kObjectAlignmentMask));
+  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
   and_(scratch1, Immediate(~kObjectAlignmentMask));
+  if (kHeaderAlignment > 0) {
+    subq(scratch1, Immediate(kHeaderAlignment));
+  }
 
   // Allocate ascii string in new space.
   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
@@ -2459,7 +2632,8 @@
   // Set the map, length and hash field.
   LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
   movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-  movl(FieldOperand(result, String::kLengthOffset), length);
+  Integer32ToSmi(scratch1, length);
+  movq(FieldOperand(result, String::kLengthOffset), scratch1);
   movl(FieldOperand(result, String::kHashFieldOffset),
        Immediate(String::kEmptyHashField));
 }
@@ -2556,6 +2730,11 @@
 
 
 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+  // Check stack alignment.
+  if (FLAG_debug_code) {
+    CheckStackAlignment();
+  }
+
   call(function);
   ASSERT(OS::ActivationFrameAlignment() != 0);
   ASSERT(num_arguments >= 0);
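CheckStackAlignment guards the x64 ABI requirement that the stack be 16-byte aligned at C call sites. A trivial model of the check; the emitted code is assumed to test the low bits of rsp and abort on a nonzero result:

#include <cassert>
#include <cstdint>

// frame_alignment is OS::ActivationFrameAlignment(), 16 on x64 System V.
void ModelCheckStackAlignment(uintptr_t rsp, uintptr_t frame_alignment) {
  assert((frame_alignment & (frame_alignment - 1)) == 0);  // power of two
  assert((rsp & (frame_alignment - 1)) == 0 && "stack misaligned at C call");
}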
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 2673086..32e1f49 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -37,6 +37,7 @@
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.
 static const Register kScratchRegister = { 10 };  // r10.
+static const Register kRootRegister = { 13 };     // r13
 
 // Convenience for platform-independent signatures.
 typedef Operand MemOperand;
@@ -65,6 +66,21 @@
   // ---------------------------------------------------------------------------
   // GC Support
 
+  // Set the remembered set bit for an address which points into an
+  // object. RecordWriteHelper only works if the object is not in new
+  // space.
+  void RecordWriteHelper(Register object,
+                         Register addr,
+                         Register scratch);
+
+  // Check if object is in new space. The condition cc can be equal or
+  // not_equal. If it is equal, a jump is taken if the object is in new
+  // space. The scratch register can be the object itself, but it will be
+  // clobbered.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,
+                  Label* branch);
+
   // Set the remembered set bit for [object+offset].
   // object is the object being stored into, value is the object being stored.
   // If offset is zero, then the scratch register contains the array index into
@@ -86,7 +102,6 @@
                          Register value,
                          Register scratch);
 
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
   // Debugger Support
@@ -196,6 +211,7 @@
   // Simple comparison of smis.
   void SmiCompare(Register dst, Register src);
   void SmiCompare(Register dst, Smi* src);
+  void SmiCompare(Register dst, const Operand& src);
   void SmiCompare(const Operand& dst, Register src);
   void SmiCompare(const Operand& dst, Smi* src);
   // Sets sign and zero flags depending on value of smi in register.
@@ -286,7 +302,8 @@
                       Label* on_not_smi_result);
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
-  // result. No testing on the result is done.
+  // result. No testing on the result is done. Sets the sign and zero flags
+  // based on the value of the resulting integer.
   void SmiSubConstant(Register dst, Register src, Smi* constant);
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
@@ -318,6 +335,11 @@
               Register src2,
               Label* on_not_smi_result);
 
+  void SmiSub(Register dst,
+              Register src1,
+              const Operand& src2,
+              Label* on_not_smi_result);
+
   // Multiplies smi values and return the result as a smi,
   // if possible.
   // If dst is src1, then src1 will be destroyed, even if
@@ -426,6 +448,20 @@
                                            Register scratch2,
                                            Label* on_not_both_flat_ascii);
 
+  // Check whether the instance type represents a flat ascii string. Jump to
+  // the label if not. If the instance type can be scratched, specify the same
+  // register for both instance type and scratch.
+  void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
+                                              Register scratch,
+                                              Label* on_not_flat_ascii_string);
+
+  void JumpIfBothInstanceTypesAreNotSequentialAscii(
+      Register first_object_instance_type,
+      Register second_object_instance_type,
+      Register scratch1,
+      Register scratch2,
+      Label* on_fail);
+
   // ---------------------------------------------------------------------------
   // Macro instructions.
 
@@ -489,6 +525,9 @@
   // Abort execution if argument is not a number. Used in debug code.
   void AbortIfNotNumber(Register object, const char* msg);
 
+  // Abort execution if argument is not a smi. Used in debug code.
+  void AbortIfNotSmi(Register object, const char* msg);
+
   // ---------------------------------------------------------------------------
   // Exception handling
 
@@ -511,9 +550,14 @@
  // clobbered if it is the same as the holder register. The function
   // returns a register containing the holder - either object_reg or
   // holder_reg.
+  // The function can optionally (when save_at_depth !=
+  // kInvalidProtoDepth) save the object at the given depth by moving
+  // it to [rsp + kPointerSize].
   Register CheckMaps(JSObject* object, Register object_reg,
                      JSObject* holder, Register holder_reg,
-                     Register scratch, Label* miss);
+                     Register scratch,
+                     int save_at_depth,
+                     Label* miss);
 
   // Generate code for checking access rights - used for security checks
   // on access to global objects across environments. The holder register
@@ -645,7 +689,6 @@
   void StubReturn(int argc);
 
   // Call a runtime routine.
-  // Eventually this should be used for all C calls.
   void CallRuntime(Runtime::Function* f, int num_arguments);
 
   // Convenience function: Same as above, but takes the fid instead.
@@ -656,14 +699,19 @@
                              int num_arguments);
 
   // Tail call of a runtime routine (jump).
-  // Like JumpToRuntime, but also takes care of passing the number
-  // of arguments.
-  void TailCallRuntime(const ExternalReference& ext,
+  // Like JumpToExternalReference, but also takes care of passing the number
+  // of parameters.
+  void TailCallExternalReference(const ExternalReference& ext,
+                                 int num_arguments,
+                                 int result_size);
+
+  // Convenience function: tail call a runtime routine (jump).
+  void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
   // Jump to a runtime routine.
-  void JumpToRuntime(const ExternalReference& ext, int result_size);
+  void JumpToExternalReference(const ExternalReference& ext, int result_size);
 
   // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
@@ -715,6 +763,9 @@
   // Print a message to stdout and abort execution.
   void Abort(const char* msg);
 
+  // Check that the stack is aligned.
+  void CheckStackAlignment();
+
   // Verify restrictions about code generated in stubs.
   void set_generating_stub(bool value) { generating_stub_ = value; }
   bool generating_stub() { return generating_stub_; }
@@ -740,10 +791,17 @@
   void LeaveFrame(StackFrame::Type type);
 
   // Allocation support helpers.
+  // Loads the top of new-space into the result register.
+  // If flags contains RESULT_CONTAINS_TOP then result_end is valid and
+  // already contains the top of new-space, and scratch is invalid.
+  // Otherwise the address of the new-space top is loaded into scratch (if
+  // scratch is valid), and the new-space top is loaded into result.
   void LoadAllocationTopHelper(Register result,
                                Register result_end,
                                Register scratch,
                                AllocationFlags flags);
+  // Update allocation top with value in result_end register.
+  // If scratch is valid, it contains the address of the allocation top.
   void UpdateAllocationTopHelper(Register result_end, Register scratch);
 };
 
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 026301b..50b4120 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -39,7 +39,7 @@
 namespace v8 {
 namespace internal {
 
-#ifdef V8_NATIVE_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
 
 /*
  * This assembler uses the following register assignment convention
@@ -335,7 +335,7 @@
 #endif
     __ push(backtrack_stackpointer());
 
-    int num_arguments = 3;
+    static const int num_arguments = 3;
     __ PrepareCallCFunction(num_arguments);
 
     // Put arguments into parameter registers. Parameters are
@@ -711,9 +711,15 @@
   __ movq(rdi, Operand(rbp, kInputStart));
   // Set up rdi to be negative offset from string end.
   __ subq(rdi, rsi);
-  // Set rax to address of char before start of input
+  // Set rax to address of char before start of the string
   // (effectively string position -1).
-  __ lea(rax, Operand(rdi, -char_size()));
+  __ movq(rbx, Operand(rbp, kStartIndex));
+  __ neg(rbx);
+  if (mode_ == UC16) {
+    __ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
+  } else {
+    __ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
+  }
   // Store this value in a local variable, for use when clearing
   // position registers.
   __ movq(Operand(rbp, kInputStartMinusOne), rax);
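The fix accounts for a nonzero start index when computing the "char before start" position: rdi holds the negative byte offset of the input start from the string end, and the start index must be backed over as well. In plain arithmetic, with illustrative names rather than V8's:

// start_index: value of the kStartIndex frame slot; char_size: 1 or 2.
long CharBeforeStart(long input_start_offset,  // rdi: negative, from string end
                     long start_index, int char_size) {
  long rbx = -start_index;                                 // __ neg(rbx)
  return input_start_offset + rbx * char_size - char_size; // the lea
}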
@@ -770,9 +776,15 @@
     __ bind(&success_label_);
     if (num_saved_registers_ > 0) {
       // copy captures to output
+      __ movq(rdx, Operand(rbp, kStartIndex));
       __ movq(rbx, Operand(rbp, kRegisterOutput));
       __ movq(rcx, Operand(rbp, kInputEnd));
       __ subq(rcx, Operand(rbp, kInputStart));
+      if (mode_ == UC16) {
+        __ lea(rcx, Operand(rcx, rdx, times_2, 0));
+      } else {
+        __ addq(rcx, rdx);
+      }
       for (int i = 0; i < num_saved_registers_; i++) {
         __ movq(rax, register_location(i));
         __ addq(rax, rcx);  // Convert to index from start, not end.
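The matching fix on the capture-copy side adds the scaled start index when converting stored end-relative byte offsets into indices from the string start; the byte-to-character division below models the UC16 shift that happens in unshown context:

long CaptureIndex(long offset_from_end,      // register_location(i) value
                  long input_length_bytes,   // kInputEnd - kInputStart
                  long start_index, int char_size) {
  long rcx = input_length_bytes + start_index * char_size;  // the lea / addq
  return (offset_from_end + rcx) / char_size;
}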
@@ -849,7 +861,7 @@
 #endif
 
     // Call GrowStack(backtrack_stackpointer())
-    int num_arguments = 2;
+    static const int num_arguments = 2;
     __ PrepareCallCFunction(num_arguments);
 #ifdef _WIN64
     // Microsoft passes parameters in rcx, rdx.
@@ -893,7 +905,7 @@
                                        NULL,
                                        Code::ComputeFlags(Code::REGEXP),
                                        masm_->CodeObject());
-  LOG(RegExpCodeCreateEvent(*code, *source));
+  PROFILE(RegExpCodeCreateEvent(*code, *source));
   return Handle<Object>::cast(code);
 }
 
@@ -1029,7 +1041,7 @@
 void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
   // This function call preserves no register values. Caller should
   // store anything volatile in a C call or overwritten by this function.
-  int num_arguments = 3;
+  static const int num_arguments = 3;
   __ PrepareCallCFunction(num_arguments);
 #ifdef _WIN64
   // Second argument: Code* of self. (Do this before overwriting r8).
@@ -1298,6 +1310,6 @@
 
 #undef __
 
-#endif  // V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 }}  // namespace v8::internal
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 6d13963..4903269 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -31,7 +31,7 @@
 namespace v8 {
 namespace internal {
 
-#ifdef V8_NATIVE_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
 
 class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
  public:
@@ -271,7 +271,7 @@
   Label stack_overflow_label_;
 };
 
-#endif  // V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 
 }}  // namespace v8::internal
 
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
index d630b33..c7c18b3 100644
--- a/src/x64/register-allocator-x64-inl.h
+++ b/src/x64/register-allocator-x64-inl.h
@@ -38,7 +38,7 @@
 
 bool RegisterAllocator::IsReserved(Register reg) {
   return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
-      reg.is(kScratchRegister);
+      reg.is(kScratchRegister) || reg.is(kRootRegister);
 }
 
 
diff --git a/src/x64/register-allocator-x64.cc b/src/x64/register-allocator-x64.cc
index deb2318..cf29593 100644
--- a/src/x64/register-allocator-x64.cc
+++ b/src/x64/register-allocator-x64.cc
@@ -29,6 +29,7 @@
 
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -43,6 +44,7 @@
     ASSERT(fresh.is_valid());
     CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
     // This result becomes a copy of the fresh one.
+    fresh.set_type_info(type_info());
     *this = fresh;
   }
   ASSERT(is_register());
@@ -60,6 +62,7 @@
       ASSERT(is_constant());
       CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
     }
+    fresh.set_type_info(type_info());
     *this = fresh;
   } else if (is_register() && reg().is(target)) {
     ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 9c8b4f7..7d4410c 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -138,14 +138,13 @@
                                      Register holder,
                                      Register name,
                                      JSObject* holder_obj) {
-  __ push(receiver);
-  __ push(holder);
   __ push(name);
   InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
   ASSERT(!Heap::InNewSpace(interceptor));
-  __ movq(kScratchRegister, Handle<Object>(interceptor),
-          RelocInfo::EMBEDDED_OBJECT);
+  __ Move(kScratchRegister, Handle<Object>(interceptor));
   __ push(kScratchRegister);
+  __ push(receiver);
+  __ push(holder);
   __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
 }
 
@@ -236,7 +235,7 @@
     __ Push(Handle<Map>(transition));
     __ push(rax);
     __ push(scratch);
-    __ TailCallRuntime(
+    __ TailCallExternalReference(
         ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
     return;
   }
@@ -328,8 +327,7 @@
   GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
 
   // Load length directly from the string.
-  __ movl(rax, FieldOperand(receiver, String::kLengthOffset));
-  __ Integer32ToSmi(rax, rax);
+  __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
   __ ret(0);
 
   // Check if the object is a JSValue wrapper.
@@ -341,8 +339,7 @@
   // directly if it is.
   __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
   GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
-  __ movl(rax, FieldOperand(scratch2, String::kLengthOffset));
-  __ Integer32ToSmi(rax, rax);
+  __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
   __ ret(0);
 }
 
@@ -526,7 +523,7 @@
 
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-      __ TailCallRuntime(ref, 5, 1);
+      __ TailCallExternalReference(ref, 5, 1);
 
       __ bind(&cleanup);
       __ pop(scratch1);
@@ -548,7 +545,7 @@
 
     ExternalReference ref = ExternalReference(
         IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
-    __ TailCallRuntime(ref, 5, 1);
+    __ TailCallExternalReference(ref, 5, 1);
   }
 
  private:
@@ -556,41 +553,258 @@
 };
 
 
+// Reserves space for the extra arguments to FastHandleApiCall in the
+// caller's frame.
+//
+// These arguments are set by CheckPrototypes and GenerateFastApiCall.
+static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument in the internal frame of the caller
+  // -----------------------------------
+  __ movq(scratch, Operand(rsp, 0));
+  __ subq(rsp, Immediate(4 * kPointerSize));
+  __ movq(Operand(rsp, 0), scratch);
+  __ Move(scratch, Smi::FromInt(0));
+  __ movq(Operand(rsp, 1 * kPointerSize), scratch);
+  __ movq(Operand(rsp, 2 * kPointerSize), scratch);
+  __ movq(Operand(rsp, 3 * kPointerSize), scratch);
+  __ movq(Operand(rsp, 4 * kPointerSize), scratch);
+}
+
+
+// Undoes the effects of ReserveSpaceForFastApiCall.
+static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]  : return address
+  //  -- rsp[8]  : last fast api call extra argument
+  //  -- ...
+  //  -- rsp[32] : first fast api call extra argument
+  //  -- rsp[40] : last argument in the internal frame
+  // -----------------------------------
+  __ movq(scratch, Operand(rsp, 0));
+  __ movq(Operand(rsp, 4 * kPointerSize), scratch);
+  __ addq(rsp, Immediate(kPointerSize * 4));
+}
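The reserve/free pair shuffles the return address so four argument slots open up underneath it. Modelled in C++ with a vector whose index 0 plays the role of rsp:

#include <vector>

// stack[0] holds the return address on entry.
void ModelReserve(std::vector<long>& stack) {
  long ret = stack[0];                       // movq scratch, [rsp]
  stack.insert(stack.begin(), 4, 0);         // subq rsp, 4 * kPointerSize
  stack[0] = ret;                            // movq [rsp], scratch
  for (int i = 1; i <= 4; ++i) stack[i] = 0; // Smi::FromInt(0) placeholders
}

void ModelFree(std::vector<long>& stack) {
  stack[4] = stack[0];                            // movq [rsp + 32], [rsp]
  stack.erase(stack.begin(), stack.begin() + 4);  // addq rsp, 4 * kPointerSize
}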
+
+
+// Generates call to FastHandleApiCall builtin.
+static void GenerateFastApiCall(MacroAssembler* masm,
+                                const CallOptimization& optimization,
+                                int argc) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0]              : return address
+  //  -- rsp[8]              : object passing the type check
+  //                           (last fast api call extra argument,
+  //                            set by CheckPrototypes)
+  //  -- rsp[16]             : api call data
+  //  -- rsp[24]             : api callback
+  //  -- rsp[32]             : api function
+  //                           (first fast api call extra argument)
+  //  -- rsp[40]             : last argument
+  //  -- ...
+  //  -- rsp[(argc + 5) * 8] : first argument
+  //  -- rsp[(argc + 6) * 8] : receiver
+  // -----------------------------------
+
+  // Get the function and setup the context.
+  JSFunction* function = optimization.constant_function();
+  __ Move(rdi, Handle<JSFunction>(function));
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Pass the additional arguments FastHandleApiCall expects.
+  __ movq(Operand(rsp, 4 * kPointerSize), rdi);
+  bool info_loaded = false;
+  Object* callback = optimization.api_call_info()->callback();
+  if (Heap::InNewSpace(callback)) {
+    info_loaded = true;
+    __ Move(rcx, Handle<CallHandlerInfo>(optimization.api_call_info()));
+    __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kCallbackOffset));
+    __ movq(Operand(rsp, 3 * kPointerSize), rbx);
+  } else {
+    __ Move(Operand(rsp, 3 * kPointerSize), Handle<Object>(callback));
+  }
+  Object* call_data = optimization.api_call_info()->data();
+  if (Heap::InNewSpace(call_data)) {
+    if (!info_loaded) {
+      __ Move(rcx, Handle<CallHandlerInfo>(optimization.api_call_info()));
+    }
+    __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
+    __ movq(Operand(rsp, 2 * kPointerSize), rbx);
+  } else {
+    __ Move(Operand(rsp, 2 * kPointerSize), Handle<Object>(call_data));
+  }
+
+  // Set the number of arguments.
+  __ movq(rax, Immediate(argc + 4));
+
+  // Jump to the fast api call builtin (tail call).
+  Handle<Code> code = Handle<Code>(
+      Builtins::builtin(Builtins::FastHandleApiCall));
+  ParameterCount expected(0);
+  __ InvokeCode(code, expected, expected,
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+}
+
+
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
-  CallInterceptorCompiler(const ParameterCount& arguments, Register name)
-      : arguments_(arguments), name_(name) {}
+  CallInterceptorCompiler(StubCompiler* stub_compiler,
+                          const ParameterCount& arguments,
+                          Register name)
+      : stub_compiler_(stub_compiler),
+        arguments_(arguments),
+        name_(name) {}
 
+  void Compile(MacroAssembler* masm,
+               JSObject* object,
+               JSObject* holder,
+               String* name,
+               LookupResult* lookup,
+               Register receiver,
+               Register scratch1,
+               Register scratch2,
+               Label* miss) {
+    ASSERT(holder->HasNamedInterceptor());
+    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+    // Check that the receiver isn't a smi.
+    __ JumpIfSmi(receiver, miss);
+
+    CallOptimization optimization(lookup);
+
+    if (optimization.is_constant_call()) {
+      CompileCacheable(masm,
+                       object,
+                       receiver,
+                       scratch1,
+                       scratch2,
+                       holder,
+                       lookup,
+                       name,
+                       optimization,
+                       miss);
+    } else {
+      CompileRegular(masm,
+                     object,
+                     receiver,
+                     scratch1,
+                     scratch2,
+                     name,
+                     holder,
+                     miss);
+    }
+  }
+
+ private:
   void CompileCacheable(MacroAssembler* masm,
-                        StubCompiler* stub_compiler,
+                        JSObject* object,
                         Register receiver,
-                        Register holder,
                         Register scratch1,
                         Register scratch2,
                         JSObject* holder_obj,
                         LookupResult* lookup,
                         String* name,
+                        const CallOptimization& optimization,
                         Label* miss_label) {
-    JSFunction* function = 0;
-    bool optimize = false;
-    // So far the most popular case for failed interceptor is
-    // CONSTANT_FUNCTION sitting below.
-    if (lookup->type() == CONSTANT_FUNCTION) {
-      function = lookup->GetConstantFunction();
-      // JSArray holder is a special case for call constant function
-      // (see the corresponding code).
-      if (function->is_compiled() && !holder_obj->IsJSArray()) {
-        optimize = true;
-      }
-    }
-
-    if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
-      return;
-    }
-
+    ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
+    int depth1 = kInvalidProtoDepth;
+    int depth2 = kInvalidProtoDepth;
+    bool can_do_fast_api_call = false;
+    if (optimization.is_simple_api_call() &&
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+      if (depth1 == kInvalidProtoDepth) {
+        depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
+                                                              lookup->holder());
+      }
+      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+                             (depth2 != kInvalidProtoDepth);
+    }
+
+    __ IncrementCounter(&Counters::call_const_interceptor, 1);
+
+    if (can_do_fast_api_call) {
+      __ IncrementCounter(&Counters::call_const_interceptor_fast_api, 1);
+      ReserveSpaceForFastApiCall(masm, scratch1);
+    }
+
+    Label miss_cleanup;
+    Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
+    Register holder =
+        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+                                        scratch1, scratch2, name,
+                                        depth1, miss);
+
+    Label regular_invoke;
+    LoadWithInterceptor(masm, receiver, holder, holder_obj, &regular_invoke);
+
+    // Generate code for the failed interceptor case.
+
+    // Check the lookup is still valid.
+    stub_compiler_->CheckPrototypes(holder_obj, receiver,
+                                    lookup->holder(),
+                                    scratch1, scratch2, name,
+                                    depth2, miss);
+
+    if (can_do_fast_api_call) {
+      GenerateFastApiCall(masm, optimization, arguments_.immediate());
+    } else {
+      __ InvokeFunction(optimization.constant_function(), arguments_,
+                        JUMP_FUNCTION);
+    }
+
+    if (can_do_fast_api_call) {
+      __ bind(&miss_cleanup);
+      FreeSpaceForFastApiCall(masm, scratch1);
+      __ jmp(miss_label);
+    }
+
+    __ bind(&regular_invoke);
+    if (can_do_fast_api_call) {
+      FreeSpaceForFastApiCall(masm, scratch1);
+    }
+  }
+
+  void CompileRegular(MacroAssembler* masm,
+                      JSObject* object,
+                      Register receiver,
+                      Register scratch1,
+                      Register scratch2,
+                      String* name,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    Register holder =
+        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+                                        scratch1, scratch2, name,
+                                        miss_label);
+
+    __ EnterInternalFrame();
+    // Save the name_ register across the call.
+    __ push(name_);
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             name_,
+                             holder_obj);
+
+    __ CallExternalReference(
+        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+        5);
+
+    // Restore the name_ register.
+    __ pop(name_);
+    __ LeaveInternalFrame();
+  }
+
+  void LoadWithInterceptor(MacroAssembler* masm,
+                           Register receiver,
+                           Register holder,
+                           JSObject* holder_obj,
+                           Label* interceptor_succeeded) {
     __ EnterInternalFrame();
     __ push(holder);  // Save the holder.
     __ push(name_);  // Save the name.
@@ -606,55 +820,39 @@
     __ LeaveInternalFrame();
 
     __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
-    Label invoke;
-    __ j(not_equal, &invoke);
-
-    stub_compiler->CheckPrototypes(holder_obj, receiver,
-                                   lookup->holder(), scratch1,
-                                   scratch2,
-                                   name,
-                                   miss_label);
-
-    __ InvokeFunction(function, arguments_, JUMP_FUNCTION);
-
-    __ bind(&invoke);
+    __ j(not_equal, interceptor_succeeded);
   }
 
-  void CompileRegular(MacroAssembler* masm,
-                      Register receiver,
-                      Register holder,
-                      Register scratch,
-                      JSObject* holder_obj,
-                      Label* miss_label) {
-    __ EnterInternalFrame();
-    // Save the name_ register across the call.
-    __ push(name_);
-
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             holder_obj);
-
-    __ CallExternalReference(
-        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
-        5);
-
-    __ pop(name_);
-    __ LeaveInternalFrame();
-  }
-
- private:
+  StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
 };
 
 
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+static Object* GenerateCheckPropertyCell(MacroAssembler* masm,
+                                         GlobalObject* global,
+                                         String* name,
+                                         Register scratch,
+                                         Label* miss) {
+  Object* probe = global->EnsurePropertyCell(name);
+  if (probe->IsFailure()) return probe;
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+  ASSERT(cell->value()->IsTheHole());
+  __ Move(scratch, Handle<Object>(cell));
+  __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+         Factory::the_hole_value());
+  __ j(not_equal, miss);
+  return cell;
+}
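GenerateCheckPropertyCell splits the work between compile time (ensure the cell exists and holds the hole) and run time (re-check that it is still the hole). A toy restatement, assuming a simple name-to-cell map; the real EnsurePropertyCell can also fail on allocation:

#include <cassert>
#include <map>
#include <string>

struct Cell { bool is_hole = true; };

// Compile-time half: create the cell if absent and insist it holds the hole.
Cell* EnsureEmptyCell(std::map<std::string, Cell>& global,
                      const std::string& name) {
  Cell* cell = &global[name];  // EnsurePropertyCell: creates if absent
  assert(cell->is_hole);       // ASSERT(cell->value()->IsTheHole())
  return cell;
}

// Runtime half: the emitted code compares the cell's value against the hole
// and jumps to miss when a script has since stored something there.
bool StillEmpty(const Cell* cell) { return cell->is_hole; }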
+
+
 #undef __
 
 #define __ ACCESS_MASM((masm()))
 
-
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
@@ -670,7 +868,18 @@
   // rsp[(argc + 1) * 8] : argument 0 = receiver
   // -----------------------------------
 
-  Label miss;
+  SharedFunctionInfo* function_info = function->shared();
+  if (function_info->HasCustomCallGenerator()) {
+    CustomCallGenerator generator =
+        ToCData<CustomCallGenerator>(function_info->function_data());
+    Object* result = generator(this, object, holder, function, name, check);
+    // undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) {
+      return result;
+    }
+  }
+
+  Label miss_in_smi_check;
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
@@ -678,22 +887,39 @@
 
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
-    __ JumpIfSmi(rdx, &miss);
+    __ JumpIfSmi(rdx, &miss_in_smi_check);
   }
 
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
 
+  CallOptimization optimization(function);
+  int depth = kInvalidProtoDepth;
+  Label miss;
+
   switch (check) {
     case RECEIVER_MAP_CHECK:
+      __ IncrementCounter(&Counters::call_const, 1);
+
+      if (optimization.is_simple_api_call() && !object->IsGlobalObject()) {
+        depth = optimization.GetPrototypeDepthOfExpectedType(
+            JSObject::cast(object), holder);
+      }
+
+      if (depth != kInvalidProtoDepth) {
+        __ IncrementCounter(&Counters::call_const_fast_api, 1);
+        ReserveSpaceForFastApiCall(masm(), rax);
+      }
+
       // Check that the maps haven't changed.
       CheckPrototypes(JSObject::cast(object), rdx, holder,
-                      rbx, rax, name, &miss);
+                      rbx, rax, name, depth, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
       if (object->IsGlobalObject()) {
+        ASSERT(depth == kInvalidProtoDepth);
         __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
         __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
       }
@@ -759,26 +985,24 @@
       break;
     }
 
-    case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
-      CheckPrototypes(JSObject::cast(object), rdx, holder,
-                      rbx, rax, name, &miss);
-      // Make sure object->HasFastElements().
-      // Get the elements array of the object.
-      __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-      // Check that the object is in fast mode (not dictionary).
-      __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
-             Factory::fixed_array_map());
-      __ j(not_equal, &miss);
-      break;
-
     default:
       UNREACHABLE();
   }
 
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  if (depth != kInvalidProtoDepth) {
+    GenerateFastApiCall(masm(), optimization, argc);
+  } else {
+    __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
+  if (depth != kInvalidProtoDepth) {
+    FreeSpaceForFastApiCall(masm(), rax);
+  }
+
+  // Handle call cache miss.
+  __ bind(&miss_in_smi_check);
   Handle<Code> ic = ComputeCallMiss(arguments().immediate());
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
@@ -843,6 +1067,257 @@
 }
 
 
+Object* CallStubCompiler::CompileArrayPushCall(Object* object,
+                                               JSObject* holder,
+                                               JSFunction* function,
+                                               String* name,
+                                               CheckType check) {
+  // ----------- S t a t e -------------
+  //  -- rcx                 : name
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- ...
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  ASSERT(check == RECEIVER_MAP_CHECK);
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray()) {
+    return Heap::undefined_value();
+  }
+
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(rdx, &miss);
+
+  CheckPrototypes(JSObject::cast(object),
+                  rdx,
+                  holder,
+                  rbx,
+                  rax,
+                  name,
+                  &miss);
+
+  if (argc == 0) {
+    // Noop, return the length.
+    __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+    __ ret((argc + 1) * kPointerSize);
+  } else {
+    // Get the elements array of the object.
+    __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
+
+    // Check that the elements are in fast mode (not dictionary).
+    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+           Factory::fixed_array_map());
+    __ j(not_equal, &miss);
+
+    if (argc == 1) {  // Otherwise fall through to call builtin.
+      Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
+
+      // Get the array's length into rax and calculate new length.
+      __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+      STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
+      __ SmiAddConstant(rax, rax, Smi::FromInt(argc));
+
+      // Get the element's length into rcx.
+      __ movl(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+      __ Integer32ToSmi(rcx, rcx);
+
+      // Check if we could survive without allocation.
+      __ SmiCompare(rax, rcx);
+      __ j(greater, &attempt_to_grow_elements);
+
+      // Save new length.
+      __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+      // Push the element.
+      __ movq(rcx, Operand(rsp, argc * kPointerSize));
+      SmiIndex index =
+          masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
+      __ lea(rdx, FieldOperand(rbx,
+                               index.reg, index.scale,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
+      __ movq(Operand(rdx, 0), rcx);
+
+      // Check if value is a smi.
+      __ JumpIfNotSmi(rcx, &with_rset_update);
+
+      __ bind(&exit);
+      __ ret((argc + 1) * kPointerSize);
+
+      __ bind(&with_rset_update);
+
+      __ InNewSpace(rbx, rcx, equal, &exit);
+
+      RecordWriteStub stub(rbx, rdx, rcx);
+      __ CallStub(&stub);
+      __ ret((argc + 1) * kPointerSize);
+
+      __ bind(&attempt_to_grow_elements);
+      ExternalReference new_space_allocation_top =
+          ExternalReference::new_space_allocation_top_address();
+      ExternalReference new_space_allocation_limit =
+          ExternalReference::new_space_allocation_limit_address();
+
+      const int kAllocationDelta = 4;
+      // Load top.
+      __ movq(rcx, new_space_allocation_top);
+      __ movq(rcx, Operand(rcx, 0));
+
+      // Check if it's the end of elements.
+      index = masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
+      __ lea(rdx, FieldOperand(rbx,
+                               index.reg, index.scale,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
+      __ cmpq(rdx, rcx);
+      __ j(not_equal, &call_builtin);
+      __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
+      __ movq(kScratchRegister, new_space_allocation_limit);
+      __ cmpq(rcx, Operand(kScratchRegister, 0));
+      __ j(above, &call_builtin);
+
+      // We fit and could grow elements.
+      __ movq(kScratchRegister, new_space_allocation_top);
+      __ movq(Operand(kScratchRegister, 0), rcx);
+      __ movq(rcx, Operand(rsp, argc * kPointerSize));
+
+      // Push the argument...
+      __ movq(Operand(rdx, 0), rcx);
+      // ... and fill the rest with holes.
+      __ Move(kScratchRegister, Factory::the_hole_value());
+      for (int i = 1; i < kAllocationDelta; i++) {
+        __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
+      }
+
+      // Restore receiver to rdx as finish sequence assumes it's here.
+      __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+      // Increment element's and array's sizes.
+      __ addq(FieldOperand(rbx, FixedArray::kLengthOffset),
+              Immediate(kAllocationDelta));
+      __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+      // Elements are in new space, so no remembered set updates are necessary.
+      __ ret((argc + 1) * kPointerSize);
+
+      __ bind(&call_builtin);
+    }
+
+    __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
+                                 argc + 1,
+                                 1);
+  }
+
+  __ bind(&miss);
+
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
+}
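The generated push fast path, restated as ordinary C++ over a toy backing store: push in place when there is room, otherwise grow by kAllocationDelta slots, but only when the elements array abuts the new-space allocation top. Everything here is schematic:

// Toy backing store: elems has room for `capacity` slots, length <= capacity.
struct ToyArray { long* elems; long length; long capacity; };

const int kAllocationDelta = 4;
const long kTheHole = -1;  // stands in for Factory::the_hole_value()

// Returns false where the emitted code would fall through to the builtin.
bool FastPush(ToyArray* a, long value, long** new_space_top, long* limit) {
  long new_length = a->length + 1;
  if (new_length <= a->capacity) {       // survive without allocation
    a->elems[a->length] = value;         // push the element
    a->length = new_length;              // save new length
    return true;                         // (plus a remembered-set update
  }                                      //  when value is not a smi)
  if (a->elems + a->capacity != *new_space_top) return false;  // not at end
  if (*new_space_top + kAllocationDelta > limit) return false; // no room
  *new_space_top += kAllocationDelta;    // grow by bumping the allocation top
  a->elems[a->length] = value;           // push the argument...
  for (int i = 1; i < kAllocationDelta; ++i)
    a->elems[a->length + i] = kTheHole;  // ...and fill the rest with holes
  a->capacity += kAllocationDelta;       // increment elements' size
  a->length = new_length;                // and the array's length
  return true;                           // new space: no remembered set update
}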
+
+
+Object* CallStubCompiler::CompileArrayPopCall(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              CheckType check) {
+  // ----------- S t a t e -------------
+  //  -- rcx                 : name
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- ...
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  ASSERT(check == RECEIVER_MAP_CHECK);
+
+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray()) {
+    return Heap::undefined_value();
+  }
+
+  Label miss, return_undefined, call_builtin;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(rdx, &miss);
+
+  CheckPrototypes(JSObject::cast(object), rdx,
+                  holder, rbx,
+                  rax, name, &miss);
+
+  // Get the elements array of the object.
+  __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
+
+  // Check that the elements are in fast mode (not dictionary).
+  __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::fixed_array_map());
+  __ j(not_equal, &miss);
+
+  // Get the array's length into rcx and calculate new length.
+  __ movq(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+  __ SmiSubConstant(rcx, rcx, Smi::FromInt(1));
+  __ SmiTest(rcx);
+  __ j(negative, &return_undefined);
+
+  // Get the last element.
+  __ Move(r9, Factory::the_hole_value());
+  SmiIndex index =
+      masm()->SmiToIndex(r8, rcx, times_pointer_size);
+  __ movq(rax, FieldOperand(rbx,
+                            index.reg, index.scale,
+                            FixedArray::kHeaderSize));
+  // Check if element is already the hole.
+  __ cmpq(rax, r9);
+  __ j(equal, &call_builtin);
+
+  // Set the array's length.
+  __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
+
+  // Fill with the hole and return the original value.
+  __ movq(FieldOperand(rbx,
+                       index.reg, index.scale,
+                       FixedArray::kHeaderSize),
+          r9);
+  __ ret((argc + 1) * kPointerSize);
+
+  __ bind(&return_undefined);
+
+  __ Move(rax, Factory::undefined_value());
+  __ ret((argc + 1) * kPointerSize);
+
+  __ bind(&call_builtin);
+  __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
+                               argc + 1,
+                               1);
+  __ bind(&miss);
+
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
+}
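A companion sketch for the pop fast path, with self-contained toy types; the hole is modelled as -1 and undefined as 0:

struct ToyJSArray { long* elems; long length; };

long FastPop(ToyJSArray* a, bool* call_builtin) {
  *call_builtin = false;
  long new_length = a->length - 1;     // SmiSubConstant(rcx, rcx, 1)
  if (new_length < 0) return 0;        // return undefined
  long value = a->elems[new_length];   // get the last element
  if (value == -1) {                   // already the hole: defer to builtin
    *call_builtin = true;
    return 0;
  }
  a->length = new_length;              // set the array's length
  a->elems[new_length] = -1;           // fill with the hole
  return value;                        // return the original value
}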
+
+
 Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                  JSObject* holder,
                                                  String* name) {
@@ -866,18 +1341,16 @@
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(arguments(), rcx);
-  CompileLoadInterceptor(&compiler,
-                         this,
-                         masm(),
-                         object,
-                         holder,
-                         name,
-                         &lookup,
-                         rdx,
-                         rbx,
-                         rdi,
-                         &miss);
+  CallInterceptorCompiler compiler(this, arguments(), rcx);
+  compiler.Compile(masm(),
+                   object,
+                   holder,
+                   name,
+                   &lookup,
+                   rdx,
+                   rbx,
+                   rdi,
+                   &miss);
 
   // Restore receiver.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1038,6 +1511,51 @@
 }
 
 
+Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
+                                                 JSObject* object,
+                                                 JSObject* last) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Load receiver.
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  // Check that the receiver is not a smi.
+  __ JumpIfSmi(rax, &miss);
+
+  // Check the maps of the full prototype chain. Also check that
+  // global property cells up to (but not including) the last object
+  // in the prototype chain are empty.
+  CheckPrototypes(object, rax, last, rbx, rdx, name, &miss);
+
+  // If the last object in the prototype chain is a global object,
+  // check that the global property cell is empty.
+  if (last->IsGlobalObject()) {
+    Object* cell = GenerateCheckPropertyCell(masm(),
+                                             GlobalObject::cast(last),
+                                             name,
+                                             rdx,
+                                             &miss);
+    if (cell->IsFailure()) return cell;
+  }
+
+  // Return undefined if maps of the full prototype chain are still the
+  // same and no global property with this name contains a value.
+  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NONEXISTENT, Heap::empty_string());
+}
+
+
 Object* LoadStubCompiler::CompileLoadField(JSObject* object,
                                            JSObject* holder,
                                            int index,
@@ -1360,7 +1878,7 @@
   // Do tail-call to the runtime system.
   ExternalReference store_callback_property =
       ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
-  __ TailCallRuntime(store_callback_property, 4, 1);
+  __ TailCallExternalReference(store_callback_property, 4, 1);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -1438,7 +1956,7 @@
   // Do tail-call to the runtime system.
   ExternalReference store_ic_property =
       ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
-  __ TailCallRuntime(store_ic_property, 3, 1);
+  __ TailCallExternalReference(store_ic_property, 3, 1);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -1637,7 +2155,7 @@
   // Do tail-call to the runtime system.
   ExternalReference load_callback_property =
       ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-  __ TailCallRuntime(load_callback_property, 5, 1);
+  __ TailCallExternalReference(load_callback_property, 5, 1);
 
   return true;
 }
@@ -1651,29 +2169,30 @@
                                        String* name,
                                        int save_at_depth,
                                        Label* miss) {
-  // TODO(602): support object saving.
-  ASSERT(save_at_depth == kInvalidProtoDepth);
-
   // Check that the maps haven't changed.
   Register result =
-      __ CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+      masm()->CheckMaps(object,
+                        object_reg,
+                        holder,
+                        holder_reg,
+                        scratch,
+                        save_at_depth,
+                        miss);
 
   // If we've skipped any global objects, it's not enough to verify
-  // that their maps haven't changed.
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty.
   while (object != holder) {
     if (object->IsGlobalObject()) {
-      GlobalObject* global = GlobalObject::cast(object);
-      Object* probe = global->EnsurePropertyCell(name);
-      if (probe->IsFailure()) {
-        set_failure(Failure::cast(probe));
+      Object* cell = GenerateCheckPropertyCell(masm(),
+                                               GlobalObject::cast(object),
+                                               name,
+                                               scratch,
+                                               miss);
+      if (cell->IsFailure()) {
+        set_failure(Failure::cast(cell));
         return result;
       }
-      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
-      ASSERT(cell->value()->IsTheHole());
-      __ Move(scratch, Handle<Object>(cell));
-      __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
-             Factory::the_hole_value());
-      __ j(not_equal, miss);
     }
     object = JSObject::cast(object->GetPrototype());
   }
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index a0e883c..1e4374b 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -30,29 +30,13 @@
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "scopes.h"
+#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
 
 #define __ ACCESS_MASM(masm())
 
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address.  All frame elements are in memory.
-VirtualFrame::VirtualFrame()
-    : elements_(parameter_count() + local_count() + kPreallocatedElements),
-      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
-  for (int i = 0; i <= stack_pointer_; i++) {
-    elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
-  }
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    register_locations_[i] = kIllegalIndex;
-  }
-}
-
-
 void VirtualFrame::Enter() {
   // Registers live on entry to a JS frame:
   //   rsp: stack pointer, points to return address from this function.
@@ -193,7 +177,7 @@
 }
 
 
-void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
@@ -201,7 +185,7 @@
 }
 
 
-void VirtualFrame::EmitPush(const Operand& operand, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(const Operand& operand, TypeInfo info) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
@@ -209,7 +193,7 @@
 }
 
 
-void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
@@ -219,7 +203,7 @@
 
 void VirtualFrame::EmitPush(Smi* smi_value) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(NumberInfo::kSmi));
+  elements_.Add(FrameElement::MemoryElement(TypeInfo::Smi()));
   stack_pointer_++;
   __ Push(smi_value);
 }
@@ -227,19 +211,14 @@
 
 void VirtualFrame::EmitPush(Handle<Object> value) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  NumberInfo::Type info = NumberInfo::kUnknown;
-  if (value->IsSmi()) {
-    info = NumberInfo::kSmi;
-  } else if (value->IsHeapNumber()) {
-    info = NumberInfo::kHeapNumber;
-  }
+  TypeInfo info = TypeInfo::TypeFromValue(value);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
   __ Push(value);
 }
 
 
-void VirtualFrame::EmitPush(Heap::RootListIndex index, NumberInfo::Type info) {
+void VirtualFrame::EmitPush(Heap::RootListIndex index, TypeInfo info) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;
@@ -247,6 +226,31 @@
 }
 
 
+void VirtualFrame::Push(Expression* expr) {
+  ASSERT(expr->IsTrivial());
+
+  Literal* lit = expr->AsLiteral();
+  if (lit != NULL) {
+    Push(lit->handle());
+    return;
+  }
+
+  VariableProxy* proxy = expr->AsVariableProxy();
+  if (proxy != NULL) {
+    Slot* slot = proxy->var()->slot();
+    if (slot->type() == Slot::LOCAL) {
+      PushLocalAt(slot->index());
+      return;
+    }
+    if (slot->type() == Slot::PARAMETER) {
+      PushParameterAt(slot->index());
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
 void VirtualFrame::Drop(int count) {
   ASSERT(count >= 0);
   ASSERT(height() >= count);
@@ -313,12 +317,12 @@
     elements_[new_backing_index] =
         FrameElement::RegisterElement(backing_reg,
                                       FrameElement::SYNCED,
-                                      original.number_info());
+                                      original.type_info());
   } else {
     elements_[new_backing_index] =
         FrameElement::RegisterElement(backing_reg,
                                       FrameElement::NOT_SYNCED,
-                                      original.number_info());
+                                      original.type_info());
   }
   // Update the other copies.
   for (int i = new_backing_index + 1; i < element_count(); i++) {
@@ -350,7 +354,7 @@
       FrameElement new_element =
           FrameElement::RegisterElement(fresh.reg(),
                                         FrameElement::NOT_SYNCED,
-                                        original.number_info());
+                                        original.type_info());
       Use(fresh.reg(), element_count());
       elements_.Add(new_element);
       __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
@@ -496,7 +500,7 @@
     if (element.is_constant() || element.is_copy()) {
       if (element.is_synced()) {
         // Just spill.
-        elements_[i] = FrameElement::MemoryElement(NumberInfo::kUnknown);
+        elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
       } else {
         // Allocate to a register.
         FrameElement backing_element;  // Invalid if not a copy.
@@ -508,7 +512,7 @@
         elements_[i] =
             FrameElement::RegisterElement(fresh.reg(),
                                           FrameElement::NOT_SYNCED,
-                                          NumberInfo::kUnknown);
+                                          TypeInfo::Unknown());
         Use(fresh.reg(), i);
 
         // Emit a move.
@@ -537,7 +541,7 @@
       // The copy flag is not relied on before the end of this loop,
       // including when registers are spilled.
       elements_[i].clear_copied();
-      elements_[i].set_number_info(NumberInfo::kUnknown);
+      elements_[i].set_type_info(TypeInfo::Unknown());
     }
   }
 }
@@ -744,11 +748,11 @@
   ASSERT(element.is_valid());
 
   // Get number type information of the result.
-  NumberInfo::Type info;
+  TypeInfo info;
   if (!element.is_copy()) {
-    info = element.number_info();
+    info = element.type_info();
   } else {
-    info = elements_[element.index()].number_info();
+    info = elements_[element.index()].type_info();
   }
 
   bool pop_needed = (stack_pointer_ == index);
@@ -758,7 +762,7 @@
       Result temp = cgen()->allocator()->Allocate();
       ASSERT(temp.is_valid());
       __ pop(temp.reg());
-      temp.set_number_info(info);
+      temp.set_type_info(info);
       return temp;
     }
 
@@ -788,7 +792,7 @@
     FrameElement new_element =
         FrameElement::RegisterElement(temp.reg(),
                                       FrameElement::SYNCED,
-                                      element.number_info());
+                                      element.type_info());
     // Preserve the copy flag on the element.
     if (element.is_copied()) new_element.set_copied();
     elements_[index] = new_element;
@@ -845,6 +849,25 @@
 }
 
 
+Result VirtualFrame::CallJSFunction(int arg_count) {
+  Result function = Pop();
+
+  // InvokeFunction requires function in rdi.  Move it in there.
+  function.ToRegister(rdi);
+  function.Unuse();
+
+  // +1 for receiver.
+  PrepareForCall(arg_count + 1, arg_count + 1);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(rdi, count, CALL_FUNCTION);
+  RestoreContextRegister();
+  Result result = cgen()->allocator()->Allocate(rax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
 void VirtualFrame::SyncElementBelowStackPointer(int index) {
   // Emit code to write elements below the stack pointer to their
   // (already allocated) stack address.
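
The hunks above are mostly a mechanical rename from the old NumberInfo::Type
enum to a TypeInfo value class with named factory methods. A minimal sketch of
the shape this implies (only the names TypeInfo, Unknown() and Uninitialized()
come from the diff; the bodies and enum values below are illustrative, not the
real type-info.h):

    // Sketch only: the real TypeInfo carries more states and queries.
    class TypeInfo {
     public:
      // Factories replace raw enum constants such as NumberInfo::kUnknown.
      static TypeInfo Unknown() { return TypeInfo(kUnknownType); }
      static TypeInfo Uninitialized() { return TypeInfo(kUninitializedType); }
      bool IsUnknown() const { return type_ == kUnknownType; }
     private:
      enum Type { kUnknownType, kUninitializedType };
      explicit TypeInfo(Type type) : type_(type) {}
      Type type_;
    };

Passing a small value object instead of a bare enum lets frame elements and
Results answer type queries directly instead of switching on a constant.
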
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index c9aa799..7cda181 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -28,7 +28,7 @@
 #ifndef V8_X64_VIRTUAL_FRAME_X64_H_
 #define V8_X64_VIRTUAL_FRAME_X64_H_
 
-#include "number-info.h"
+#include "type-info.h"
 #include "register-allocator.h"
 #include "scopes.h"
 
@@ -73,17 +73,17 @@
   static const int kIllegalIndex = -1;
 
   // Construct an initial virtual frame on entry to a JS function.
-  VirtualFrame();
+  inline VirtualFrame();
 
   // Construct a virtual frame as a clone of an existing one.
-  explicit VirtualFrame(VirtualFrame* original);
+  explicit inline VirtualFrame(VirtualFrame* original);
 
   CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
   MacroAssembler* masm() { return cgen()->masm(); }
 
   // Create a duplicate of an existing valid frame element.
   FrameElement CopyElementAt(int index,
-    NumberInfo::Type info = NumberInfo::kUninitialized);
+    TypeInfo info = TypeInfo::Uninitialized());
 
   // The number of elements on the virtual frame.
   int element_count() { return elements_.length(); }
@@ -139,7 +139,7 @@
   void ForgetElements(int count);
 
   // Spill all values from the frame to memory.
-  void SpillAll();
+  inline void SpillAll();
 
   // Spill all occurrences of a specific register from the frame.
   void Spill(Register reg) {
@@ -200,7 +200,7 @@
   // Prepare for returning from the frame by spilling locals.  This
   // avoids generating unnecessary merge code when jumping to the
   // shared return site.  Emits code for spills.
-  void PrepareForReturn();
+  inline void PrepareForReturn();
 
   // Number of local variables after which we use a loop for allocating.
   static const int kLocalVarBound = 7;
@@ -318,6 +318,10 @@
   // arguments are consumed by the call.
   Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
 
+  // Call a JS function from the top of the stack with arguments
+  // taken from the stack.
+  Result CallJSFunction(int arg_count);
+
   // Call runtime given the number of arguments expected on (and
   // removed from) the stack.
   Result CallRuntime(Runtime::Function* f, int arg_count);
@@ -383,27 +387,27 @@
   // Push an element on top of the expression stack and emit a
   // corresponding push instruction.
   void EmitPush(Register reg,
-                NumberInfo::Type info = NumberInfo::kUnknown);
+                TypeInfo info = TypeInfo::Unknown());
   void EmitPush(const Operand& operand,
-                NumberInfo::Type info = NumberInfo::kUnknown);
+                TypeInfo info = TypeInfo::Unknown());
   void EmitPush(Heap::RootListIndex index,
-                NumberInfo::Type info = NumberInfo::kUnknown);
+                TypeInfo info = TypeInfo::Unknown());
   void EmitPush(Immediate immediate,
-                NumberInfo::Type info = NumberInfo::kUnknown);
+                TypeInfo info = TypeInfo::Unknown());
   void EmitPush(Smi* value);
   // Uses kScratchRegister, emits appropriate relocation info.
   void EmitPush(Handle<Object> value);
 
   // Push an element on the virtual frame.
-  void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
-  void Push(Handle<Object> value);
-  void Push(Smi* value) { Push(Handle<Object>(value)); }
+  inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
+  inline void Push(Handle<Object> value);
+  inline void Push(Smi* value);
 
   // Pushing a result invalidates it (its contents become owned by the
   // frame).
   void Push(Result* result) {
     if (result->is_register()) {
-      Push(result->reg(), result->number_info());
+      Push(result->reg(), result->type_info());
     } else {
       ASSERT(result->is_constant());
       Push(result->handle());
@@ -411,10 +415,17 @@
     result->Unuse();
   }
 
+  // Pushing an expression expects that the expression is trivial (according
+  // to Expression::IsTrivial).
+  void Push(Expression* expr);
+
   // Nip removes zero or more elements from immediately below the top
   // of the frame, leaving the previous top-of-frame value on top of
   // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
-  void Nip(int num_dropped);
+  inline void Nip(int num_dropped);
+
+  inline void SetTypeForLocalAt(int index, TypeInfo info);
+  inline void SetTypeForParamAt(int index, TypeInfo info);
 
  private:
   static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
@@ -506,7 +517,7 @@
 
   // Push a copy of a frame slot (typically a local or parameter) on top of
   // the frame.
-  void PushFrameSlotAt(int index);
+  inline void PushFrameSlotAt(int index);
 
   // Push the value of a frame slot (typically a local or parameter) on
   // top of the frame and invalidate the slot.
@@ -557,7 +568,7 @@
   // (via PrepareForCall).
   Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
 
-  bool Equals(VirtualFrame* other);
+  inline bool Equals(VirtualFrame* other);
 
   // Classes that need raw access to the elements_ array.
   friend class DeferredCode;
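
For context on the CallJSFunction entry point declared above and implemented
earlier in this patch, a hypothetical call site is sketched below. The callee
is pushed last so that CallJSFunction can Pop() it into rdi, while the receiver
and arguments beneath it are consumed by PrepareForCall. CodeGeneratorSketch
and all variable names are invented for illustration:

    // Hypothetical codegen fragment; not part of the patch.
    void CodeGeneratorSketch::GenerateCall(Result* receiver, Result* arg0,
                                           Result* arg1, Result* callee) {
      frame_->Push(receiver);  // receiver sits below the arguments
      frame_->Push(arg0);      // argument 0
      frame_->Push(arg1);      // argument 1
      frame_->Push(callee);    // pushed last: CallJSFunction pops it into rdi
      Result answer = frame_->CallJSFunction(2);  // receiver not counted
      frame_->Push(&answer);   // leave the call result on top of the frame
    }
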
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 121ba19..5893a2f 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -68,227 +68,12 @@
 }
 
 
-template <typename C>
-bool ZoneSplayTree<C>::Insert(const Key& key, Locator* locator) {
-  if (is_empty()) {
-    // If the tree is empty, insert the new node.
-    root_ = new Node(key, C::kNoValue);
-  } else {
-    // Splay on the key to move the last node on the search path
-    // for the key to the root of the tree.
-    Splay(key);
-    // Ignore repeated insertions with the same key.
-    int cmp = C::Compare(key, root_->key_);
-    if (cmp == 0) {
-      locator->bind(root_);
-      return false;
-    }
-    // Insert the new node.
-    Node* node = new Node(key, C::kNoValue);
-    if (cmp > 0) {
-      node->left_ = root_;
-      node->right_ = root_->right_;
-      root_->right_ = NULL;
-    } else {
-      node->right_ = root_;
-      node->left_ = root_->left_;
-      root_->left_ = NULL;
-    }
-    root_ = node;
-  }
-  locator->bind(root_);
-  return true;
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::Find(const Key& key, Locator* locator) {
-  if (is_empty())
-    return false;
-  Splay(key);
-  if (C::Compare(key, root_->key_) == 0) {
-    locator->bind(root_);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindGreatestLessThan(const Key& key,
-                                            Locator* locator) {
-  if (is_empty())
-    return false;
-  // Splay on the key to move the node with the given key or the last
-  // node on the search path to the top of the tree.
-  Splay(key);
-  // Now the result is either the root node or the greatest node in
-  // the left subtree.
-  int cmp = C::Compare(root_->key_, key);
-  if (cmp <= 0) {
-    locator->bind(root_);
-    return true;
-  } else {
-    Node* temp = root_;
-    root_ = root_->left_;
-    bool result = FindGreatest(locator);
-    root_ = temp;
-    return result;
-  }
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindLeastGreaterThan(const Key& key,
-                                            Locator* locator) {
-  if (is_empty())
-    return false;
-  // Splay on the key to move the node with the given key or the last
-  // node on the search path to the top of the tree.
-  Splay(key);
-  // Now the result is either the root node or the least node in
-  // the right subtree.
-  int cmp = C::Compare(root_->key_, key);
-  if (cmp >= 0) {
-    locator->bind(root_);
-    return true;
-  } else {
-    Node* temp = root_;
-    root_ = root_->right_;
-    bool result = FindLeast(locator);
-    root_ = temp;
-    return result;
-  }
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindGreatest(Locator* locator) {
-  if (is_empty())
-    return false;
-  Node* current = root_;
-  while (current->right_ != NULL)
-    current = current->right_;
-  locator->bind(current);
-  return true;
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::FindLeast(Locator* locator) {
-  if (is_empty())
-    return false;
-  Node* current = root_;
-  while (current->left_ != NULL)
-    current = current->left_;
-  locator->bind(current);
-  return true;
-}
-
-
-template <typename C>
-bool ZoneSplayTree<C>::Remove(const Key& key) {
-  // Bail if the tree is empty
-  if (is_empty())
-    return false;
-  // Splay on the key to move the node with the given key to the top.
-  Splay(key);
-  // Bail if the key is not in the tree
-  if (C::Compare(key, root_->key_) != 0)
-    return false;
-  if (root_->left_ == NULL) {
-    // No left child, so the new tree is just the right child.
-    root_ = root_->right_;
-  } else {
-    // Left child exists.
-    Node* right = root_->right_;
-    // Make the original left child the new root.
-    root_ = root_->left_;
-    // Splay to make sure that the new root has an empty right child.
-    Splay(key);
-    // Insert the original right child as the right child of the new
-    // root.
-    root_->right_ = right;
-  }
-  return true;
-}
-
-
-template <typename C>
-void ZoneSplayTree<C>::Splay(const Key& key) {
-  if (is_empty())
-    return;
-  Node dummy_node(C::kNoKey, C::kNoValue);
-  // Create a dummy node.  The use of the dummy node is a bit
-  // counter-intuitive: The right child of the dummy node will hold
-  // the L tree of the algorithm.  The left child of the dummy node
-  // will hold the R tree of the algorithm.  Using a dummy node, left
-  // and right will always be nodes and we avoid special cases.
-  Node* dummy = &dummy_node;
-  Node* left = dummy;
-  Node* right = dummy;
-  Node* current = root_;
-  while (true) {
-    int cmp = C::Compare(key, current->key_);
-    if (cmp < 0) {
-      if (current->left_ == NULL)
-        break;
-      if (C::Compare(key, current->left_->key_) < 0) {
-        // Rotate right.
-        Node* temp = current->left_;
-        current->left_ = temp->right_;
-        temp->right_ = current;
-        current = temp;
-        if (current->left_ == NULL)
-          break;
-      }
-      // Link right.
-      right->left_ = current;
-      right = current;
-      current = current->left_;
-    } else if (cmp > 0) {
-      if (current->right_ == NULL)
-        break;
-      if (C::Compare(key, current->right_->key_) > 0) {
-        // Rotate left.
-        Node* temp = current->right_;
-        current->right_ = temp->left_;
-        temp->left_ = current;
-        current = temp;
-        if (current->right_ == NULL)
-          break;
-      }
-      // Link left.
-      left->right_ = current;
-      left = current;
-      current = current->right_;
-    } else {
-      break;
-    }
-  }
-  // Assemble.
-  left->right_ = current->left_;
-  right->left_ = current->right_;
-  current->left_ = dummy->right_;
-  current->right_ = dummy->left_;
-  root_ = current;
-}
-
-
-template <typename Config> template <class Callback>
-void ZoneSplayTree<Config>::ForEach(Callback* callback) {
-  // Pre-allocate some space for tiny trees.
-  ZoneList<Node*> nodes_to_visit(10);
-  nodes_to_visit.Add(root_);
-  int pos = 0;
-  while (pos < nodes_to_visit.length()) {
-    Node* node = nodes_to_visit[pos++];
-    if (node == NULL) continue;
-    callback->Call(node->key(), node->value());
-    nodes_to_visit.Add(node->left());
-    nodes_to_visit.Add(node->right());
-  }
+template <typename Config>
+ZoneSplayTree<Config>::~ZoneSplayTree() {
+  // Reset the root to avoid unneeded iteration over all tree nodes
+  // in the destructor.  For a zone-allocated tree, nodes will be
+  // freed by the Zone.
+  SplayTree<Config, ZoneListAllocationPolicy>::ResetRoot();
 }
 
 
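The template bodies removed above now live in splay-tree-inl.h as a generic
SplayTree<Config, Allocator>. The remaining destructor exists only to
short-circuit per-node cleanup: under the assumed shape of the generic base
sketched below (not quoted from splay-tree-inl.h), destruction visits and
deletes every node, which is wasted work when the Zone reclaims all nodes in
bulk. Calling ResetRoot() first makes that walk trivially empty.

    // Assumed shape of the generic base's destructor; a sketch, not the
    // actual splay-tree-inl.h source.
    template <typename Config, class Allocator>
    SplayTree<Config, Allocator>::~SplayTree() {
      NodeDeleter deleter;    // hypothetical helper that deletes each node
      ForEachNode(&deleter);  // a no-op once root_ has been reset to NULL
    }
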
diff --git a/src/zone.cc b/src/zone.cc
index 33fe557..01df450 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "zone-inl.h"
+#include "splay-tree-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/zone.h b/src/zone.h
index 0d006dd..3397356 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -205,98 +205,14 @@
 
 
 // A zone splay tree.  The config type parameter encapsulates the
-// different configurations of a concrete splay tree:
-//
-//   typedef Key: the key type
-//   typedef Value: the value type
-//   static const kNoKey: the dummy key used when no key is set
-//   static const kNoValue: the dummy value used to initialize nodes
-//   int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
-//
+// different configurations of a concrete splay tree (see splay-tree.h).
+// The tree itself and all its elements are allocated in the Zone.
 template <typename Config>
-class ZoneSplayTree : public ZoneObject {
+class ZoneSplayTree: public SplayTree<Config, ZoneListAllocationPolicy> {
  public:
-  typedef typename Config::Key Key;
-  typedef typename Config::Value Value;
-
-  class Locator;
-
-  ZoneSplayTree() : root_(NULL) { }
-
-  // Inserts the given key in this tree with the given value.  Returns
-  // true if a node was inserted, otherwise false.  If found the locator
-  // is enabled and provides access to the mapping for the key.
-  bool Insert(const Key& key, Locator* locator);
-
-  // Looks up the key in this tree and returns true if it was found,
-  // otherwise false.  If the node is found the locator is enabled and
-  // provides access to the mapping for the key.
-  bool Find(const Key& key, Locator* locator);
-
-  // Finds the mapping with the greatest key less than or equal to the
-  // given key.
-  bool FindGreatestLessThan(const Key& key, Locator* locator);
-
-  // Find the mapping with the greatest key in this tree.
-  bool FindGreatest(Locator* locator);
-
-  // Finds the mapping with the least key greater than or equal to the
-  // given key.
-  bool FindLeastGreaterThan(const Key& key, Locator* locator);
-
-  // Find the mapping with the least key in this tree.
-  bool FindLeast(Locator* locator);
-
-  // Remove the node with the given key from the tree.
-  bool Remove(const Key& key);
-
-  bool is_empty() { return root_ == NULL; }
-
-  // Perform the splay operation for the given key. Moves the node with
-  // the given key to the top of the tree.  If no node has the given
-  // key, the last node on the search path is moved to the top of the
-  // tree.
-  void Splay(const Key& key);
-
-  class Node : public ZoneObject {
-   public:
-    Node(const Key& key, const Value& value)
-        : key_(key),
-          value_(value),
-          left_(NULL),
-          right_(NULL) { }
-    Key key() { return key_; }
-    Value value() { return value_; }
-    Node* left() { return left_; }
-    Node* right() { return right_; }
-   private:
-    friend class ZoneSplayTree;
-    friend class Locator;
-    Key key_;
-    Value value_;
-    Node* left_;
-    Node* right_;
-  };
-
-  // A locator provides access to a node in the tree without actually
-  // exposing the node.
-  class Locator {
-   public:
-    explicit Locator(Node* node) : node_(node) { }
-    Locator() : node_(NULL) { }
-    const Key& key() { return node_->key_; }
-    Value& value() { return node_->value_; }
-    void set_value(const Value& value) { node_->value_ = value; }
-    inline void bind(Node* node) { node_ = node; }
-   private:
-    Node* node_;
-  };
-
-  template <class Callback>
-  void ForEach(Callback* callback);
-
- private:
-  Node* root_;
+  ZoneSplayTree()
+      : SplayTree<Config, ZoneListAllocationPolicy>() {}
+  ~ZoneSplayTree();
 };
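
The contract that the removed comment block spelled out (Key and Value
typedefs, kNoKey and kNoValue sentinels, and a three-way Compare) still applies
to the new base class. An illustrative configuration, with hypothetical names
throughout:

    // Illustrative only: IntKeyConfig is not part of the patch.
    struct IntKeyConfig {
      typedef int Key;
      typedef int Value;
      static const int kNoKey = -1;   // dummy key used by the splay sentinel
      static const int kNoValue = 0;  // dummy value for fresh nodes
      static int Compare(int a, int b) {
        if (a < b) return -1;
        if (a > b) return 1;
        return 0;
      }
    };

    void Example() {
      ZoneSplayTree<IntKeyConfig> tree;  // tree and nodes live in the Zone
      ZoneSplayTree<IntKeyConfig>::Locator locator;
      if (tree.Insert(42, &locator)) {
        locator.set_value(7);  // bind a value to the newly inserted key
      }
      // No explicit teardown: the Zone reclaims every node in one sweep.
    }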