Version 2.1.8

Added fine-grained garbage collection callbacks to the API.

Performance improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@4238 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 0786ed9..b16dd2f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2010-03-24: Version 2.1.8
+
+        Added fine-grained garbage collection callbacks to the API.
+
+        Performance improvements on all platforms.
+
+
 2010-03-22: Version 2.1.7
 
         Fixed issue 650.
diff --git a/include/v8.h b/include/v8.h
index bed86ca..206b0b0 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -2158,12 +2158,26 @@
 // --- G a r b a g e C o l l e c t i o n  C a l l b a c k s
 
 /**
- * Applications can register a callback function which is called
- * before and after a major garbage collection.  Allocations are not
- * allowed in the callback function, you therefore cannot manipulate
+ * Applications can register callback functions which will be called
+ * before and after a garbage collection.  Allocations are not
+ * allowed in the callback functions, you therefore cannot manipulate
  * objects (set or delete properties for example) since it is possible
  * such operations will result in the allocation of objects.
  */
+enum GCType {
+  kGCTypeScavenge = 1 << 0,
+  kGCTypeMarkSweepCompact = 1 << 1,
+  kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact
+};
+
+enum GCCallbackFlags {
+  kNoGCCallbackFlags = 0,
+  kGCCallbackFlagCompacted = 1 << 0
+};
+
+typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
+typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
+
 typedef void (*GCCallback)();
 
 
@@ -2299,7 +2313,27 @@
 
   /**
    * Enables the host application to receive a notification before a
-   * major garbage colletion.  Allocations are not allowed in the
+   * garbage collection.  Allocations are not allowed in the
+   * callback function, you therefore cannot manipulate objects (set
+   * or delete properties for example) since it is possible such
+   * operations will result in the allocation of objects. It is possible
+   * to specify a GCType filter for your callback. However, it is not
+   * possible to register the same callback function twice with
+   * different GCType filters.
+   */
+  static void AddGCPrologueCallback(
+      GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
+
+  /**
+   * This function removes a callback which was installed by the
+   * AddGCPrologueCallback function.
+   */
+  static void RemoveGCPrologueCallback(GCPrologueCallback callback);
+
+  /**
+   * This function is deprecated. Please use AddGCPrologueCallback instead.
+   * Enables the host application to receive a notification before a
+   * garbage collection.  Allocations are not allowed in the
    * callback function, you therefore cannot manipulate objects (set
    * or delete properties for example) since it is possible such
    * operations will result in the allocation of objects.
@@ -2308,6 +2342,26 @@
 
   /**
    * Enables the host application to receive a notification after a
+   * garbage collection.  Allocations are not allowed in the
+   * callback function, you therefore cannot manipulate objects (set
+   * or delete properties for example) since it is possible such
+   * operations will result in the allocation of objects. It is possible
+   * to specify a GCType filter for your callback. However, it is not
+   * possible to register the same callback function twice with
+   * different GCType filters.
+   */
+  static void AddGCEpilogueCallback(
+      GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
+
+  /**
+   * This function removes a callback which was installed by the
+   * AddGCEpilogueCallback function.
+   */
+  static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+
+  /**
+   * This function is deprecated. Please use AddGCEpilogueCallback instead.
+   * Enables the host application to receive a notification after a
    * major garbage collection.  Allocations are not allowed in the
    * callback function, you therefore cannot manipulate objects (set
    * or delete properties for example) since it is possible such
@@ -2683,7 +2737,7 @@
 
   /** Creates a new context. */
   static Persistent<Context> New(
-      ExtensionConfiguration* extensions = 0,
+      ExtensionConfiguration* extensions = NULL,
       Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
       Handle<Value> global_object = Handle<Value>());
 
diff --git a/src/SConscript b/src/SConscript
index bf42fd4..51c9ba8 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -252,12 +252,12 @@
 math.js
 messages.js
 apinatives.js
-debug-delay.js
-liveedit-delay.js
-mirror-delay.js
-date-delay.js
-regexp-delay.js
-json-delay.js
+date.js
+regexp.js
+json.js
+liveedit-debugger.js
+mirror-debugger.js
+debug-debugger.js
 '''.split()
 
 
diff --git a/src/api.cc b/src/api.cc
index b2f0e03..5f9f178 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1136,7 +1136,7 @@
   if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
     pre_data_impl = NULL;
   }
-  i::Handle<i::JSFunction> boilerplate =
+  i::Handle<i::SharedFunctionInfo> result =
       i::Compiler::Compile(str,
                            name_obj,
                            line_offset,
@@ -1145,9 +1145,9 @@
                            pre_data_impl,
                            Utils::OpenHandle(*script_data),
                            i::NOT_NATIVES_CODE);
-  has_pending_exception = boilerplate.is_null();
+  has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(Local<Script>());
-  return Local<Script>(ToApi<Script>(boilerplate));
+  return Local<Script>(ToApi<Script>(result));
 }
 
 
@@ -1168,10 +1168,12 @@
   Local<Script> generic = New(source, origin, pre_data, script_data);
   if (generic.IsEmpty())
     return generic;
-  i::Handle<i::JSFunction> boilerplate = Utils::OpenHandle(*generic);
+  i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
+  i::Handle<i::SharedFunctionInfo> function =
+      i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
   i::Handle<i::JSFunction> result =
-      i::Factory::NewFunctionFromBoilerplate(boilerplate,
-                                             i::Top::global_context());
+      i::Factory::NewFunctionFromSharedFunctionInfo(function,
+                                                    i::Top::global_context());
   return Local<Script>(ToApi<Script>(result));
 }
 
@@ -1191,10 +1193,15 @@
   i::Object* raw_result = NULL;
   {
     HandleScope scope;
-    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
-    if (fun->IsBoilerplate()) {
-      fun = i::Factory::NewFunctionFromBoilerplate(fun,
-                                                   i::Top::global_context());
+    i::Handle<i::Object> obj = Utils::OpenHandle(this);
+    i::Handle<i::JSFunction> fun;
+    if (obj->IsSharedFunctionInfo()) {
+      i::Handle<i::SharedFunctionInfo>
+          function_info(i::SharedFunctionInfo::cast(*obj));
+      fun = i::Factory::NewFunctionFromSharedFunctionInfo(
+          function_info, i::Top::global_context());
+    } else {
+      fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj));
     }
     EXCEPTION_PREAMBLE();
     i::Handle<i::Object> receiver(i::Top::context()->global_proxy());
@@ -1208,14 +1215,28 @@
 }
 
 
+static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(script);
+  i::Handle<i::SharedFunctionInfo> result;
+  if (obj->IsSharedFunctionInfo()) {
+    result =
+        i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
+  } else {
+    result =
+        i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared());
+  }
+  return result;
+}
+
+
 Local<Value> Script::Id() {
   ON_BAILOUT("v8::Script::Id()", return Local<Value>());
   LOG_API("Script::Id");
   i::Object* raw_id = NULL;
   {
     HandleScope scope;
-    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
-    i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
+    i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
+    i::Handle<i::Script> script(i::Script::cast(function_info->script()));
     i::Handle<i::Object> id(script->id());
     raw_id = *id;
   }
@@ -1229,9 +1250,9 @@
   LOG_API("Script::SetData");
   {
     HandleScope scope;
-    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+    i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
     i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
-    i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
+    i::Handle<i::Script> script(i::Script::cast(function_info->script()));
     script->set_data(*raw_data);
   }
 }
@@ -3525,6 +3546,30 @@
 }
 
 
+void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+  if (IsDeadCheck("v8::V8::AddGCPrologueCallback()")) return;
+  i::Heap::AddGCPrologueCallback(callback, gc_type);
+}
+
+
+void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+  if (IsDeadCheck("v8::V8::RemoveGCPrologueCallback()")) return;
+  i::Heap::RemoveGCPrologueCallback(callback);
+}
+
+
+void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+  if (IsDeadCheck("v8::V8::AddGCEpilogueCallback()")) return;
+  i::Heap::AddGCEpilogueCallback(callback, gc_type);
+}
+
+
+void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+  if (IsDeadCheck("v8::V8::RemoveGCEpilogueCallback()")) return;
+  i::Heap::RemoveGCEpilogueCallback(callback);
+}
+
+
 void V8::PauseProfiler() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   PauseProfilerEx(PROFILER_MODULE_CPU);
diff --git a/src/api.h b/src/api.h
index a28e1f0..2f1f77c 100644
--- a/src/api.h
+++ b/src/api.h
@@ -221,7 +221,7 @@
       OpenHandle(const v8::Array* data);
   static inline v8::internal::Handle<v8::internal::String>
       OpenHandle(const String* data);
-  static inline v8::internal::Handle<v8::internal::JSFunction>
+  static inline v8::internal::Handle<v8::internal::Object>
       OpenHandle(const Script* data);
   static inline v8::internal::Handle<v8::internal::JSFunction>
       OpenHandle(const Function* data);
@@ -296,7 +296,7 @@
 MAKE_OPEN_HANDLE(Object, JSObject)
 MAKE_OPEN_HANDLE(Array, JSArray)
 MAKE_OPEN_HANDLE(String, String)
-MAKE_OPEN_HANDLE(Script, JSFunction)
+MAKE_OPEN_HANDLE(Script, Object)
 MAKE_OPEN_HANDLE(Function, JSFunction)
 MAKE_OPEN_HANDLE(Message, JSObject)
 MAKE_OPEN_HANDLE(Context, Context)
diff --git a/src/apinatives.js b/src/apinatives.js
index 6451e62..ca2bbf5 100644
--- a/src/apinatives.js
+++ b/src/apinatives.js
@@ -31,7 +31,7 @@
 
 
 function CreateDate(time) {
-  var date = new ORIGINAL_DATE();
+  var date = new $Date();
   date.setTime(time);
   return date;
 }
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index d4cd818..23d5e00 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -47,21 +47,41 @@
 unsigned CpuFeatures::enabled_ = 0;
 unsigned CpuFeatures::found_by_runtime_probing_ = 0;
 
+
+#ifdef __arm__
+static uint64_t CpuFeaturesImpliedByCompiler() {
+  uint64_t answer = 0;
+#ifdef CAN_USE_ARMV7_INSTRUCTIONS
+  answer |= 1u << ARMv7;
+#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
+  // If the compiler is allowed to use VFP then we can use VFP too in our code
+  // generation even when generating snapshots.  This won't work for cross
+  // compilation.
+#if defined(__VFP_FP__) && !defined(__SOFTFP__)
+  answer |= 1u << VFP3;
+#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
+#ifdef CAN_USE_VFP_INSTRUCTIONS
+  answer |= 1u << VFP3;
+#endif  // def CAN_USE_VFP_INSTRUCTIONS
+  return answer;
+}
+#endif  // def __arm__
+
+
 void CpuFeatures::Probe() {
-  // If the compiler is allowed to use vfp then we can use vfp too in our
-  // code generation.
-#if !defined(__arm__)
+#ifndef __arm__
   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
   if (FLAG_enable_vfp3) {
-      supported_ |= 1u << VFP3;
+    supported_ |= 1u << VFP3;
   }
   // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
   if (FLAG_enable_armv7) {
-      supported_ |= 1u << ARMv7;
+    supported_ |= 1u << ARMv7;
   }
-#else
+#else  // def __arm__
   if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
+    supported_ |= CpuFeaturesImpliedByCompiler();
     return;  // No features if we might serialize.
   }
 
@@ -532,7 +552,7 @@
     if (!Serializer::enabled()) {
       Serializer::TooLateToEnableNow();
     }
-#endif
+#endif  // def DEBUG
     return Serializer::enabled();
   } else if (rmode == RelocInfo::NONE) {
     return false;
@@ -1137,14 +1157,16 @@
 
 // Exception-generating instructions and debugging support.
 void Assembler::stop(const char* msg) {
-#if !defined(__arm__)
+#ifndef __arm__
   // The simulator handles these special instructions and stops execution.
   emit(15 << 28 | ((intptr_t) msg));
-#else
-  // Just issue a simple break instruction for now. Alternatively we could use
-  // the swi(0x9f0001) instruction on Linux.
+#else  // def __arm__
+#ifdef CAN_USE_ARMV5_INSTRUCTIONS
   bkpt(0);
-#endif
+#else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
+  swi(0x9f0001);
+#endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
+#endif  // def __arm__
 }
 
 
@@ -1319,11 +1341,28 @@
   // Vdst(15-12) | 1011(11-8) | offset
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(offset % 4 == 0);
+  ASSERT((offset / 4) < 256);
   emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
        0xB*B8 | ((offset / 4) & 255));
 }
 
 
+void Assembler::vldr(const SwVfpRegister dst,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // Sdst = MEM(Rbase + offset).
+  // Instruction details available in ARM DDI 0406A, A8-628.
+  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+  // Vdst(15-12) | 1010(11-8) | offset
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
+  ASSERT((offset / 4) < 256);
+  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+       0xA*B8 | ((offset / 4) & 255));
+}
+
+
 void Assembler::vstr(const DwVfpRegister src,
                      const Register base,
                      int offset,
@@ -1334,6 +1373,7 @@
   // Vsrc(15-12) | 1011(11-8) | (offset/4)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(offset % 4 == 0);
+  ASSERT((offset / 4) < 256);
   emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
        0xB*B8 | ((offset / 4) & 255));
 }
@@ -1397,31 +1437,172 @@
 }
 
 
-void Assembler::vcvt(const DwVfpRegister dst,
-                     const SwVfpRegister src,
-                     const Condition cond) {
-  // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
-  // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
-       dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
-       (0x1 & src.code())*B5 | (src.code() >> 1));
+// Type of data to read from or write to VFP register.
+// Used as specifier in generic vcvt instruction.
+enum VFPType { S32, U32, F32, F64 };
+
+
+static bool IsSignedVFPType(VFPType type) {
+  switch (type) {
+    case S32:
+      return true;
+    case U32:
+      return false;
+    default:
+      UNREACHABLE();
+      return false;
+  }
 }
 
 
-void Assembler::vcvt(const SwVfpRegister dst,
-                     const DwVfpRegister src,
-                     const Condition cond) {
-  // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
-  // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+static bool IsIntegerVFPType(VFPType type) {
+  switch (type) {
+    case S32:
+    case U32:
+      return true;
+    case F32:
+    case F64:
+      return false;
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
+static bool IsDoubleVFPType(VFPType type) {
+  switch (type) {
+    case F32:
+      return false;
+    case F64:
+      return true;
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
+// Depending on split_last_bit, split the binary representation of reg_code
+// into Vm:M or M:Vm form (where M is a single bit).
+static void SplitRegCode(bool split_last_bit,
+                         int reg_code,
+                         int* vm,
+                         int* m) {
+  if (split_last_bit) {
+    *m  = reg_code & 0x1;
+    *vm = reg_code >> 1;
+  } else {
+    *m  = (reg_code & 0x10) >> 4;
+    *vm = reg_code & 0x0F;
+  }
+}
+
+
+// Encode vcvt.src_type.dst_type instruction.
+static Instr EncodeVCVT(const VFPType dst_type,
+                        const int dst_code,
+                        const VFPType src_type,
+                        const int src_code,
+                        const Condition cond) {
+  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
+    // Conversion between IEEE floating point and 32-bit integer.
+    // Instruction details available in ARM DDI 0406B, A8.6.295.
+    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
+    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
+
+    int sz, opc2, D, Vd, M, Vm, op;
+
+    if (IsIntegerVFPType(dst_type)) {
+      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
+      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
+      op = 1;  // round towards zero
+      SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
+      SplitRegCode(true, dst_code, &Vd, &D);
+    } else {
+      ASSERT(IsIntegerVFPType(src_type));
+
+      opc2 = 0x0;
+      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
+      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
+      SplitRegCode(true, src_code, &Vm, &M);
+      SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D);
+    }
+
+    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
+            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
+  } else {
+    // Conversion between IEEE double and single precision.
+    // Instruction details available in ARM DDI 0406B, A8.6.298.
+    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
+    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+    int sz, D, Vd, M, Vm;
+
+    ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type));
+    sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
+    SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D);
+    SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
+
+    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
+            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
+  }
+}
+
+
+void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
-       0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
-       0x5*B9 | B8 | B7 | B6 | src.code());
+  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
+                             const DwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), cond));
+}
+
+
+void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
+                             const DwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
+                             const DwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), cond));
 }
 
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 539a6b8..98be7b5 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -826,6 +826,12 @@
             const Register base,
             int offset,  // Offset must be a multiple of 4.
             const Condition cond = al);
+
+  void vldr(const SwVfpRegister dst,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
+
   void vstr(const DwVfpRegister src,
             const Register base,
             int offset,  // Offset must be a multiple of 4.
@@ -844,12 +850,27 @@
   void vmov(const Register dst,
             const SwVfpRegister src,
             const Condition cond = al);
-  void vcvt(const DwVfpRegister dst,
-            const SwVfpRegister src,
-            const Condition cond = al);
-  void vcvt(const SwVfpRegister dst,
-            const DwVfpRegister src,
-            const Condition cond = al);
+  void vcvt_f64_s32(const DwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f32_s32(const SwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f64_u32(const DwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_s32_f64(const SwVfpRegister dst,
+                    const DwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_u32_f64(const SwVfpRegister dst,
+                    const DwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f64_f32(const DwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f32_f64(const SwVfpRegister dst,
+                    const DwVfpRegister src,
+                    const Condition cond = al);
 
   void vadd(const DwVfpRegister dst,
             const DwVfpRegister src1,
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index d3e98a3..5e00677 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -2305,14 +2305,13 @@
 }
 
 
-void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+void CodeGenerator::InstantiateFunction(
+    Handle<SharedFunctionInfo> function_info) {
   VirtualFrame::SpilledScope spilled_scope;
-  ASSERT(boilerplate->IsBoilerplate());
-
-  __ mov(r0, Operand(boilerplate));
+  __ mov(r0, Operand(function_info));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
-  if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+  if (scope()->is_function_scope() && function_info->num_literals() == 0) {
     FastNewClosureStub stub;
     frame_->EmitPush(r0);
     frame_->CallStub(&stub, 1);
@@ -2334,27 +2333,27 @@
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ FunctionLiteral");
 
-  // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script(), this);
+  // Build the function info and instantiate it.
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) {
     ASSERT(frame_->height() == original_height);
     return;
   }
-  InstantiateBoilerplate(boilerplate);
+  InstantiateFunction(function_info);
   ASSERT(frame_->height() == original_height + 1);
 }
 
 
-void CodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void CodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
-  InstantiateBoilerplate(node->boilerplate());
+  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
+  InstantiateFunction(node->shared_function_info());
   ASSERT(frame_->height() == original_height + 1);
 }
 
@@ -4527,11 +4526,11 @@
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
-  // Clone the boilerplate in new space. Set the context to the
-  // current context in cp.
+  // Create a new closure from the given function info in new
+  // space. Set the context to the current context in cp.
   Label gc;
 
-  // Pop the boilerplate function from the stack.
+  // Pop the function info from the stack.
   __ pop(r3);
 
   // Attempt to allocate new JSFunction in new space.
@@ -4549,20 +4548,18 @@
   __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
   __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
 
-  // Clone the rest of the boilerplate fields. We don't have to update
-  // the write barrier because the allocated object is in new space.
-  for (int offset = kPointerSize;
-       offset < JSFunction::kSize;
-       offset += kPointerSize) {
-    if (offset == JSFunction::kContextOffset) {
-      __ str(cp, FieldMemOperand(r0, offset));
-    } else {
-      __ ldr(r1, FieldMemOperand(r3, offset));
-      __ str(r1, FieldMemOperand(r0, offset));
-    }
-  }
+  // Initialize the rest of the function. We don't have to update the
+  // write barrier because the allocated object is in new space.
+  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
+  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
+  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
 
-  // Return result. The argument boilerplate has been popped already.
+  // Return result. The argument function info has been popped already.
   __ Ret();
 
   // Create a new closure through the slower runtime call.
@@ -4685,42 +4682,6 @@
 }
 
 
-// Count leading zeros in a 32 bit word.  On ARM5 and later it uses the clz
-// instruction.  On pre-ARM5 hardware this routine gives the wrong answer for 0
-// (31 instead of 32).
-static void CountLeadingZeros(
-    MacroAssembler* masm,
-    Register source,
-    Register scratch,
-    Register zeros) {
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
-  __ clz(zeros, source);  // This instruction is only supported after ARM5.
-#else
-  __ mov(zeros, Operand(0));
-  __ mov(scratch, source);
-  // Top 16.
-  __ tst(scratch, Operand(0xffff0000));
-  __ add(zeros, zeros, Operand(16), LeaveCC, eq);
-  __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
-  // Top 8.
-  __ tst(scratch, Operand(0xff000000));
-  __ add(zeros, zeros, Operand(8), LeaveCC, eq);
-  __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
-  // Top 4.
-  __ tst(scratch, Operand(0xf0000000));
-  __ add(zeros, zeros, Operand(4), LeaveCC, eq);
-  __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
-  // Top 2.
-  __ tst(scratch, Operand(0xc0000000));
-  __ add(zeros, zeros, Operand(2), LeaveCC, eq);
-  __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
-  // Top bit.
-  __ tst(scratch, Operand(0x80000000u));
-  __ add(zeros, zeros, Operand(1), LeaveCC, eq);
-#endif
-}
-
-
 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
 // registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
@@ -4784,25 +4745,27 @@
   __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
   // Subtract from 0 if source was negative.
   __ rsb(source_, source_, Operand(0), LeaveCC, ne);
+
+  // We have -1, 0 or 1, which we treat specially. Register source_ contains
+  // absolute value: it is either equal to 1 (special case of -1 and 1),
+  // greater than 1 (not a special case) or less than 1 (special case of 0).
   __ cmp(source_, Operand(1));
   __ b(gt, &not_special);
 
-  // We have -1, 0 or 1, which we treat specially.
-  __ cmp(source_, Operand(0));
   // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
   static const uint32_t exponent_word_for_1 =
       HeapNumber::kExponentBias << HeapNumber::kExponentShift;
-  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
+  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
   // 1, 0 and -1 all have 0 for the second word.
   __ mov(mantissa, Operand(0));
   __ Ret();
 
   __ bind(&not_special);
-  // Count leading zeros.  Uses result2 for a scratch register on pre-ARM5.
+  // Count leading zeros.  Uses mantissa for a scratch register on pre-ARM5.
   // Gets the wrong answer for 0, but we already checked for that case above.
-  CountLeadingZeros(masm, source_, mantissa, zeros_);
+  __ CountLeadingZeros(source_, mantissa, zeros_);
   // Compute exponent and or it into the exponent register.
-  // We use result2 as a scratch register here.
+  // We use mantissa as a scratch register here.
   __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
   __ orr(exponent,
          exponent,
@@ -4821,45 +4784,6 @@
 }
 
 
-// This stub can convert a signed int32 to a heap number (double).  It does
-// not work for int32s that are in Smi range!  No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
- public:
-  WriteInt32ToHeapNumberStub(Register the_int,
-                             Register the_heap_number,
-                             Register scratch)
-      : the_int_(the_int),
-        the_heap_number_(the_heap_number),
-        scratch_(scratch) { }
-
- private:
-  Register the_int_;
-  Register the_heap_number_;
-  Register scratch_;
-
-  // Minor key encoding in 16 bits.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 14> {};
-
-  Major MajorKey() { return WriteInt32ToHeapNumber; }
-  int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
-    return  the_int_.code() +
-           (the_heap_number_.code() << 4) +
-           (scratch_.code() << 8);
-  }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
-  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
-};
-
-
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -5042,7 +4966,7 @@
     CpuFeatures::Scope scope(VFP3);
     __ mov(r7, Operand(r1, ASR, kSmiTagSize));
     __ vmov(s15, r7);
-    __ vcvt(d7, s15);
+    __ vcvt_f64_s32(d7, s15);
     // Load the double from rhs, tagged HeapNumber r0, to d6.
     __ sub(r7, r0, Operand(kHeapObjectTag));
     __ vldr(d6, r7, HeapNumber::kValueOffset);
@@ -5085,7 +5009,7 @@
     __ vldr(d7, r7, HeapNumber::kValueOffset);
     __ mov(r7, Operand(r0, ASR, kSmiTagSize));
     __ vmov(s13, r7);
-    __ vcvt(d6, s13);
+    __ vcvt_f64_s32(d6, s13);
   } else {
     __ push(lr);
     // Load lhs to a double in r2, r3.
@@ -5494,29 +5418,6 @@
 }
 
 
-// Allocates a heap number or jumps to the label if the young space is full and
-// a scavenge is needed.
-static void AllocateHeapNumber(
-    MacroAssembler* masm,
-    Label* need_gc,       // Jump here if young space is full.
-    Register result,  // The tagged address of the new heap number.
-    Register scratch1,  // A scratch register.
-    Register scratch2) {  // Another scratch register.
-  // Allocate an object in the heap for the heap number and tag it as a heap
-  // object.
-  __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
-                        result,
-                        scratch1,
-                        scratch2,
-                        need_gc,
-                        TAG_OBJECT);
-
-  // Get heap number map and store it in the allocated object.
-  __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
-  __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
-}
-
-
 // We fall into this code if the operands were Smis, but the result was
 // not (eg. overflow).  We branch into this code (to the not_smi label) if
 // the operands were not both Smi.  The operands are in r0 and r1.  In order
@@ -5533,7 +5434,7 @@
   // Smi-smi case (overflow).
   // Since both are Smis there is no heap number to overwrite, so allocate.
   // The new heap number is in r5.  r6 and r7 are scratch.
-  AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  __ AllocateHeapNumber(r5, r6, r7, &slow);
 
   // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
   // using registers d7 and d6 for the double values.
@@ -5543,10 +5444,10 @@
     CpuFeatures::Scope scope(VFP3);
     __ mov(r7, Operand(r0, ASR, kSmiTagSize));
     __ vmov(s15, r7);
-    __ vcvt(d7, s15);
+    __ vcvt_f64_s32(d7, s15);
     __ mov(r7, Operand(r1, ASR, kSmiTagSize));
     __ vmov(s13, r7);
-    __ vcvt(d6, s13);
+    __ vcvt_f64_s32(d6, s13);
   } else {
     // Write Smi from r0 to r3 and r2 in double format.  r6 is scratch.
     __ mov(r7, Operand(r0));
@@ -5628,7 +5529,7 @@
   if (mode == NO_OVERWRITE) {
     // In the case where there is no chance of an overwritable float we may as
     // well do the allocation immediately while r0 and r1 are untouched.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    __ AllocateHeapNumber(r5, r6, r7, &slow);
   }
 
   // Move r0 to a double in r2-r3.
@@ -5653,7 +5554,7 @@
   __ bind(&r0_is_smi);
   if (mode == OVERWRITE_RIGHT) {
     // We can't overwrite a Smi so get address of new heap number into r5.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    __ AllocateHeapNumber(r5, r6, r7, &slow);
   }
 
   if (use_fp_registers) {
@@ -5661,7 +5562,7 @@
     // Convert smi in r0 to double in d7.
     __ mov(r7, Operand(r0, ASR, kSmiTagSize));
     __ vmov(s15, r7);
-    __ vcvt(d7, s15);
+    __ vcvt_f64_s32(d7, s15);
   } else {
     // Write Smi from r0 to r3 and r2 in double format.
     __ mov(r7, Operand(r0));
@@ -5695,7 +5596,7 @@
   __ bind(&r1_is_smi);
   if (mode == OVERWRITE_LEFT) {
     // We can't overwrite a Smi so get address of new heap number into r5.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    __ AllocateHeapNumber(r5, r6, r7, &slow);
   }
 
   if (use_fp_registers) {
@@ -5703,7 +5604,7 @@
     // Convert smi in r1 to double in d6.
     __ mov(r7, Operand(r1, ASR, kSmiTagSize));
     __ vmov(s13, r7);
-    __ vcvt(d6, s13);
+    __ vcvt_f64_s32(d6, s13);
   } else {
     // Write Smi from r1 to r1 and r0 in double format.
     __ mov(r7, Operand(r1));
@@ -5830,7 +5731,7 @@
     // conversion using round to zero.
     __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
     __ vmov(d7, scratch2, scratch);
-    __ vcvt(s15, d7);
+    __ vcvt_s32_f64(s15, d7);
     __ vmov(dest, s15);
   } else {
     // Get the top bits of the mantissa.
@@ -5942,7 +5843,7 @@
     }
     case NO_OVERWRITE: {
       // Get a new heap number in r5.  r6 and r7 are scratch.
-      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+      __ AllocateHeapNumber(r5, r6, r7, &slow);
     }
     default: break;
   }
@@ -5962,7 +5863,7 @@
   if (mode_ != NO_OVERWRITE) {
     __ bind(&have_to_allocate);
     // Get a new heap number in r5.  r6 and r7 are scratch.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    __ AllocateHeapNumber(r5, r6, r7, &slow);
     __ jmp(&got_a_heap_number);
   }
 
@@ -6380,7 +6281,7 @@
       __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
       __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
     } else {
-      AllocateHeapNumber(masm, &slow, r1, r2, r3);
+      __ AllocateHeapNumber(r1, r2, r3, &slow);
       __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
       __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
       __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
@@ -6410,7 +6311,7 @@
       // Allocate a fresh heap number, but don't overwrite r0 until
       // we're sure we can do it without going through the slow case
       // that needs the value in r0.
-      AllocateHeapNumber(masm, &slow, r2, r3, r4);
+      __ AllocateHeapNumber(r2, r3, r4, &slow);
       __ mov(r0, Operand(r2));
     }
 
@@ -7117,53 +7018,59 @@
 }
 
 
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
 const char* CompareStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
   switch (cc_) {
-    case lt: return "CompareStub_LT";
-    case gt: return "CompareStub_GT";
-    case le: return "CompareStub_LE";
-    case ge: return "CompareStub_GE";
-    case ne: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_NE_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_NO_NAN";
-        } else {
-          return "CompareStub_NE";
-        }
-      }
-    }
-    case eq: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_EQ_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_NO_NAN";
-        } else {
-          return "CompareStub_EQ";
-        }
-      }
-    }
-    default: return "CompareStub";
+    case lt: cc_name = "LT"; break;
+    case gt: cc_name = "GT"; break;
+    case le: cc_name = "LE"; break;
+    case ge: cc_name = "GE"; break;
+    case eq: cc_name = "EQ"; break;
+    case ne: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
   }
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == eq || cc_ == ne)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s",
+               cc_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name);
+  return name_;
 }
 
 
 int CompareStub::MinorKey() {
-  // Encode the three parameters in a unique 16 bit value.
-  ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16));
-  int nnn_value = (never_nan_nan_ ? 2 : 0);
-  if (cc_ != eq) nnn_value = 0;  // Avoid duplicate stubs.
-  return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0);
+  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+  // stubs the never NaN NaN condition is only taken into account if the
+  // condition is equals.
+  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 13));
+  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+         | IncludeNumberCompareField::encode(include_number_compare_);
 }
 
 
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 68f293a..4bea341 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -348,8 +348,8 @@
   // name/value pairs.
   void DeclareGlobals(Handle<FixedArray> pairs);
 
-  // Instantiate the function boilerplate.
-  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+  // Instantiate the function based on the shared function info.
+  void InstantiateFunction(Handle<SharedFunctionInfo> function_info);
 
   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);
@@ -660,6 +660,46 @@
 };
 
 
+// This stub can convert a signed int32 to a heap number (double).  It does
+// not work for int32s that are in Smi range!  No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+  WriteInt32ToHeapNumberStub(Register the_int,
+                             Register the_heap_number,
+                             Register scratch)
+      : the_int_(the_int),
+        the_heap_number_(the_heap_number),
+        scratch_(scratch) { }
+
+ private:
+  Register the_int_;
+  Register the_heap_number_;
+  Register scratch_;
+
+  // Minor key encoding in 16 bits.
+  class IntRegisterBits: public BitField<int, 0, 4> {};
+  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
+  class ScratchRegisterBits: public BitField<int, 8, 4> {};
+
+  Major MajorKey() { return WriteInt32ToHeapNumber; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return IntRegisterBits::encode(the_int_.code())
+           | HeapNumberRegisterBits::encode(the_heap_number_.code())
+           | ScratchRegisterBits::encode(scratch_.code());
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
+
+
 class NumberToStringStub: public CodeStub {
  public:
   NumberToStringStub() { }
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 89ff7c0..2e37120 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -81,9 +81,27 @@
 };
 
 
-const char* VFPRegisters::Name(int reg) {
+const char* VFPRegisters::Name(int reg, bool is_double) {
   ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
-  return names_[reg];
+  return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
+}
+
+
+int VFPRegisters::Number(const char* name, bool* is_double) {
+  for (int i = 0; i < kNumVFPRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      if (i < kNumVFPSingleRegisters) {
+        *is_double = false;
+        return i;
+      } else {
+        *is_double = true;
+        return i - kNumVFPSingleRegisters;
+      }
+    }
+  }
+
+  // No register with the requested name found.
+  return kNoRegister;
 }
 
 
@@ -104,7 +122,7 @@
     i++;
   }
 
-  // No register with the reguested name found.
+  // No register with the requested name found.
   return kNoRegister;
 }
 
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 2b883f3..36d2fb6 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -84,7 +84,10 @@
 static const int kNumRegisters = 16;
 
 // VFP support.
-static const int kNumVFPRegisters = 48;
+static const int kNumVFPSingleRegisters = 32;
+static const int kNumVFPDoubleRegisters = 16;
+static const int kNumVFPRegisters =
+    kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
 
 // PC is register 15.
 static const int kPCRegister = 15;
@@ -254,6 +257,14 @@
   inline int RtField() const { return Bits(15, 12); }
   inline int PField() const { return Bit(24); }
   inline int UField() const { return Bit(23); }
+  inline int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); }
+  inline int Opc2Field() const { return Bits(19, 16); }
+  inline int Opc3Field() const { return Bits(7, 6); }
+  inline int SzField() const { return Bit(8); }
+  inline int VLField() const { return Bit(20); }
+  inline int VCField() const { return Bit(8); }
+  inline int VAField() const { return Bits(23, 21); }
+  inline int VBField() const { return Bits(6, 5); }
 
   // Fields used in Data processing instructions
   inline Opcode OpcodeField() const {
@@ -344,7 +355,12 @@
 class VFPRegisters {
  public:
   // Return the name of the register.
-  static const char* Name(int reg);
+  static const char* Name(int reg, bool is_double);
+
+  // Lookup the register number for the name provided.
+  // Set flag pointed by is_double to true if register
+  // is double-precision.
+  static int Number(const char* name, bool* is_double);
 
  private:
   static const char* names_[kNumVFPRegisters];
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 4e39cda..55f31d4 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -122,7 +122,7 @@
 
 
 void CPU::DebugBreak() {
-#if !defined (__arm__)
+#if !defined (__arm__) || !defined(CAN_USE_ARMV5_INSTRUCTIONS)
   UNIMPLEMENTED();  // when building ARM emulator target
 #else
   asm volatile("bkpt 0");
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 127c160..8e1776d 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -129,6 +129,10 @@
   void DecodeTypeVFP(Instr* instr);
   void DecodeType6CoprocessorIns(Instr* instr);
 
+  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
+  void DecodeVCMP(Instr* instr);
+  void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
+  void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
 
   const disasm::NameConverter& converter_;
   v8::internal::Vector<char> out_buffer_;
@@ -181,12 +185,12 @@
 
 // Print the VFP S register name according to the active name converter.
 void Decoder::PrintSRegister(int reg) {
-  Print(assembler::arm::VFPRegisters::Name(reg));
+  Print(assembler::arm::VFPRegisters::Name(reg, false));
 }
 
 // Print the  VFP D register name according to the active name converter.
 void Decoder::PrintDRegister(int reg) {
-  Print(assembler::arm::VFPRegisters::Name(reg + 32));
+  Print(assembler::arm::VFPRegisters::Name(reg, true));
 }
 
 
@@ -930,85 +934,61 @@
 // VMRS
 void Decoder::DecodeTypeVFP(Instr* instr) {
   ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+  ASSERT(instr->Bits(11, 9) == 0x5);
 
-  if (instr->Bit(23) == 1) {
-    if ((instr->Bits(21, 19) == 0x7) &&
-        (instr->Bits(18, 16) == 0x5) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 1) &&
-        (instr->Bit(6) == 1) &&
-        (instr->Bit(4) == 0)) {
-      Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
-    } else if ((instr->Bits(21, 19) == 0x7) &&
-               (instr->Bits(18, 16) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(7) == 1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
-    } else if ((instr->Bit(21) == 0x0) &&
-               (instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
+  if (instr->Bit(4) == 0) {
+    if (instr->Opc1Field() == 0x7) {
+      // Other data processing instructions
+      if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+        DecodeVCVTBetweenDoubleAndSingle(instr);
+      } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() >> 1) == 0x6) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCMP(instr);
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
+    } else if (instr->Opc1Field() == 0x3) {
+      if (instr->SzField() == 0x1) {
+        if (instr->Opc3Field() & 0x1) {
+          Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
+        } else {
+          Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
+        }
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
+    } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
+      if (instr->SzField() == 0x1) {
+        Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
+    } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
+      if (instr->SzField() == 0x1) {
         Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
-    } else if ((instr->Bits(21, 20) == 0x3) &&
-               (instr->Bits(19, 16) == 0x4) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0x1) &&
-               (instr->Bit(4) == 0x0)) {
-      Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
-    } else if ((instr->Bits(23, 20) == 0xF) &&
-               (instr->Bits(19, 16) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(7, 5) == 0x0) &&
-               (instr->Bit(4) == 0x1)    &&
-               (instr->Bits(3, 0) == 0x0)) {
-        if (instr->Bits(15, 12) == 0xF)
-          Format(instr, "vmrs'cond APSR, FPSCR");
-        else
-          Unknown(instr);  // Not used by V8.
-    } else {
-      Unknown(instr);  // Not used by V8.
-    }
-  } else if (instr->Bit(21) == 1) {
-    if ((instr->Bit(20) == 0x1) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 0x1) &&
-        (instr->Bit(6) == 0) &&
-        (instr->Bit(4) == 0)) {
-      Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
-    } else if ((instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
-      Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
     } else {
       Unknown(instr);  // Not used by V8.
     }
   } else {
-    if ((instr->Bit(20) == 0x0) &&
-        (instr->Bits(11, 8) == 0xA) &&
-        (instr->Bits(6, 5) == 0x0) &&
-        (instr->Bit(4) == 1) &&
-        (instr->Bits(3, 0) == 0x0)) {
-      Format(instr, "vmov'cond 'Sn, 'rt");
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(6, 5) == 0x0) &&
-               (instr->Bit(4) == 1) &&
-               (instr->Bits(3, 0) == 0x0)) {
-      Format(instr, "vmov'cond 'rt, 'Sn");
+    if ((instr->VCField() == 0x0) &&
+        (instr->VAField() == 0x0)) {
+      DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+    } else if ((instr->VLField() == 0x1) &&
+               (instr->VCField() == 0x0) &&
+               (instr->VAField() == 0x7) &&
+               (instr->Bits(19, 16) == 0x1)) {
+      if (instr->Bits(15, 12) == 0xF)
+        Format(instr, "vmrs'cond APSR, FPSCR");
+      else
+        Unknown(instr);  // Not used by V8.
     } else {
       Unknown(instr);  // Not used by V8.
     }
@@ -1016,6 +996,94 @@
 }
 
 
+void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
+  ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
+         (instr->VAField() == 0x0));
+
+  bool to_arm_register = (instr->VLField() == 0x1);
+
+  if (to_arm_register) {
+    Format(instr, "vmov'cond 'rt, 'Sn");
+  } else {
+    Format(instr, "vmov'cond 'Sn, 'rt");
+  }
+}
+
+
+void Decoder::DecodeVCMP(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+         (instr->Opc3Field() & 0x1));
+
+  // Comparison.
+  bool dp_operation = (instr->SzField() == 1);
+  bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
+
+  if (dp_operation && !raise_exception_for_qnan) {
+    Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+  } else {
+    Unknown(instr);  // Not used by V8.
+  }
+}
+
+
+void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+
+  bool double_to_single = (instr->SzField() == 1);
+
+  if (double_to_single) {
+    Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
+  } else {
+    Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm");
+  }
+}
+
+
+void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
+         (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+
+  bool to_integer = (instr->Bit(18) == 1);
+  bool dp_operation = (instr->SzField() == 1);
+  if (to_integer) {
+    bool unsigned_integer = (instr->Bit(16) == 0);
+
+    if (dp_operation) {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm");
+      } else {
+        Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
+      }
+    } else {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm");
+      } else {
+        Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm");
+      }
+    }
+  } else {
+    bool unsigned_integer = (instr->Bit(7) == 0);
+
+    if (dp_operation) {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm");
+      } else {
+        Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
+      }
+    } else {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm");
+      } else {
+        Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm");
+      }
+    }
+  }
+}
+
+
 // Decode Type 6 coprocessor instructions.
 // Dm = vmov(Rt, Rt2)
 // <Rt, Rt2> = vmov(Dm)
@@ -1024,9 +1092,27 @@
 void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
-  if (instr->CoprocessorField() != 0xB) {
-    Unknown(instr);  // Not used by V8.
-  } else {
+  if (instr->CoprocessorField() == 0xA) {
+    switch (instr->OpcodeField()) {
+      case 0x8:
+        if (instr->HasL()) {
+          Format(instr, "vldr'cond 'Sd, ['rn - 4*'off8]");
+        } else {
+          Format(instr, "vstr'cond 'Sd, ['rn - 4*'off8]");
+        }
+        break;
+      case 0xC:
+        if (instr->HasL()) {
+          Format(instr, "vldr'cond 'Sd, ['rn + 4*'off8]");
+        } else {
+          Format(instr, "vstr'cond 'Sd, ['rn + 4*'off8]");
+        }
+        break;
+      default:
+        Unknown(instr);  // Not used by V8.
+        break;
+    }
+  } else if (instr->CoprocessorField() == 0xB) {
     switch (instr->OpcodeField()) {
       case 0x2:
         // Load and store double to two GP registers
@@ -1056,6 +1142,8 @@
         Unknown(instr);  // Not used by V8.
         break;
     }
+  } else {
+    UNIMPLEMENTED();  // Not used by V8.
   }
 }
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index a70cf44..cc9e70b 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -667,14 +667,12 @@
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script(), this);
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(expr, script(), this);
   if (HasStackOverflow()) return;
 
-  ASSERT(boilerplate->IsBoilerplate());
-
   // Create a new closure.
-  __ mov(r0, Operand(boilerplate));
+  __ mov(r0, Operand(function_info));
   __ stm(db_w, sp, cp.bit() | r0.bit());
   __ CallRuntime(Runtime::kNewClosure, 2);
   Apply(context_, r0);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index e68a77a..2259aea 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -42,7 +42,6 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 // Helper function used from LoadIC/CallIC GenerateNormal.
 static void GenerateDictionaryLoad(MacroAssembler* masm,
                                    Label* miss,
@@ -145,25 +144,6 @@
 }
 
 
-// Helper function used to check that a value is either not an object
-// or is loaded if it is an object.
-static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm,
-                                           Label* miss,
-                                           Register value,
-                                           Register scratch) {
-  Label done;
-  // Check if the value is a Smi.
-  __ tst(value, Operand(kSmiTagMask));
-  __ b(eq, &done);
-  // Check if the object has been loaded.
-  __ ldr(scratch, FieldMemOperand(value, JSObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
-  __ tst(scratch, Operand(1 << Map::kNeedsLoading));
-  __ b(ne, miss);
-  __ bind(&done);
-}
-
-
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
@@ -292,12 +272,6 @@
   __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
   __ b(ne, miss);
 
-  // Check that the function has been loaded.
-  __ ldr(r0, FieldMemOperand(r1, JSObject::kMapOffset));
-  __ ldrb(r0, FieldMemOperand(r0, Map::kBitField2Offset));
-  __ tst(r0, Operand(1 << Map::kNeedsLoading));
-  __ b(ne, miss);
-
   // Patch the receiver with the global proxy if necessary.
   if (is_global_object) {
     __ ldr(r0, MemOperand(sp, argc * kPointerSize));
@@ -469,7 +443,6 @@
 
   __ bind(&probe);
   GenerateDictionaryLoad(masm, &miss, r1, r0);
-  GenerateCheckNonObjectOrLoaded(masm, &miss, r0, r1);
   __ Ret();
 
   // Global object access: Check access rights.
@@ -557,7 +530,7 @@
   //  -- sp[0]  : key
   //  -- sp[4]  : receiver
   // -----------------------------------
-  Label slow, fast;
+  Label slow, fast, check_pixel_array;
 
   // Get the key and receiver object from the stack.
   __ ldm(ia, sp, r0.bit() | r1.bit());
@@ -595,6 +568,19 @@
   __ cmp(r0, Operand(r3));
   __ b(lo, &fast);
 
+  // Check whether the elements is a pixel array.
+  __ bind(&check_pixel_array);
+  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
+  __ cmp(r3, ip);
+  __ b(ne, &slow);
+  __ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset));
+  __ cmp(r0, ip);
+  __ b(hs, &slow);
+  __ ldr(ip, FieldMemOperand(r1, PixelArray::kExternalPointerOffset));
+  __ ldrb(r0, MemOperand(ip, r0));
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Tag result as smi.
+  __ Ret();
+
   // Slow case: Push extra copies of the arguments (2).
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
@@ -625,10 +611,283 @@
 }
 
 
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register hiword,
+                                Register loword,
+                                Register scratch,
+                                int leading_zeroes) {
+  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+  const int mantissa_shift_for_hi_word =
+      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+  const int mantissa_shift_for_lo_word =
+      kBitsPerInt - mantissa_shift_for_hi_word;
+
+  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+  if (mantissa_shift_for_hi_word > 0) {
+    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
+  } else {
+    __ mov(loword, Operand(0));
+    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
+  }
+
+  // If least significant bit of biased exponent was not 1 it was corrupted
+  // by most significant bit of mantissa so we should fix that.
+  if (!(biased_exponent & 1)) {
+    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+  }
+}
+
+
 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
                                         ExternalArrayType array_type) {
-  // TODO(476): port specialized code.
-  GenerateGeneric(masm);
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  // -----------------------------------
+  Label slow, failed_allocation;
+
+  // Get the key and receiver object from the stack.
+  __ ldm(ia, sp, r0.bit() | r1.bit());
+
+  // r0: key
+  // r1: receiver object
+
+  // Check that the object isn't a smi
+  __ BranchOnSmi(r1, &slow);
+
+  // Check that the key is a smi.
+  __ BranchOnNotSmi(r0, &slow);
+
+  // Check that the object is a JS object. Load map into r2.
+  __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &slow);
+
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.
+  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ b(ne, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // r0: index (as a smi)
+  // r1: JSObject
+  __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
+  __ cmp(r2, ip);
+  __ b(ne, &slow);
+
+  // Check that the index is in range.
+  __ ldr(ip, FieldMemOperand(r1, ExternalArray::kLengthOffset));
+  __ cmp(r1, Operand(r0, ASR, kSmiTagSize));
+  // Unsigned comparison catches both negative and too-large values.
+  __ b(lo, &slow);
+
+  // r0: index (smi)
+  // r1: elements array
+  __ ldr(r1, FieldMemOperand(r1, ExternalArray::kExternalPointerOffset));
+  // r1: base pointer of external storage
+
+  // We are not untagging smi key and instead work with it
+  // as if it was premultiplied by 2.
+  ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+
+  switch (array_type) {
+    case kExternalByteArray:
+      __ ldrsb(r0, MemOperand(r1, r0, LSR, 1));
+      break;
+    case kExternalUnsignedByteArray:
+      __ ldrb(r0, MemOperand(r1, r0, LSR, 1));
+      break;
+    case kExternalShortArray:
+      __ ldrsh(r0, MemOperand(r1, r0, LSL, 0));
+      break;
+    case kExternalUnsignedShortArray:
+      __ ldrh(r0, MemOperand(r1, r0, LSL, 0));
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ ldr(r0, MemOperand(r1, r0, LSL, 1));
+      break;
+    case kExternalFloatArray:
+      if (CpuFeatures::IsSupported(VFP3)) {
+        CpuFeatures::Scope scope(VFP3);
+        __ add(r0, r1, Operand(r0, LSL, 1));
+        __ vldr(s0, r0, 0);
+      } else {
+        __ ldr(r0, MemOperand(r1, r0, LSL, 1));
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // For integer array types:
+  // r0: value
+  // For floating-point array type
+  // s0: value (if VFP3 is supported)
+  // r0: value (if VFP3 is not supported)
+
+  if (array_type == kExternalIntArray) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
+    __ cmp(r0, Operand(0xC0000000));
+    __ b(mi, &box_int);
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ Ret();
+
+    __ bind(&box_int);
+
+    __ mov(r1, r0);
+    // Allocate a HeapNumber for the int and perform int-to-double
+    // conversion.
+    __ AllocateHeapNumber(r0, r3, r4, &slow);
+
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      __ vmov(s0, r1);
+      __ vcvt_f64_s32(d0, s0);
+      __ sub(r1, r0, Operand(kHeapObjectTag));
+      __ vstr(d0, r1, HeapNumber::kValueOffset);
+      __ Ret();
+    } else {
+      WriteInt32ToHeapNumberStub stub(r1, r0, r3);
+      __ TailCallStub(&stub);
+    }
+  } else if (array_type == kExternalUnsignedIntArray) {
+    // The test is different for unsigned int values. Since we need
+    // the value to be in the range of a positive smi, we can't
+    // handle either of the top two bits being set in the value.
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      Label box_int, done;
+      __ tst(r0, Operand(0xC0000000));
+      __ b(ne, &box_int);
+
+      __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+      __ Ret();
+
+      __ bind(&box_int);
+      __ vmov(s0, r0);
+      __ AllocateHeapNumber(r0, r1, r2, &slow);
+
+      __ vcvt_f64_u32(d0, s0);
+      __ sub(r1, r0, Operand(kHeapObjectTag));
+      __ vstr(d0, r1, HeapNumber::kValueOffset);
+      __ Ret();
+    } else {
+      // Check whether unsigned integer fits into smi.
+      Label box_int_0, box_int_1, done;
+      __ tst(r0, Operand(0x80000000));
+      __ b(ne, &box_int_0);
+      __ tst(r0, Operand(0x40000000));
+      __ b(ne, &box_int_1);
+
+      // Tag integer as smi and return it.
+      __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+      __ Ret();
+
+      __ bind(&box_int_0);
+      // Integer does not have leading zeros.
+      GenerateUInt2Double(masm, r0, r1, r2, 0);
+      __ b(&done);
+
+      __ bind(&box_int_1);
+      // Integer has one leading zero.
+      GenerateUInt2Double(masm, r0, r1, r2, 1);
+
+      __ bind(&done);
+      // Integer was converted to double in registers r0:r1.
+      // Wrap it into a HeapNumber.
+      __ AllocateHeapNumber(r2, r3, r5, &slow);
+
+      __ str(r0, FieldMemOperand(r2, HeapNumber::kExponentOffset));
+      __ str(r1, FieldMemOperand(r2, HeapNumber::kMantissaOffset));
+
+      __ mov(r0, r2);
+
+      __ Ret();
+    }
+  } else if (array_type == kExternalFloatArray) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      __ AllocateHeapNumber(r0, r1, r2, &slow);
+      __ vcvt_f64_f32(d0, s0);
+      __ sub(r1, r0, Operand(kHeapObjectTag));
+      __ vstr(d0, r1, HeapNumber::kValueOffset);
+      __ Ret();
+    } else {
+      __ AllocateHeapNumber(r3, r1, r2, &slow);
+      // VFP is not available, do manual single to double conversion.
+
+      // r0: floating point value (binary32)
+
+      // Extract mantissa to r1.
+      __ and_(r1, r0, Operand(kBinary32MantissaMask));
+
+      // Extract exponent to r2.
+      __ mov(r2, Operand(r0, LSR, kBinary32MantissaBits));
+      __ and_(r2, r2, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+      Label exponent_rebiased;
+      __ teq(r2, Operand(0x00));
+      __ b(eq, &exponent_rebiased);
+
+      __ teq(r2, Operand(0xff));
+      __ mov(r2, Operand(0x7ff), LeaveCC, eq);
+      __ b(eq, &exponent_rebiased);
+
+      // Rebias exponent.
+      __ add(r2,
+             r2,
+             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+      __ bind(&exponent_rebiased);
+      __ and_(r0, r0, Operand(kBinary32SignMask));
+      __ orr(r0, r0, Operand(r2, LSL, HeapNumber::kMantissaBitsInTopWord));
+
+      // Shift mantissa.
+      static const int kMantissaShiftForHiWord =
+          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+      static const int kMantissaShiftForLoWord =
+          kBitsPerInt - kMantissaShiftForHiWord;
+
+      __ orr(r0, r0, Operand(r1, LSR, kMantissaShiftForHiWord));
+      __ mov(r1, Operand(r1, LSL, kMantissaShiftForLoWord));
+
+      __ str(r0, FieldMemOperand(r3, HeapNumber::kExponentOffset));
+      __ str(r1, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
+      __ mov(r0, r3);
+      __ Ret();
+    }
+
+  } else {
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ Ret();
+  }
+
+  // Slow case: Load name and receiver from stack and jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1);
+  GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -709,7 +968,7 @@
   //  -- sp[0]  : key
   //  -- sp[1]  : receiver
   // -----------------------------------
-  Label slow, fast, array, extra, exit;
+  Label slow, fast, array, extra, exit, check_pixel_array;
 
   // Get the key and the object from the stack.
   __ ldm(ia, sp, r1.bit() | r3.bit());  // r1 = key, r3 = receiver
@@ -742,7 +1001,7 @@
   __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
   __ cmp(r2, ip);
-  __ b(ne, &slow);
+  __ b(ne, &check_pixel_array);
   // Untag the key (for checking against untagged length in the fixed array).
   __ mov(r1, Operand(r1, ASR, kSmiTagSize));
   // Compute address to store into and check array bounds.
@@ -757,6 +1016,37 @@
   __ bind(&slow);
   GenerateRuntimeSetProperty(masm);
 
+  // Check whether the elements array is a pixel array.
+  // r0: value
+  // r1: index (as a smi), zero-extended.
+  // r3: elements array
+  __ bind(&check_pixel_array);
+  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
+  __ cmp(r2, ip);
+  __ b(ne, &slow);
+  // Check that the value is a smi. If a conversion is needed call into the
+  // runtime to convert and clamp.
+  __ BranchOnNotSmi(r0, &slow);
+  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // Untag the key.
+  __ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset));
+  __ cmp(r1, Operand(ip));
+  __ b(hs, &slow);
+  __ mov(r4, r0);  // Save the value.
+  __ mov(r0, Operand(r0, ASR, kSmiTagSize));  // Untag the value.
+  {  // Clamp the value to [0..255].
+    Label done;
+    __ tst(r0, Operand(0xFFFFFF00));
+    __ b(eq, &done);
+    __ mov(r0, Operand(0), LeaveCC, mi);  // 0 if negative.
+    __ mov(r0, Operand(255), LeaveCC, pl);  // 255 if positive.
+    __ bind(&done);
+  }
+  __ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset));
+  __ strb(r0, MemOperand(r2, r1));
+  __ mov(r0, Operand(r4));  // Return the original value.
+  __ Ret();
+
+
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
   // element to the array by writing to array[array.length].
@@ -819,10 +1109,376 @@
 }
 
 
+// Convert int passed in register ival to IEEE 754 single precision
+// floating point value and store it into register fval.
+// If VFP3 is available use it for conversion.
+static void ConvertIntToFloat(MacroAssembler* masm,
+                              Register ival,
+                              Register fval,
+                              Register scratch1,
+                              Register scratch2) {
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, ival);
+    __ vcvt_f32_s32(s0, s0);
+    __ vmov(fval, s0);
+  } else {
+    Label not_special, done;
+    // Move sign bit from source to destination.  This works because the sign
+    // bit in the exponent word of the double has the same position and polarity
+    // as the 2's complement sign bit in a Smi.
+    ASSERT(kBinary32SignMask == 0x80000000u);
+
+    __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
+    // Negate value if it is negative.
+    __ rsb(ival, ival, Operand(0), LeaveCC, ne);
+
+    // We have -1, 0 or 1, which we treat specially. Register ival contains
+    // absolute value: it is either equal to 1 (special case of -1 and 1),
+    // greater than 1 (not a special case) or less than 1 (special case of 0).
+    __ cmp(ival, Operand(1));
+    __ b(gt, &not_special);
+
+    // For 1 or -1 we need to or in the 0 exponent (biased).
+    static const uint32_t exponent_word_for_1 =
+        kBinary32ExponentBias << kBinary32ExponentShift;
+
+    __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
+    __ b(&done);
+
+    __ bind(&not_special);
+    // Count leading zeros.
+    // Gets the wrong answer for 0, but we already checked for that case above.
+    Register zeros = scratch2;
+    __ CountLeadingZeros(ival, scratch1, zeros);
+
+    // Compute exponent and or it into the exponent register.
+    __ rsb(scratch1,
+           zeros,
+           Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
+
+    __ orr(fval,
+           fval,
+           Operand(scratch1, LSL, kBinary32ExponentShift));
+
+    // Shift up the source chopping the top bit off.
+    __ add(zeros, zeros, Operand(1));
+    // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
+    __ mov(ival, Operand(ival, LSL, zeros));
+    // And the top (top 20 bits).
+    __ orr(fval,
+           fval,
+           Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
+
+    __ bind(&done);
+  }
+}
+
+
+static bool IsElementTypeSigned(ExternalArrayType array_type) {
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalShortArray:
+    case kExternalIntArray:
+      return true;
+
+    case kExternalUnsignedByteArray:
+    case kExternalUnsignedShortArray:
+    case kExternalUnsignedIntArray:
+      return false;
+
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
                                          ExternalArrayType array_type) {
-  // TODO(476): port specialized code.
-  GenerateGeneric(masm);
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[1]  : receiver
+  // -----------------------------------
+  Label slow, check_heap_number;
+
+  // Get the key and the object from the stack.
+  __ ldm(ia, sp, r1.bit() | r2.bit());  // r1 = key, r2 = receiver
+
+  // Check that the object isn't a smi.
+  __ BranchOnSmi(r2, &slow);
+
+  // Check that the object is a JS object. Load map into r3
+  __ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE);
+  __ b(le, &slow);
+
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
+  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ b(ne, &slow);
+
+  // Check that the key is a smi.
+  __ BranchOnNotSmi(r1, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // r0: value
+  // r1: index (smi)
+  // r2: object
+  __ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
+  __ cmp(r3, ip);
+  __ b(ne, &slow);
+
+  // Check that the index is in range.
+  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // Untag the index.
+  __ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset));
+  __ cmp(r1, ip);
+  // Unsigned comparison catches both negative and too-large values.
+  __ b(hs, &slow);
+
+  // Handle both smis and HeapNumbers in the fast path. Go to the
+  // runtime for all other kinds of values.
+  // r0: value
+  // r1: index (integer)
+  // r2: array
+  __ BranchOnNotSmi(r0, &check_heap_number);
+  __ mov(r3, Operand(r0, ASR, kSmiTagSize));  // Untag the value.
+  __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
+
+  // r1: index (integer)
+  // r2: base pointer of external storage
+  // r3: value (integer)
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalUnsignedByteArray:
+      __ strb(r3, MemOperand(r2, r1, LSL, 0));
+      break;
+    case kExternalShortArray:
+    case kExternalUnsignedShortArray:
+      __ strh(r3, MemOperand(r2, r1, LSL, 1));
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ str(r3, MemOperand(r2, r1, LSL, 2));
+      break;
+    case kExternalFloatArray:
+      // Need to perform int-to-float conversion.
+      ConvertIntToFloat(masm, r3, r4, r5, r6);
+      __ str(r4, MemOperand(r2, r1, LSL, 2));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // r0: value
+  __ Ret();
+
+
+  // r0: value
+  // r1: index (integer)
+  // r2: external array object
+  __ bind(&check_heap_number);
+  __ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+
+  __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
+
+  // The WebGL specification leaves the behavior of storing NaN and
+  // +/-Infinity into integer arrays basically undefined. For more
+  // reproducible behavior, convert these to zero.
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+
+    // vldr requires offset to be a multiple of 4 so we can not
+    // include -kHeapObjectTag into it.
+    __ sub(r3, r0, Operand(kHeapObjectTag));
+    __ vldr(d0, r3, HeapNumber::kValueOffset);
+
+    if (array_type == kExternalFloatArray) {
+      __ vcvt_f32_f64(s0, d0);
+      __ vmov(r3, s0);
+      __ str(r3, MemOperand(r2, r1, LSL, 2));
+    } else {
+      Label done;
+
+      // Need to perform float-to-int conversion.
+      // Test for NaN.
+      __ vcmp(d0, d0);
+      // Move vector status bits to normal status bits.
+      __ vmrs(v8::internal::pc);
+      __ mov(r3, Operand(0), LeaveCC, vs);  // NaN converts to 0
+      __ b(vs, &done);
+
+      // Test whether the exponent is equal to 0x7FF (infinity or NaN).
+      __ vmov(r4, r3, d0);
+      __ mov(r5, Operand(0x7FF00000));
+      __ and_(r3, r3, Operand(r5));
+      __ teq(r3, Operand(r5));
+      __ mov(r3, Operand(0), LeaveCC, eq);
+
+      // Not infinity or NaN; simply convert to int.
+      if (IsElementTypeSigned(array_type)) {
+        __ vcvt_s32_f64(s0, d0, ne);
+      } else {
+        __ vcvt_u32_f64(s0, d0, ne);
+      }
+
+      __ vmov(r3, s0, ne);
+
+      __ bind(&done);
+      switch (array_type) {
+        case kExternalByteArray:
+        case kExternalUnsignedByteArray:
+          __ strb(r3, MemOperand(r2, r1, LSL, 0));
+          break;
+        case kExternalShortArray:
+        case kExternalUnsignedShortArray:
+          __ strh(r3, MemOperand(r2, r1, LSL, 1));
+          break;
+        case kExternalIntArray:
+        case kExternalUnsignedIntArray:
+          __ str(r3, MemOperand(r2, r1, LSL, 2));
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+
+    // r0: original value
+    __ Ret();
+  } else {
+    // VFP3 is not available, do manual conversions.
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+
+    if (array_type == kExternalFloatArray) {
+      Label done, nan_or_infinity_or_zero;
+      static const int kMantissaInHiWordShift =
+          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+      static const int kMantissaInLoWordShift =
+          kBitsPerInt - kMantissaInHiWordShift;
+
+      // Test for all special exponent values: zeros, subnormal numbers, NaNs
+      // and infinities. All these should be converted to 0.
+      __ mov(r5, Operand(HeapNumber::kExponentMask));
+      __ and_(r6, r3, Operand(r5), SetCC);
+      __ b(eq, &nan_or_infinity_or_zero);
+
+      __ teq(r6, Operand(r5));
+      __ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq);
+      __ b(eq, &nan_or_infinity_or_zero);
+
+      // Rebias exponent.
+      __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
+      __ add(r6,
+             r6,
+             Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+      __ cmp(r6, Operand(kBinary32MaxExponent));
+      __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt);
+      __ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt);
+      __ b(gt, &done);
+
+      __ cmp(r6, Operand(kBinary32MinExponent));
+      __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt);
+      __ b(lt, &done);
+
+      __ and_(r7, r3, Operand(HeapNumber::kSignMask));
+      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
+      __ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift));
+      __ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift));
+      __ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift));
+
+      __ bind(&done);
+      __ str(r3, MemOperand(r2, r1, LSL, 2));
+      __ Ret();
+
+      __ bind(&nan_or_infinity_or_zero);
+      __ and_(r7, r3, Operand(HeapNumber::kSignMask));
+      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
+      __ orr(r6, r6, r7);
+      __ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift));
+      __ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift));
+      __ b(&done);
+    } else {
+      bool is_signed_type  = IsElementTypeSigned(array_type);
+      int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+      int32_t min_value    = is_signed_type ? 0x80000000 : 0x00000000;
+
+      Label done, sign;
+
+      // Test for all special exponent values: zeros, subnormal numbers, NaNs
+      // and infinities. All these should be converted to 0.
+      __ mov(r5, Operand(HeapNumber::kExponentMask));
+      __ and_(r6, r3, Operand(r5), SetCC);
+      __ mov(r3, Operand(0), LeaveCC, eq);
+      __ b(eq, &done);
+
+      __ teq(r6, Operand(r5));
+      __ mov(r3, Operand(0), LeaveCC, eq);
+      __ b(eq, &done);
+
+      // Unbias exponent.
+      __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
+      __ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC);
+      // If exponent is negative then the result is 0.
+      __ mov(r3, Operand(0), LeaveCC, mi);
+      __ b(mi, &done);
+
+      // If exponent is too big then the result is the minimal value.
+      __ cmp(r6, Operand(meaningfull_bits - 1));
+      __ mov(r3, Operand(min_value), LeaveCC, ge);
+      __ b(ge, &done);
+
+      __ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC);
+      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
+      __ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+      __ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+      __ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl);
+      __ b(pl, &sign);
+
+      __ rsb(r6, r6, Operand(0));
+      __ mov(r3, Operand(r3, LSL, r6));
+      __ rsb(r6, r6, Operand(meaningfull_bits));
+      __ orr(r3, r3, Operand(r4, LSR, r6));
+
+      __ bind(&sign);
+      __ teq(r5, Operand(0));
+      __ rsb(r3, r3, Operand(0), LeaveCC, ne);
+
+      __ bind(&done);
+      switch (array_type) {
+        case kExternalByteArray:
+        case kExternalUnsignedByteArray:
+          __ strb(r3, MemOperand(r2, r1, LSL, 0));
+          break;
+        case kExternalShortArray:
+        case kExternalUnsignedShortArray:
+          __ strh(r3, MemOperand(r2, r1, LSL, 1));
+          break;
+        case kExternalIntArray:
+        case kExternalUnsignedIntArray:
+          __ str(r3, MemOperand(r2, r1, LSL, 2));
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+  }
+
+  // Slow case: call runtime.
+  __ bind(&slow);
+  GenerateRuntimeSetProperty(masm);
 }
 
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 691c08c..ac1c14f 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1192,7 +1192,7 @@
   // ARMv7 VFP3 instructions to implement integer to double conversion.
   mov(r7, Operand(inReg, ASR, kSmiTagSize));
   vmov(s15, r7);
-  vcvt(d7, s15);
+  vcvt_f64_s32(d7, s15);
   vmov(outLowReg, outHighReg, d7);
 }
 
@@ -1455,6 +1455,58 @@
 }
 
 
+// Allocates a heap number or jumps to the gc_required label if the young
+// space is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required) {
+  // Allocate an object in the heap for the heap number and tag it as a heap
+  // object.
+  AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Get heap number map and store it in the allocated object.
+  LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+void MacroAssembler::CountLeadingZeros(Register source,
+                                       Register scratch,
+                                       Register zeros) {
+#ifdef CAN_USE_ARMV5_INSTRUCTIONS
+  clz(zeros, source);  // This instruction is only supported on ARMv5 and later.
+#else
+  mov(zeros, Operand(0));
+  mov(scratch, source);
+  // Top 16.
+  tst(scratch, Operand(0xffff0000));
+  add(zeros, zeros, Operand(16), LeaveCC, eq);
+  mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
+  // Top 8.
+  tst(scratch, Operand(0xff000000));
+  add(zeros, zeros, Operand(8), LeaveCC, eq);
+  mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
+  // Top 4.
+  tst(scratch, Operand(0xf0000000));
+  add(zeros, zeros, Operand(4), LeaveCC, eq);
+  mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
+  // Top 2.
+  tst(scratch, Operand(0xc0000000));
+  add(zeros, zeros, Operand(2), LeaveCC, eq);
+  mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
+  // Top bit.
+  tst(scratch, Operand(0x80000000u));
+  add(zeros, zeros, Operand(1), LeaveCC, eq);
+#endif
+}
+
+
 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
     Register first,
     Register second,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 8c70d95..1097bd9 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -239,6 +239,12 @@
                                Register scratch2,
                                Label* gc_required);
 
+  // Allocates a heap number or jumps to the gc_required label if the young
+  // space is full and a scavenge is needed.
+  void AllocateHeapNumber(Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required);
 
   // ---------------------------------------------------------------------------
   // Support functions.
@@ -319,6 +325,12 @@
                                          Register outHighReg,
                                          Register outLowReg);
 
+  // Count leading zeros in a 32 bit word.  On ARMv5 and later it uses the clz
+  // instruction.  On pre-ARMv5 hardware this routine gives the wrong answer
+  // for 0 (31 instead of 32).
+  void CountLeadingZeros(Register source,
+                         Register scratch,
+                         Register zeros);
 
   // ---------------------------------------------------------------------------
   // Runtime calls
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index cee5aea..49b4a5b 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -72,6 +72,8 @@
 
   int32_t GetRegisterValue(int regnum);
   bool GetValue(const char* desc, int32_t* value);
+  bool GetVFPSingleValue(const char* desc, float* value);
+  bool GetVFPDoubleValue(const char* desc, double* value);
 
   // Set or delete a breakpoint. Returns true if successful.
   bool SetBreakpoint(Instr* breakpc);
@@ -154,6 +156,28 @@
 }
 
 
+bool Debugger::GetVFPSingleValue(const char* desc, float* value) {
+  bool is_double;
+  int regnum = VFPRegisters::Number(desc, &is_double);
+  if (regnum != kNoRegister && !is_double) {
+    *value = sim_->get_float_from_s_register(regnum);
+    return true;
+  }
+  return false;
+}
+
+
+bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
+  bool is_double;
+  int regnum = VFPRegisters::Number(desc, &is_double);
+  if (regnum != kNoRegister && is_double) {
+    *value = sim_->get_double_from_d_register(regnum);
+    return true;
+  }
+  return false;
+}
+
+
 bool Debugger::SetBreakpoint(Instr* breakpc) {
   // Check if a breakpoint can be set. If not return without any side-effects.
   if (sim_->break_pc_ != NULL) {
@@ -249,6 +273,8 @@
       } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
         if (args == 2) {
           int32_t value;
+          float svalue;
+          double dvalue;
           if (strcmp(arg1, "all") == 0) {
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
@@ -257,6 +283,10 @@
           } else {
             if (GetValue(arg1, &value)) {
               PrintF("%s: 0x%08x %d \n", arg1, value, value);
+            } else if (GetVFPSingleValue(arg1, &svalue)) {
+              PrintF("%s: %f \n", arg1, svalue);
+            } else if (GetVFPDoubleValue(arg1, &dvalue)) {
+              PrintF("%s: %lf \n", arg1, dvalue);
             } else {
               PrintF("%s unrecognized\n", arg1);
             }
@@ -1919,6 +1949,13 @@
 }
 
 
+// Depending on the value of the last_bit flag, glue together a register code
+// from the vm and m values (where m is expected to be a single bit).
+static int GlueRegCode(bool last_bit, int vm, int m) {
+  return last_bit ? ((vm << 1) | m) : ((m << 4) | vm);
+}
+
+
 // void Simulator::DecodeTypeVFP(Instr* instr)
 // The Following ARMv7 VFPv instructions are currently supported.
 // vmov :Sn = Rt
@@ -1933,114 +1970,212 @@
 // VMRS
 void Simulator::DecodeTypeVFP(Instr* instr) {
   ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+  ASSERT(instr->Bits(11, 9) == 0x5);
 
-  int rt = instr->RtField();
   int vm = instr->VmField();
-  int vn = instr->VnField();
   int vd = instr->VdField();
+  int vn = instr->VnField();
 
-  if (instr->Bit(23) == 1) {
-    if ((instr->Bits(21, 19) == 0x7) &&
-        (instr->Bits(18, 16) == 0x5) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 1) &&
-        (instr->Bit(6) == 1) &&
-        (instr->Bit(4) == 0)) {
-      double dm_val = get_double_from_d_register(vm);
-      int32_t int_value = static_cast<int32_t>(dm_val);
-      set_s_register_from_sinteger(((vd<<1) | instr->DField()), int_value);
-    } else if ((instr->Bits(21, 19) == 0x7) &&
-               (instr->Bits(18, 16) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(7) == 1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      int32_t int_value = get_sinteger_from_s_register(((vm<<1) |
-                                                       instr->MField()));
-      double dbl_value = static_cast<double>(int_value);
-      set_d_register_from_double(vd, dbl_value);
-    } else if ((instr->Bit(21) == 0x0) &&
-               (instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
+  if (instr->Bit(4) == 0) {
+    if (instr->Opc1Field() == 0x7) {
+      // Other data processing instructions
+      if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+        DecodeVCVTBetweenDoubleAndSingle(instr);
+      } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() >> 1) == 0x6) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCMP(instr);
+      } else {
+        UNREACHABLE();  // Not used by V8.
+      }
+    } else if (instr->Opc1Field() == 0x3) {
+      if (instr->SzField() != 0x1) {
+        UNREACHABLE();  // Not used by V8.
+      }
+
+      if (instr->Opc3Field() & 0x1) {
+        // vsub
+        double dn_value = get_double_from_d_register(vn);
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = dn_value - dm_value;
+        set_d_register_from_double(vd, dd_value);
+      } else {
+        // vadd
+        double dn_value = get_double_from_d_register(vn);
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = dn_value + dm_value;
+        set_d_register_from_double(vd, dd_value);
+      }
+    } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
+      // vmul
+      if (instr->SzField() != 0x1) {
+        UNREACHABLE();  // Not used by V8.
+      }
+
+      double dn_value = get_double_from_d_register(vn);
+      double dm_value = get_double_from_d_register(vm);
+      double dd_value = dn_value * dm_value;
+      set_d_register_from_double(vd, dd_value);
+    } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
+      // vdiv
+      if (instr->SzField() != 0x1) {
+        UNREACHABLE();  // Not used by V8.
+      }
+
       double dn_value = get_double_from_d_register(vn);
       double dm_value = get_double_from_d_register(vm);
       double dd_value = dn_value / dm_value;
       set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Bits(21, 20) == 0x3) &&
-               (instr->Bits(19, 16) == 0x4) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0x1) &&
-               (instr->Bit(4) == 0x0)) {
-      double dd_value = get_double_from_d_register(vd);
-      double dm_value = get_double_from_d_register(vm);
-      Compute_FPSCR_Flags(dd_value, dm_value);
-    } else if ((instr->Bits(23, 20) == 0xF) &&
-               (instr->Bits(19, 16) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(7, 5) == 0x0) &&
-               (instr->Bit(4) == 0x1)    &&
-               (instr->Bits(3, 0) == 0x0)) {
-      if (instr->Bits(15, 12) == 0xF)
+    } else {
+      UNIMPLEMENTED();  // Not used by V8.
+    }
+  } else {
+    if ((instr->VCField() == 0x0) &&
+        (instr->VAField() == 0x0)) {
+      DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+    } else if ((instr->VLField() == 0x1) &&
+               (instr->VCField() == 0x0) &&
+               (instr->VAField() == 0x7) &&
+               (instr->Bits(19, 16) == 0x1)) {
+      // vmrs
+      if (instr->RtField() == 0xF)
         Copy_FPSCR_to_APSR();
       else
         UNIMPLEMENTED();  // Not used by V8.
     } else {
       UNIMPLEMENTED();  // Not used by V8.
     }
-  } else if (instr->Bit(21) == 1) {
-    if ((instr->Bit(20) == 0x1) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 0x1) &&
-        (instr->Bit(6) == 0) &&
-        (instr->Bit(4) == 0)) {
-      double dn_value = get_double_from_d_register(vn);
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = dn_value + dm_value;
-      set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      double dn_value = get_double_from_d_register(vn);
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = dn_value - dm_value;
-      set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
-      double dn_value = get_double_from_d_register(vn);
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = dn_value * dm_value;
-      set_d_register_from_double(vd, dd_value);
-    } else {
+  }
+}
+
+
+void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
+  ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
+         (instr->VAField() == 0x0));
+
+  int t = instr->RtField();
+  int n  = GlueRegCode(true, instr->VnField(), instr->NField());
+  bool to_arm_register = (instr->VLField() == 0x1);
+
+  if (to_arm_register) {
+    int32_t int_value = get_sinteger_from_s_register(n);
+    set_register(t, int_value);
+  } else {
+    int32_t rs_val = get_register(t);
+    set_s_register_from_sinteger(n, rs_val);
+  }
+}
+
+
+void Simulator::DecodeVCMP(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+         (instr->Opc3Field() & 0x1));
+
+  // Comparison.
+  bool dp_operation = (instr->SzField() == 1);
+
+  if (instr->Bit(7) != 0) {
+    // Raising exceptions for quiet NaNs is not supported.
+    UNIMPLEMENTED();  // Not used by V8.
+  }
+
+  int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField());
+  int m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
+
+  if (dp_operation) {
+    double dd_value = get_double_from_d_register(d);
+    double dm_value = get_double_from_d_register(m);
+
+    Compute_FPSCR_Flags(dd_value, dm_value);
+  } else {
+    UNIMPLEMENTED();  // Not used by V8.
+  }
+}
+
+
+void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+
+  bool double_to_single = (instr->SzField() == 1);
+  int dst = GlueRegCode(double_to_single, instr->VdField(), instr->DField());
+  int src = GlueRegCode(!double_to_single, instr->VmField(), instr->MField());
+
+  if (double_to_single) {
+    double val = get_double_from_d_register(src);
+    set_s_register_from_float(dst, static_cast<float>(val));
+  } else {
+    float val = get_float_from_s_register(src);
+    set_d_register_from_double(dst, static_cast<double>(val));
+  }
+}
+
+
+void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
+         (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+
+  // Conversion between floating-point and integer.
+  int vd = instr->VdField();
+  int d = instr->DField();
+  int vm = instr->VmField();
+  int m = instr->MField();
+
+  bool to_integer = (instr->Bit(18) == 1);
+  bool dp_operation = (instr->SzField() == 1);
+  if (to_integer) {
+    bool unsigned_integer = (instr->Bit(16) == 0);
+    if (instr->Bit(7) != 1) {
+      // Only rounding towards zero supported.
       UNIMPLEMENTED();  // Not used by V8.
     }
-  } else {
-    if ((instr->Bit(20) == 0x0) &&
-        (instr->Bits(11, 8) == 0xA) &&
-        (instr->Bits(6, 5) == 0x0) &&
-        (instr->Bit(4) == 1) &&
-        (instr->Bits(3, 0) == 0x0)) {
-      int32_t rs_val = get_register(rt);
-      set_s_register_from_sinteger(((vn<<1) | instr->NField()), rs_val);
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(6, 5) == 0x0) &&
-               (instr->Bit(4) == 1) &&
-               (instr->Bits(3, 0) == 0x0)) {
-      int32_t int_value = get_sinteger_from_s_register(((vn<<1) |
-                                                       instr->NField()));
-      set_register(rt, int_value);
+
+    int dst = GlueRegCode(true, vd, d);
+    int src = GlueRegCode(!dp_operation, vm, m);
+
+    if (dp_operation) {
+      double val = get_double_from_d_register(src);
+
+      int sint = unsigned_integer ? static_cast<uint32_t>(val) :
+                                    static_cast<int32_t>(val);
+
+      set_s_register_from_sinteger(dst, sint);
     } else {
-      UNIMPLEMENTED();  // Not used by V8.
+      float val = get_float_from_s_register(src);
+
+      int sint = unsigned_integer ? static_cast<uint32_t>(val) :
+                                      static_cast<int32_t>(val);
+
+      set_s_register_from_sinteger(dst, sint);
+    }
+  } else {
+    bool unsigned_integer = (instr->Bit(7) == 0);
+
+    int dst = GlueRegCode(!dp_operation, vd, d);
+    int src = GlueRegCode(true, vm, m);
+
+    int val = get_sinteger_from_s_register(src);
+
+    if (dp_operation) {
+      if (unsigned_integer) {
+        set_d_register_from_double(dst,
+                                   static_cast<double>((uint32_t)val));
+      } else {
+        set_d_register_from_double(dst, static_cast<double>(val));
+      }
+    } else {
+      if (unsigned_integer) {
+        set_s_register_from_float(dst,
+                                  static_cast<float>((uint32_t)val));
+      } else {
+        set_s_register_from_float(dst, static_cast<float>(val));
+      }
     }
   }
 }
@@ -2055,9 +2190,32 @@
 void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
-  if (instr->CoprocessorField() != 0xB) {
-    UNIMPLEMENTED();  // Not used by V8.
-  } else {
+  if (instr->CoprocessorField() == 0xA) {
+    switch (instr->OpcodeField()) {
+      case 0x8:
+      case 0xC: {  // Load and store float to memory.
+        int rn = instr->RnField();
+        int vd = instr->VdField();
+        int offset = instr->Immed8Field();
+        if (!instr->HasU()) {
+          offset = -offset;
+        }
+
+        int32_t address = get_register(rn) + 4 * offset;
+        if (instr->HasL()) {
+          // Load single precision float from memory: vldr.
+          set_s_register_from_sinteger(vd, ReadW(address, instr));
+        } else {
+          // Store single precision float to memory: vstr.
+          WriteW(address, get_sinteger_from_s_register(vd), instr);
+        }
+        break;
+      }
+      default:
+        UNIMPLEMENTED();  // Not used by V8.
+        break;
+    }
+  } else if (instr->CoprocessorField() == 0xB) {
     switch (instr->OpcodeField()) {
       case 0x2:
         // Load and store double to two GP registers
@@ -2106,6 +2264,8 @@
         UNIMPLEMENTED();  // Not used by V8.
         break;
     }
+  } else {
+    UNIMPLEMENTED();  // Not used by V8.
   }
 }
 
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 1973730..4ee9070 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -231,6 +231,11 @@
   void DecodeTypeVFP(Instr* instr);
   void DecodeType6CoprocessorIns(Instr* instr);
 
+  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
+  void DecodeVCMP(Instr* instr);
+  void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
+  void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
+
   // Executes one instruction.
   void InstructionDecode(Instr* instr);
 
diff --git a/src/assembler.cc b/src/assembler.cc
index aaf10ef..bb010c8 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -664,6 +664,16 @@
 }
 
 
+ExternalReference ExternalReference::compile_array_pop_call() {
+  return ExternalReference(FUNCTION_ADDR(CompileArrayPopCall));
+}
+
+
+ExternalReference ExternalReference::compile_array_push_call() {
+  return ExternalReference(FUNCTION_ADDR(CompileArrayPushCall));
+}
+
+
 #ifdef V8_NATIVE_REGEXP
 
 ExternalReference ExternalReference::re_check_stack_guard_state() {
diff --git a/src/assembler.h b/src/assembler.h
index cde7d69..b4834e5 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -443,6 +443,9 @@
 
   static ExternalReference scheduled_exception_address();
 
+  static ExternalReference compile_array_pop_call();
+  static ExternalReference compile_array_push_call();
+
   Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/ast.cc b/src/ast.cc
index 9fc4af0..7dd991f 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -79,7 +79,8 @@
     is_this_(is_this),
     inside_with_(inside_with),
     is_trivial_(false),
-    reaching_definitions_(NULL) {
+    reaching_definitions_(NULL),
+    is_primitive_(false) {
   // names must be canonicalized for fast equality checks
   ASSERT(name->IsSymbol());
 }
@@ -87,7 +88,8 @@
 
 VariableProxy::VariableProxy(bool is_this)
   : is_this_(is_this),
-    reaching_definitions_(NULL) {
+    reaching_definitions_(NULL),
+    is_primitive_(false) {
 }
 
 
@@ -507,7 +509,7 @@
 // The following expression types are never primitive because they express
 // Object values.
 bool FunctionLiteral::IsPrimitive() { return false; }
-bool FunctionBoilerplateLiteral::IsPrimitive() { return false; }
+bool SharedFunctionInfoLiteral::IsPrimitive() { return false; }
 bool RegExpLiteral::IsPrimitive() { return false; }
 bool ObjectLiteral::IsPrimitive() { return false; }
 bool ArrayLiteral::IsPrimitive() { return false; }
@@ -518,12 +520,18 @@
 
 // The following expression types are not always primitive because we do not
 // have enough information to conclude that they are.
-bool VariableProxy::IsPrimitive() { return false; }
 bool Property::IsPrimitive() { return false; }
 bool Call::IsPrimitive() { return false; }
 bool CallRuntime::IsPrimitive() { return false; }
 
 
+// A variable use is not primitive unless the primitive-type analysis
+// determines otherwise.
+bool VariableProxy::IsPrimitive() {
+  ASSERT(!is_primitive_ || (var() != NULL && var()->IsStackAllocated()));
+  return is_primitive_;
+}
+
 // The value of a conditional is the value of one of the alternatives.  It's
 // always primitive if both alternatives are always primitive.
 bool Conditional::IsPrimitive() {
@@ -592,4 +600,387 @@
 bool CompareOperation::IsPrimitive() { return true; }
 
 
+// Implementation of a copy visitor. The visitor creates a deep copy
+// of AST nodes. Nodes that do not require a deep copy are copied
+// with the default copy constructor.
+
+AstNode::AstNode(AstNode* other) : num_(kNoNumber) {
+  // AST node number should be unique. Assert that we only copy AstNodes
+  // before node numbers are assigned.
+  ASSERT(other->num_ == kNoNumber);
+}
+
+
+Statement::Statement(Statement* other)
+    : AstNode(other), statement_pos_(other->statement_pos_) {}
+
+
+Expression::Expression(Expression* other)
+    : AstNode(other),
+      bitfields_(other->bitfields_),
+      type_(other->type_) {}
+
+
+BreakableStatement::BreakableStatement(BreakableStatement* other)
+    : Statement(other), labels_(other->labels_), type_(other->type_) {}
+
+
+Block::Block(Block* other, ZoneList<Statement*>* statements)
+    : BreakableStatement(other),
+      statements_(statements->length()),
+      is_initializer_block_(other->is_initializer_block_) {
+  statements_.AddAll(*statements);
+}
+
+
+ExpressionStatement::ExpressionStatement(ExpressionStatement* other,
+                                         Expression* expression)
+    : Statement(other), expression_(expression) {}
+
+
+IfStatement::IfStatement(IfStatement* other,
+                         Expression* condition,
+                         Statement* then_statement,
+                         Statement* else_statement)
+    : Statement(other),
+      condition_(condition),
+      then_statement_(then_statement),
+      else_statement_(else_statement) {}
+
+
+EmptyStatement::EmptyStatement(EmptyStatement* other) : Statement(other) {}
+
+
+IterationStatement::IterationStatement(IterationStatement* other,
+                                       Statement* body)
+    : BreakableStatement(other), body_(body) {}
+
+
+ForStatement::ForStatement(ForStatement* other,
+                           Statement* init,
+                           Expression* cond,
+                           Statement* next,
+                           Statement* body)
+    : IterationStatement(other, body),
+      init_(init),
+      cond_(cond),
+      next_(next),
+      may_have_function_literal_(other->may_have_function_literal_),
+      loop_variable_(other->loop_variable_),
+      peel_this_loop_(other->peel_this_loop_) {}
+
+
+Assignment::Assignment(Assignment* other,
+                       Expression* target,
+                       Expression* value)
+    : Expression(other),
+      op_(other->op_),
+      target_(target),
+      value_(value),
+      pos_(other->pos_),
+      block_start_(other->block_start_),
+      block_end_(other->block_end_) {}
+
+
+Property::Property(Property* other, Expression* obj, Expression* key)
+    : Expression(other),
+      obj_(obj),
+      key_(key),
+      pos_(other->pos_),
+      type_(other->type_) {}
+
+
+Call::Call(Call* other,
+           Expression* expression,
+           ZoneList<Expression*>* arguments)
+    : Expression(other),
+      expression_(expression),
+      arguments_(arguments),
+      pos_(other->pos_) {}
+
+
+UnaryOperation::UnaryOperation(UnaryOperation* other, Expression* expression)
+    : Expression(other), op_(other->op_), expression_(expression) {}
+
+
+BinaryOperation::BinaryOperation(BinaryOperation* other,
+                                 Expression* left,
+                                 Expression* right)
+    : Expression(other),
+      op_(other->op_),
+      left_(left),
+      right_(right) {}
+
+
+CountOperation::CountOperation(CountOperation* other, Expression* expression)
+    : Expression(other),
+      is_prefix_(other->is_prefix_),
+      op_(other->op_),
+      expression_(expression) {}
+
+
+CompareOperation::CompareOperation(CompareOperation* other,
+                                   Expression* left,
+                                   Expression* right)
+    : Expression(other),
+      op_(other->op_),
+      left_(left),
+      right_(right) {}
+
+
+Expression* CopyAstVisitor::DeepCopyExpr(Expression* expr) {
+  expr_ = NULL;
+  if (expr != NULL) Visit(expr);
+  return expr_;
+}
+
+
+Statement* CopyAstVisitor::DeepCopyStmt(Statement* stmt) {
+  stmt_ = NULL;
+  if (stmt != NULL) Visit(stmt);
+  return stmt_;
+}
+
+
+ZoneList<Expression*>* CopyAstVisitor::DeepCopyExprList(
+    ZoneList<Expression*>* expressions) {
+  ZoneList<Expression*>* copy =
+      new ZoneList<Expression*>(expressions->length());
+  for (int i = 0; i < expressions->length(); i++) {
+    copy->Add(DeepCopyExpr(expressions->at(i)));
+  }
+  return copy;
+}
+
+
+ZoneList<Statement*>* CopyAstVisitor::DeepCopyStmtList(
+    ZoneList<Statement*>* statements) {
+  ZoneList<Statement*>* copy = new ZoneList<Statement*>(statements->length());
+  for (int i = 0; i < statements->length(); i++) {
+    copy->Add(DeepCopyStmt(statements->at(i)));
+  }
+  return copy;
+}
+
+
+void CopyAstVisitor::VisitBlock(Block* stmt) {
+  stmt_ = new Block(stmt,
+                    DeepCopyStmtList(stmt->statements()));
+}
+
+
+void CopyAstVisitor::VisitExpressionStatement(
+    ExpressionStatement* stmt) {
+  stmt_ = new ExpressionStatement(stmt, DeepCopyExpr(stmt->expression()));
+}
+
+
+void CopyAstVisitor::VisitEmptyStatement(EmptyStatement* stmt) {
+  stmt_ = new EmptyStatement(stmt);
+}
+
+
+void CopyAstVisitor::VisitIfStatement(IfStatement* stmt) {
+  stmt_ = new IfStatement(stmt,
+                          DeepCopyExpr(stmt->condition()),
+                          DeepCopyStmt(stmt->then_statement()),
+                          DeepCopyStmt(stmt->else_statement()));
+}
+
+
+void CopyAstVisitor::VisitContinueStatement(ContinueStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitBreakStatement(BreakStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitReturnStatement(ReturnStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitWithEnterStatement(
+    WithEnterStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitWithExitStatement(WithExitStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitWhileStatement(WhileStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitForStatement(ForStatement* stmt) {
+  stmt_ = new ForStatement(stmt,
+                           DeepCopyStmt(stmt->init()),
+                           DeepCopyExpr(stmt->cond()),
+                           DeepCopyStmt(stmt->next()),
+                           DeepCopyStmt(stmt->body()));
+}
+
+
+void CopyAstVisitor::VisitForInStatement(ForInStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitTryFinallyStatement(
+    TryFinallyStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitDebuggerStatement(
+    DebuggerStatement* stmt) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitConditional(Conditional* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitSlot(Slot* expr) {
+  UNREACHABLE();
+}
+
+
+void CopyAstVisitor::VisitVariableProxy(VariableProxy* expr) {
+  expr_ = new VariableProxy(*expr);
+}
+
+
+void CopyAstVisitor::VisitLiteral(Literal* expr) {
+  expr_ = new Literal(*expr);
+}
+
+
+void CopyAstVisitor::VisitRegExpLiteral(RegExpLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitArrayLiteral(ArrayLiteral* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitCatchExtensionObject(
+    CatchExtensionObject* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitAssignment(Assignment* expr) {
+  expr_ = new Assignment(expr,
+                         DeepCopyExpr(expr->target()),
+                         DeepCopyExpr(expr->value()));
+}
+
+
+void CopyAstVisitor::VisitThrow(Throw* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitProperty(Property* expr) {
+  expr_ = new Property(expr,
+                       DeepCopyExpr(expr->obj()),
+                       DeepCopyExpr(expr->key()));
+}
+
+
+void CopyAstVisitor::VisitCall(Call* expr) {
+  expr_ = new Call(expr,
+                   DeepCopyExpr(expr->expression()),
+                   DeepCopyExprList(expr->arguments()));
+}
+
+
+void CopyAstVisitor::VisitCallNew(CallNew* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitCallRuntime(CallRuntime* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitUnaryOperation(UnaryOperation* expr) {
+  expr_ = new UnaryOperation(expr, DeepCopyExpr(expr->expression()));
+}
+
+
+void CopyAstVisitor::VisitCountOperation(CountOperation* expr) {
+  expr_ = new CountOperation(expr,
+                             DeepCopyExpr(expr->expression()));
+}
+
+
+void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) {
+  expr_ = new BinaryOperation(expr,
+                              DeepCopyExpr(expr->left()),
+                              DeepCopyExpr(expr->right()));
+}
+
+
+void CopyAstVisitor::VisitCompareOperation(CompareOperation* expr) {
+  expr_ = new CompareOperation(expr,
+                               DeepCopyExpr(expr->left()),
+                               DeepCopyExpr(expr->right()));
+}
+
+
+void CopyAstVisitor::VisitThisFunction(ThisFunction* expr) {
+  SetStackOverflow();
+}
+
+
+void CopyAstVisitor::VisitDeclaration(Declaration* decl) {
+  UNREACHABLE();
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/ast.h b/src/ast.h
index 8248f62..def803b 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -73,7 +73,7 @@
 
 #define EXPRESSION_NODE_LIST(V)                 \
   V(FunctionLiteral)                            \
-  V(FunctionBoilerplateLiteral)                 \
+  V(SharedFunctionInfoLiteral)                  \
   V(Conditional)                                \
   V(Slot)                                       \
   V(VariableProxy)                              \
@@ -121,11 +121,15 @@
   static const int kNoNumber = -1;
 
   AstNode() : num_(kNoNumber) {}
+
+  explicit AstNode(AstNode* other);
+
   virtual ~AstNode() { }
   virtual void Accept(AstVisitor* v) = 0;
 
   // Type testing & conversion.
   virtual Statement* AsStatement() { return NULL; }
+  virtual Block* AsBlock() { return NULL; }
   virtual ExpressionStatement* AsExpressionStatement() { return NULL; }
   virtual EmptyStatement* AsEmptyStatement() { return NULL; }
   virtual Expression* AsExpression() { return NULL; }
@@ -137,6 +141,7 @@
   virtual TargetCollector* AsTargetCollector() { return NULL; }
   virtual BreakableStatement* AsBreakableStatement() { return NULL; }
   virtual IterationStatement* AsIterationStatement() { return NULL; }
+  virtual ForStatement* AsForStatement() { return NULL; }
   virtual UnaryOperation* AsUnaryOperation() { return NULL; }
   virtual CountOperation* AsCountOperation() { return NULL; }
   virtual BinaryOperation* AsBinaryOperation() { return NULL; }
@@ -160,6 +165,8 @@
  public:
   Statement() : statement_pos_(RelocInfo::kNoPosition) {}
 
+  explicit Statement(Statement* other);
+
   virtual Statement* AsStatement()  { return this; }
   virtual ReturnStatement* AsReturnStatement() { return NULL; }
 
@@ -198,6 +205,8 @@
 
   Expression() : bitfields_(0) {}
 
+  explicit Expression(Expression* other);
+
   virtual Expression* AsExpression()  { return this; }
 
   virtual bool IsValidLeftHandSide() { return false; }
@@ -230,6 +239,15 @@
   // Static type information for this expression.
   StaticType* type() { return &type_; }
 
+  // True if the expression is a loop condition.
+  bool is_loop_condition() const {
+    return LoopConditionField::decode(bitfields_);
+  }
+  void set_is_loop_condition(bool flag) {
+    bitfields_ = (bitfields_ & ~LoopConditionField::mask()) |
+        LoopConditionField::encode(flag);
+  }
+
   // AST analysis results
 
   // True if the expression rooted at this node can be compiled by the
@@ -265,7 +283,6 @@
     bitfields_ |= NumBitOpsField::encode(num_bit_ops);
   }
 
-
  private:
   static const int kMaxNumBitOps = (1 << 5) - 1;
 
@@ -277,6 +294,7 @@
   class NoNegativeZeroField : public BitField<bool, 1, 1> {};
   class ToInt32Field : public BitField<bool, 2, 1> {};
   class NumBitOpsField : public BitField<int, 3, 5> {};
+  class LoopConditionField: public BitField<bool, 8, 1> {};
 };
 
 
@@ -327,6 +345,8 @@
     ASSERT(labels == NULL || labels->length() > 0);
   }
 
+  explicit BreakableStatement(BreakableStatement* other);
+
  private:
   ZoneStringList* labels_;
   Type type_;
@@ -341,8 +361,14 @@
         statements_(capacity),
         is_initializer_block_(is_initializer_block) { }
 
+  // Construct a clone initialized from the original block and
+  // a deep copy of all statements of the original block.
+  Block(Block* other, ZoneList<Statement*>* statements);
+
   virtual void Accept(AstVisitor* v);
 
+  virtual Block* AsBlock() { return this; }
+
   virtual Assignment* StatementAsSimpleAssignment() {
     if (statements_.length() != 1) return NULL;
     return statements_[0]->StatementAsSimpleAssignment();
@@ -394,6 +420,7 @@
   virtual IterationStatement* AsIterationStatement() { return this; }
 
   Statement* body() const { return body_; }
+  void set_body(Statement* stmt) { body_ = stmt; }
 
   // Code generation
   BreakTarget* continue_target()  { return &continue_target_; }
@@ -402,6 +429,10 @@
   explicit IterationStatement(ZoneStringList* labels)
       : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { }
 
+  // Construct a clone initialized from the original and
+  // a deep copy of the original body.
+  IterationStatement(IterationStatement* other, Statement* body);
+
   void Initialize(Statement* body) {
     body_ = body;
   }
@@ -475,7 +506,18 @@
         cond_(NULL),
         next_(NULL),
         may_have_function_literal_(true),
-        loop_variable_(NULL) {}
+        loop_variable_(NULL),
+        peel_this_loop_(false) {}
+
+  // Construct a for-statement initialized from another for-statement
+  // and deep copies of all parts of the original statement.
+  ForStatement(ForStatement* other,
+               Statement* init,
+               Expression* cond,
+               Statement* next,
+               Statement* body);
+
+  virtual ForStatement* AsForStatement() { return this; }
 
   void Initialize(Statement* init,
                   Expression* cond,
@@ -490,8 +532,11 @@
   virtual void Accept(AstVisitor* v);
 
   Statement* init() const  { return init_; }
+  void set_init(Statement* stmt) { init_ = stmt; }
   Expression* cond() const  { return cond_; }
+  void set_cond(Expression* expr) { cond_ = expr; }
   Statement* next() const  { return next_; }
+  void set_next(Statement* stmt) { next_ = stmt; }
   bool may_have_function_literal() const {
     return may_have_function_literal_;
   }
@@ -500,6 +545,9 @@
   Variable* loop_variable() { return loop_variable_; }
   void set_loop_variable(Variable* var) { loop_variable_ = var; }
 
+  bool peel_this_loop() { return peel_this_loop_; }
+  void set_peel_this_loop(bool b) { peel_this_loop_ = b; }
+
  private:
   Statement* init_;
   Expression* cond_;
@@ -507,6 +555,7 @@
   // True if there is a function literal subexpression in the condition.
   bool may_have_function_literal_;
   Variable* loop_variable_;
+  bool peel_this_loop_;
 
   friend class AstOptimizer;
 };
@@ -539,6 +588,10 @@
   explicit ExpressionStatement(Expression* expression)
       : expression_(expression) { }
 
+  // Construct an expression statement initialized from another
+  // expression statement and a deep copy of the original expression.
+  ExpressionStatement(ExpressionStatement* other, Expression* expression);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion.
@@ -681,6 +734,13 @@
         then_statement_(then_statement),
         else_statement_(else_statement) { }
 
+  // Construct an if-statement initialized from another if-statement
+  // and deep copies of all parts of the original.
+  IfStatement(IfStatement* other,
+              Expression* condition,
+              Statement* then_statement,
+              Statement* else_statement);
+
   virtual void Accept(AstVisitor* v);
 
   bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
@@ -688,7 +748,9 @@
 
   Expression* condition() const { return condition_; }
   Statement* then_statement() const { return then_statement_; }
+  void set_then_statement(Statement* stmt) { then_statement_ = stmt; }
   Statement* else_statement() const { return else_statement_; }
+  void set_else_statement(Statement* stmt) { else_statement_ = stmt; }
 
  private:
   Expression* condition_;
@@ -783,6 +845,10 @@
 
 class EmptyStatement: public Statement {
  public:
+  EmptyStatement() {}
+
+  explicit EmptyStatement(EmptyStatement* other);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion.
@@ -1022,6 +1088,8 @@
 
   virtual bool IsPrimitive();
 
+  void SetIsPrimitive(bool value) { is_primitive_ = value; }
+
   bool IsVariable(Handle<String> n) {
     return !is_this() && name().is_identical_to(n);
   }
@@ -1051,6 +1119,7 @@
   bool inside_with_;
   bool is_trivial_;
   BitVector* reaching_definitions_;
+  bool is_primitive_;
 
   VariableProxy(Handle<String> name, bool is_this, bool inside_with);
   explicit VariableProxy(bool is_this);
@@ -1145,6 +1214,8 @@
   Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
       : obj_(obj), key_(key), pos_(pos), type_(type) { }
 
+  Property(Property* other, Expression* obj, Expression* key);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion
@@ -1179,6 +1250,8 @@
   Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
       : expression_(expression), arguments_(arguments), pos_(pos) { }
 
+  Call(Call* other, Expression* expression, ZoneList<Expression*>* arguments);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing and conversion.
@@ -1255,6 +1328,8 @@
     ASSERT(Token::IsUnaryOp(op));
   }
 
+  UnaryOperation(UnaryOperation* other, Expression* expression);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion
@@ -1278,6 +1353,8 @@
     ASSERT(Token::IsBinaryOp(op));
   }
 
+  BinaryOperation(BinaryOperation* other, Expression* left, Expression* right);
+
   virtual void Accept(AstVisitor* v);
 
   // Type testing & conversion
@@ -1329,6 +1406,8 @@
     ASSERT(Token::IsCountOp(op));
   }
 
+  CountOperation(CountOperation* other, Expression* expression);
+
   virtual void Accept(AstVisitor* v);
 
   virtual CountOperation* AsCountOperation() { return this; }
@@ -1359,10 +1438,14 @@
 class CompareOperation: public Expression {
  public:
   CompareOperation(Token::Value op, Expression* left, Expression* right)
-      : op_(op), left_(left), right_(right), is_for_loop_condition_(false) {
+      : op_(op), left_(left), right_(right) {
     ASSERT(Token::IsCompareOp(op));
   }
 
+  CompareOperation(CompareOperation* other,
+                   Expression* left,
+                   Expression* right);
+
   virtual void Accept(AstVisitor* v);
 
   virtual bool IsPrimitive();
@@ -1371,10 +1454,6 @@
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }
 
-  // Accessors for flag whether this compare operation is hanging of a for loop.
-  bool is_for_loop_condition() const { return is_for_loop_condition_; }
-  void set_is_for_loop_condition() { is_for_loop_condition_ = true; }
-
   // Type testing & conversion
   virtual CompareOperation* AsCompareOperation() { return this; }
 
@@ -1382,7 +1461,6 @@
   Token::Value op_;
   Expression* left_;
   Expression* right_;
-  bool is_for_loop_condition_;
 };
 
 
@@ -1418,6 +1496,8 @@
     ASSERT(Token::IsAssignmentOp(op));
   }
 
+  Assignment(Assignment* other, Expression* target, Expression* value);
+
   virtual void Accept(AstVisitor* v);
   virtual Assignment* AsAssignment() { return this; }
 
@@ -1574,14 +1654,15 @@
 };
 
 
-class FunctionBoilerplateLiteral: public Expression {
+class SharedFunctionInfoLiteral: public Expression {
  public:
-  explicit FunctionBoilerplateLiteral(Handle<JSFunction> boilerplate)
-      : boilerplate_(boilerplate) {
-    ASSERT(boilerplate->IsBoilerplate());
-  }
+  explicit SharedFunctionInfoLiteral(
+      Handle<SharedFunctionInfo> shared_function_info)
+      : shared_function_info_(shared_function_info) { }
 
-  Handle<JSFunction> boilerplate() const { return boilerplate_; }
+  Handle<SharedFunctionInfo> shared_function_info() const {
+    return shared_function_info_;
+  }
 
   virtual bool IsLeaf() { return true; }
 
@@ -1590,7 +1671,7 @@
   virtual bool IsPrimitive();
 
  private:
-  Handle<JSFunction> boilerplate_;
+  Handle<SharedFunctionInfo> shared_function_info_;
 };
 
 
@@ -1993,6 +2074,28 @@
 };
 
 
+class CopyAstVisitor : public AstVisitor {
+ public:
+  Expression* DeepCopyExpr(Expression* expr);
+
+  Statement* DeepCopyStmt(Statement* stmt);
+
+ private:
+  ZoneList<Expression*>* DeepCopyExprList(ZoneList<Expression*>* expressions);
+
+  ZoneList<Statement*>* DeepCopyStmtList(ZoneList<Statement*>* statements);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Holds the result of copying an expression.
+  Expression* expr_;
+  // Holds the result of copying a statement.
+  Statement* stmt_;
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_AST_H_
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 56b185a..3dc2470 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -59,11 +59,12 @@
   }
 
 
-  bool Lookup(Vector<const char> name, Handle<JSFunction>* handle) {
+  bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
     for (int i = 0; i < cache_->length(); i+=2) {
       SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
       if (str->IsEqualTo(name)) {
-        *handle = Handle<JSFunction>(JSFunction::cast(cache_->get(i + 1)));
+        *handle = Handle<SharedFunctionInfo>(
+            SharedFunctionInfo::cast(cache_->get(i + 1)));
         return true;
       }
     }
@@ -71,8 +72,7 @@
   }
 
 
-  void Add(Vector<const char> name, Handle<JSFunction> fun) {
-    ASSERT(fun->IsBoilerplate());
+  void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
     HandleScope scope;
     int length = cache_->length();
     Handle<FixedArray> new_array =
@@ -81,8 +81,8 @@
     cache_ = *new_array;
     Handle<String> str = Factory::NewStringFromAscii(name, TENURED);
     cache_->set(length, *str);
-    cache_->set(length + 1, *fun);
-    Script::cast(fun->shared()->script())->set_type(Smi::FromInt(type_));
+    cache_->set(length + 1, *shared);
+    Script::cast(shared->script())->set_type(Smi::FromInt(type_));
   }
 
  private:
@@ -91,7 +91,6 @@
   DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
 };
 
-static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
 static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
 // This is for delete, not delete[].
 static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
@@ -134,20 +133,7 @@
 }
 
 
-bool Bootstrapper::NativesCacheLookup(Vector<const char> name,
-                                      Handle<JSFunction>* handle) {
-  return natives_cache.Lookup(name, handle);
-}
-
-
-void Bootstrapper::NativesCacheAdd(Vector<const char> name,
-                                   Handle<JSFunction> fun) {
-  natives_cache.Add(name, fun);
-}
-
-
 void Bootstrapper::Initialize(bool create_heap_objects) {
-  natives_cache.Initialize(create_heap_objects);
   extensions_cache.Initialize(create_heap_objects);
 }
 
@@ -187,8 +173,7 @@
     delete_these_arrays_on_tear_down = NULL;
   }
 
-  natives_cache.Initialize(false);  // Yes, symmetrical
-  extensions_cache.Initialize(false);
+  extensions_cache.Initialize(false);  // Yes, symmetrical
 }
 
 
@@ -197,17 +182,11 @@
   Genesis(Handle<Object> global_object,
           v8::Handle<v8::ObjectTemplate> global_template,
           v8::ExtensionConfiguration* extensions);
-  ~Genesis();
+  ~Genesis() { }
 
   Handle<Context> result() { return result_; }
 
   Genesis* previous() { return previous_; }
-  static Genesis* current() { return current_; }
-
-  // Support for thread preemption.
-  static int ArchiveSpacePerThread();
-  static char* ArchiveState(char* to);
-  static char* RestoreState(char* from);
 
  private:
   Handle<Context> global_context_;
@@ -216,18 +195,46 @@
   // triggered during environment creation there may be weak handle
   // processing callbacks which may create new environments.
   Genesis* previous_;
-  static Genesis* current_;
 
   Handle<Context> global_context() { return global_context_; }
 
-  void CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
-                   Handle<Object> global_object);
+  // Creates some basic objects. Used for creating a context from scratch.
+  void CreateRoots();
+  // Creates the empty function.  Used for creating a context from scratch.
+  Handle<JSFunction> CreateEmptyFunction();
+  // Creates the global objects using the global and the template passed in
+  // through the API.  We call this regardless of whether we are building a
+  // context from scratch or using a deserialized one from the partial snapshot
+  // but in the latter case we don't use the objects it produces directly, as
+  // we have to use the deserialized ones that are linked together with the
+  // rest of the context snapshot.
+  Handle<JSGlobalProxy> CreateNewGlobals(
+      v8::Handle<v8::ObjectTemplate> global_template,
+      Handle<Object> global_object,
+      Handle<GlobalObject>* global_proxy_out);
+  // Hooks the given global proxy into the context.  If the context was created
+  // by deserialization then this will unhook the global proxy that was
+  // deserialized, leaving the GC to pick it up.
+  void HookUpGlobalProxy(Handle<GlobalObject> inner_global,
+                         Handle<JSGlobalProxy> global_proxy);
+  // Similarly, we want to use the inner global that has been created by the
+  // templates passed through the API.  The inner global from the snapshot is
+  // detached from the other objects in the snapshot.
+  void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
+  // New context initialization.  Used for creating a context from scratch.
+  void InitializeGlobal(Handle<GlobalObject> inner_global,
+                        Handle<JSFunction> empty_function);
+  // Installs the contents of the native .js files on the global objects.
+  // Used for creating a context from scratch.
   void InstallNativeFunctions();
   bool InstallNatives();
-  bool InstallExtensions(v8::ExtensionConfiguration* extensions);
-  bool InstallExtension(const char* name);
-  bool InstallExtension(v8::RegisteredExtension* current);
-  bool InstallSpecialObjects();
+  // Used both for deserialized and from-scratch contexts to add the extensions
+  // provided.
+  static bool InstallExtensions(Handle<Context> global_context,
+                                v8::ExtensionConfiguration* extensions);
+  static bool InstallExtension(const char* name);
+  static bool InstallExtension(v8::RegisteredExtension* current);
+  static void InstallSpecialObjects(Handle<Context> global_context);
   bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
   bool ConfigureApiObject(Handle<JSObject> object,
                           Handle<ObjectTemplateInfo> object_template);
@@ -251,33 +258,36 @@
                                   Handle<String> source,
                                   SourceCodeCache* cache,
                                   v8::Extension* extension,
+                                  Handle<Context> top_context,
                                   bool use_runtime_context);
 
   Handle<Context> result_;
+  Handle<JSFunction> empty_function_;
+  BootstrapperActive active_;
+  friend class Bootstrapper;
 };
 
-Genesis* Genesis::current_ = NULL;
-
 
 void Bootstrapper::Iterate(ObjectVisitor* v) {
-  natives_cache.Iterate(v);
-  v->Synchronize("NativesCache");
   extensions_cache.Iterate(v);
   v->Synchronize("Extensions");
 }
 
 
-bool Bootstrapper::IsActive() {
-  return Genesis::current() != NULL;
-}
-
-
 Handle<Context> Bootstrapper::CreateEnvironment(
     Handle<Object> global_object,
     v8::Handle<v8::ObjectTemplate> global_template,
     v8::ExtensionConfiguration* extensions) {
+  HandleScope scope;
+  Handle<Context> env;
   Genesis genesis(global_object, global_template, extensions);
-  return genesis.result();
+  env = genesis.result();
+  if (!env.is_null()) {
+    if (InstallExtensions(env, extensions)) {
+      return env;
+    }
+  }
+  return Handle<Context>();
 }
 
 
@@ -299,12 +309,6 @@
 }
 
 
-Genesis::~Genesis() {
-  ASSERT(current_ == this);
-  current_ = previous_;
-}
-
-
 static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
                                           const char* name,
                                           InstanceType type,
@@ -384,22 +388,7 @@
 }
 
 
-void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
-                          Handle<Object> global_object) {
-  HandleScope scope;
-  // Allocate the global context FixedArray first and then patch the
-  // closure and extension object later (we need the empty function
-  // and the global object, but in order to create those, we need the
-  // global context).
-  global_context_ =
-      Handle<Context>::cast(
-          GlobalHandles::Create(*Factory::NewGlobalContext()));
-  Top::set_context(*global_context());
-
-  // Allocate the message listeners object.
-  v8::NeanderArray listeners;
-  global_context()->set_message_listeners(*listeners.value());
-
+Handle<JSFunction> Genesis::CreateEmptyFunction() {
   // Allocate the map for function instances.
   Handle<Map> fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   global_context()->set_function_instance_map(*fm);
@@ -443,138 +432,196 @@
   Handle<JSFunction> empty_function =
       Factory::NewFunction(symbol, Factory::null_value());
 
-  {  // --- E m p t y ---
-    Handle<Code> code =
-        Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
-    empty_function->set_code(*code);
-    Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
-    Handle<Script> script = Factory::NewScript(source);
-    script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
-    empty_function->shared()->set_script(*script);
-    empty_function->shared()->set_start_position(0);
-    empty_function->shared()->set_end_position(source->length());
-    empty_function->shared()->DontAdaptArguments();
-    global_context()->function_map()->set_prototype(*empty_function);
-    global_context()->function_instance_map()->set_prototype(*empty_function);
+  // --- E m p t y ---
+  Handle<Code> code =
+      Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
+  empty_function->set_code(*code);
+  Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
+  Handle<Script> script = Factory::NewScript(source);
+  script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
+  empty_function->shared()->set_script(*script);
+  empty_function->shared()->set_start_position(0);
+  empty_function->shared()->set_end_position(source->length());
+  empty_function->shared()->DontAdaptArguments();
+  global_context()->function_map()->set_prototype(*empty_function);
+  global_context()->function_instance_map()->set_prototype(*empty_function);
 
-    // Allocate the function map first and then patch the prototype later
-    Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
-    empty_fm->set_instance_descriptors(*function_map_descriptors);
-    empty_fm->set_prototype(global_context()->object_function()->prototype());
-    empty_function->set_map(*empty_fm);
+  // Allocate the function map first and then patch the prototype later
+  Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
+  empty_fm->set_instance_descriptors(*function_map_descriptors);
+  empty_fm->set_prototype(global_context()->object_function()->prototype());
+  empty_function->set_map(*empty_fm);
+  return empty_function;
+}
+
+
+void Genesis::CreateRoots() {
+  // Allocate the global context FixedArray first and then patch the
+  // closure and extension object later (we need the empty function
+  // and the global object, but in order to create those, we need the
+  // global context).
+  global_context_ =
+      Handle<Context>::cast(
+          GlobalHandles::Create(*Factory::NewGlobalContext()));
+  Top::set_context(*global_context());
+
+  // Allocate the message listeners object.
+  {
+    v8::NeanderArray listeners;
+    global_context()->set_message_listeners(*listeners.value());
+  }
+}
+
+
+Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
+    v8::Handle<v8::ObjectTemplate> global_template,
+    Handle<Object> global_object,
+    Handle<GlobalObject>* inner_global_out) {
+  // The argument global_template aka data is an ObjectTemplateInfo.
+  // It has a constructor pointer that points at global_constructor which is a
+  // FunctionTemplateInfo.
+  // The global_constructor is used to create or reinitialize the global_proxy.
+  // The global_constructor also has a prototype_template pointer that points at
+  // js_global_template which is an ObjectTemplateInfo.
+  // That in turn has a constructor pointer that points at
+  // js_global_constructor which is a FunctionTemplateInfo.
+  // js_global_constructor is used to make js_global_function
+  // js_global_function is used to make the new inner_global.
+  //
+  // --- G l o b a l ---
+  // Step 1: Create a fresh inner JSGlobalObject.
+  Handle<JSFunction> js_global_function;
+  Handle<ObjectTemplateInfo> js_global_template;
+  if (!global_template.IsEmpty()) {
+    // Get prototype template of the global_template.
+    Handle<ObjectTemplateInfo> data =
+        v8::Utils::OpenHandle(*global_template);
+    Handle<FunctionTemplateInfo> global_constructor =
+        Handle<FunctionTemplateInfo>(
+            FunctionTemplateInfo::cast(data->constructor()));
+    Handle<Object> proto_template(global_constructor->prototype_template());
+    if (!proto_template->IsUndefined()) {
+      js_global_template =
+          Handle<ObjectTemplateInfo>::cast(proto_template);
+    }
   }
 
-  {  // --- G l o b a l ---
-    // Step 1: create a fresh inner JSGlobalObject
-    Handle<GlobalObject> object;
-    {
-      Handle<JSFunction> js_global_function;
-      Handle<ObjectTemplateInfo> js_global_template;
-      if (!global_template.IsEmpty()) {
-        // Get prototype template of the global_template
-        Handle<ObjectTemplateInfo> data =
-            v8::Utils::OpenHandle(*global_template);
-        Handle<FunctionTemplateInfo> global_constructor =
-            Handle<FunctionTemplateInfo>(
-                FunctionTemplateInfo::cast(data->constructor()));
-        Handle<Object> proto_template(global_constructor->prototype_template());
-        if (!proto_template->IsUndefined()) {
-          js_global_template =
-              Handle<ObjectTemplateInfo>::cast(proto_template);
-        }
-      }
-
-      if (js_global_template.is_null()) {
-        Handle<String> name = Handle<String>(Heap::empty_symbol());
-        Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
-        js_global_function =
-            Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
-                                 JSGlobalObject::kSize, code, true);
-        // Change the constructor property of the prototype of the
-        // hidden global function to refer to the Object function.
-        Handle<JSObject> prototype =
-            Handle<JSObject>(
-                JSObject::cast(js_global_function->instance_prototype()));
-        SetProperty(prototype, Factory::constructor_symbol(),
-                    Top::object_function(), NONE);
-      } else {
-        Handle<FunctionTemplateInfo> js_global_constructor(
-            FunctionTemplateInfo::cast(js_global_template->constructor()));
-        js_global_function =
-            Factory::CreateApiFunction(js_global_constructor,
-                                       Factory::InnerGlobalObject);
-      }
-
-      js_global_function->initial_map()->set_is_hidden_prototype();
-      object = Factory::NewGlobalObject(js_global_function);
-    }
-
-    // Set the global context for the global object.
-    object->set_global_context(*global_context());
-
-    // Step 2: create or re-initialize the global proxy object.
-    Handle<JSGlobalProxy> global_proxy;
-    {
-      Handle<JSFunction> global_proxy_function;
-      if (global_template.IsEmpty()) {
-        Handle<String> name = Handle<String>(Heap::empty_symbol());
-        Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
-        global_proxy_function =
-            Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE,
-                                 JSGlobalProxy::kSize, code, true);
-      } else {
-        Handle<ObjectTemplateInfo> data =
-            v8::Utils::OpenHandle(*global_template);
-        Handle<FunctionTemplateInfo> global_constructor(
-                FunctionTemplateInfo::cast(data->constructor()));
-        global_proxy_function =
-            Factory::CreateApiFunction(global_constructor,
-                                       Factory::OuterGlobalObject);
-      }
-
-      Handle<String> global_name = Factory::LookupAsciiSymbol("global");
-      global_proxy_function->shared()->set_instance_class_name(*global_name);
-      global_proxy_function->initial_map()->set_is_access_check_needed(true);
-
-      // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
-
-      if (global_object.location() != NULL) {
-        ASSERT(global_object->IsJSGlobalProxy());
-        global_proxy =
-            ReinitializeJSGlobalProxy(
-                global_proxy_function,
-                Handle<JSGlobalProxy>::cast(global_object));
-      } else {
-        global_proxy = Handle<JSGlobalProxy>::cast(
-            Factory::NewJSObject(global_proxy_function, TENURED));
-      }
-
-      // Security setup: Set the security token of the global object to
-      // its the inner global. This makes the security check between two
-      // different contexts fail by default even in case of global
-      // object reinitialization.
-      object->set_global_receiver(*global_proxy);
-      global_proxy->set_context(*global_context());
-    }
-
-    {  // --- G l o b a l   C o n t e x t ---
-      // use the empty function as closure (no scope info)
-      global_context()->set_closure(*empty_function);
-      global_context()->set_fcontext(*global_context());
-      global_context()->set_previous(NULL);
-
-      // set extension and global object
-      global_context()->set_extension(*object);
-      global_context()->set_global(*object);
-      global_context()->set_global_proxy(*global_proxy);
-      // use inner global object as security token by default
-      global_context()->set_security_token(*object);
-    }
-
-    Handle<JSObject> global = Handle<JSObject>(global_context()->global());
-    SetProperty(global, object_name, Top::object_function(), DONT_ENUM);
+  if (js_global_template.is_null()) {
+    Handle<String> name = Handle<String>(Heap::empty_symbol());
+    Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+    js_global_function =
+        Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+                             JSGlobalObject::kSize, code, true);
+    // Change the constructor property of the prototype of the
+    // hidden global function to refer to the Object function.
+    Handle<JSObject> prototype =
+        Handle<JSObject>(
+            JSObject::cast(js_global_function->instance_prototype()));
+    SetProperty(prototype, Factory::constructor_symbol(),
+                Top::object_function(), NONE);
+  } else {
+    Handle<FunctionTemplateInfo> js_global_constructor(
+        FunctionTemplateInfo::cast(js_global_template->constructor()));
+    js_global_function =
+        Factory::CreateApiFunction(js_global_constructor,
+                                   Factory::InnerGlobalObject);
   }
 
+  js_global_function->initial_map()->set_is_hidden_prototype();
+  Handle<GlobalObject> inner_global =
+      Factory::NewGlobalObject(js_global_function);
+  if (inner_global_out != NULL) {
+    *inner_global_out = inner_global;
+  }
+
+  // Step 2: create or re-initialize the global proxy object.
+  Handle<JSFunction> global_proxy_function;
+  if (global_template.IsEmpty()) {
+    Handle<String> name = Handle<String>(Heap::empty_symbol());
+    Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+    global_proxy_function =
+        Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE,
+                             JSGlobalProxy::kSize, code, true);
+  } else {
+    Handle<ObjectTemplateInfo> data =
+        v8::Utils::OpenHandle(*global_template);
+    Handle<FunctionTemplateInfo> global_constructor(
+            FunctionTemplateInfo::cast(data->constructor()));
+    global_proxy_function =
+        Factory::CreateApiFunction(global_constructor,
+                                   Factory::OuterGlobalObject);
+  }
+
+  Handle<String> global_name = Factory::LookupAsciiSymbol("global");
+  global_proxy_function->shared()->set_instance_class_name(*global_name);
+  global_proxy_function->initial_map()->set_is_access_check_needed(true);
+
+  // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
+  // Return the global proxy.
+
+  if (global_object.location() != NULL) {
+    ASSERT(global_object->IsJSGlobalProxy());
+    return ReinitializeJSGlobalProxy(
+        global_proxy_function,
+        Handle<JSGlobalProxy>::cast(global_object));
+  } else {
+    return Handle<JSGlobalProxy>::cast(
+        Factory::NewJSObject(global_proxy_function, TENURED));
+  }
+}
+
+
+void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global,
+                                Handle<JSGlobalProxy> global_proxy) {
+  // Set the global context for the global object.
+  inner_global->set_global_context(*global_context());
+  inner_global->set_global_receiver(*global_proxy);
+  global_proxy->set_context(*global_context());
+  global_context()->set_global_proxy(*global_proxy);
+}
+
+
+void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
+  Handle<GlobalObject> inner_global_from_snapshot(
+      GlobalObject::cast(global_context_->extension()));
+  Handle<JSBuiltinsObject> builtins_global(global_context_->builtins());
+  global_context_->set_extension(*inner_global);
+  global_context_->set_global(*inner_global);
+  global_context_->set_security_token(*inner_global);
+  static const PropertyAttributes attributes =
+      static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+  ForceSetProperty(builtins_global,
+                   Factory::LookupAsciiSymbol("global"),
+                   inner_global,
+                   attributes);
+  // Setup the reference from the global object to the builtins object.
+  JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
+  TransferNamedProperties(inner_global_from_snapshot, inner_global);
+  TransferIndexedProperties(inner_global_from_snapshot, inner_global);
+}
+
+
+// This is only called if we are not using snapshots.  The equivalent
+// work in the snapshot case is done in HookUpInnerGlobal.
+void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
+                               Handle<JSFunction> empty_function) {
+  // --- G l o b a l   C o n t e x t ---
+  // Use the empty function as closure (no scope info).
+  global_context()->set_closure(*empty_function);
+  global_context()->set_fcontext(*global_context());
+  global_context()->set_previous(NULL);
+  // Set extension and global object.
+  global_context()->set_extension(*inner_global);
+  global_context()->set_global(*inner_global);
+  // Security setup: Set the security token of the global object to
+  // its the inner global. This makes the security check between two
+  // different contexts fail by default even in case of global
+  // object reinitialization.
+  global_context()->set_security_token(*inner_global);
+
+  Handle<String> object_name = Handle<String>(Heap::Object_symbol());
+  SetProperty(inner_global, object_name, Top::object_function(), DONT_ENUM);
+
   Handle<JSObject> global = Handle<JSObject>(global_context()->global());
 
   // Install global Function object
@@ -791,8 +838,12 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debugger::set_compiling_natives(true);
 #endif
-  bool result =
-      CompileScriptCached(name, source, &natives_cache, NULL, true);
+  bool result = CompileScriptCached(name,
+                                    source,
+                                    NULL,
+                                    NULL,
+                                    Handle<Context>(Top::context()),
+                                    true);
   ASSERT(Top::has_pending_exception() != result);
   if (!result) Top::clear_pending_exception();
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -806,46 +857,46 @@
                                   Handle<String> source,
                                   SourceCodeCache* cache,
                                   v8::Extension* extension,
+                                  Handle<Context> top_context,
                                   bool use_runtime_context) {
   HandleScope scope;
-  Handle<JSFunction> boilerplate;
+  Handle<SharedFunctionInfo> function_info;
 
   // If we can't find the function in the cache, we compile a new
   // function and insert it into the cache.
-  if (!cache->Lookup(name, &boilerplate)) {
+  if (cache == NULL || !cache->Lookup(name, &function_info)) {
     ASSERT(source->IsAsciiRepresentation());
     Handle<String> script_name = Factory::NewStringFromUtf8(name);
-    boilerplate =
-        Compiler::Compile(
-            source,
-            script_name,
-            0,
-            0,
-            extension,
-            NULL,
-            Handle<String>::null(),
-            use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
-    if (boilerplate.is_null()) return false;
-    cache->Add(name, boilerplate);
+    function_info = Compiler::Compile(
+        source,
+        script_name,
+        0,
+        0,
+        extension,
+        NULL,
+        Handle<String>::null(),
+        use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
+    if (function_info.is_null()) return false;
+    if (cache != NULL) cache->Add(name, function_info);
   }
 
   // Setup the function context. Conceptually, we should clone the
   // function before overwriting the context but since we're in a
   // single-threaded environment it is not strictly necessary.
-  ASSERT(Top::context()->IsGlobalContext());
+  ASSERT(top_context->IsGlobalContext());
   Handle<Context> context =
       Handle<Context>(use_runtime_context
-                      ? Top::context()->runtime_context()
-                      : Top::context());
+                      ? Handle<Context>(top_context->runtime_context())
+                      : top_context);
   Handle<JSFunction> fun =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromSharedFunctionInfo(function_info, context);
 
   // Call function using either the runtime object or the global
   // object as the receiver. Provide no parameters.
   Handle<Object> receiver =
       Handle<Object>(use_runtime_context
-                     ? Top::context()->builtins()
-                     : Top::context()->global());
+                     ? top_context->builtins()
+                     : top_context->global());
   bool has_pending_exception;
   Handle<Object> result =
       Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
@@ -1047,7 +1098,7 @@
     // Allocate the empty script.
     Handle<Script> script = Factory::NewScript(Factory::empty_string());
     script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
-    global_context()->set_empty_script(*script);
+    Heap::public_set_empty_script(*script);
   }
   {
     // Builtin function for OpaqueReference -- a JSValue-based object,
@@ -1063,48 +1114,23 @@
     global_context()->set_opaque_reference_function(*opaque_reference_fun);
   }
 
-  if (FLAG_natives_file == NULL) {
-    // Without natives file, install default natives.
-    for (int i = Natives::GetDelayCount();
-         i < Natives::GetBuiltinsCount();
-         i++) {
-      if (!CompileBuiltin(i)) return false;
-      // TODO(ager): We really only need to install the JS builtin
-      // functions on the builtins object after compiling and running
-      // runtime.js.
-      if (!InstallJSBuiltins(builtins)) return false;
-    }
-
-    // Setup natives with lazy loading.
-    SetupLazy(Handle<JSFunction>(global_context()->date_function()),
-              Natives::GetIndex("date"),
-              Top::global_context(),
-              Handle<Context>(Top::context()->runtime_context()));
-    SetupLazy(Handle<JSFunction>(global_context()->regexp_function()),
-              Natives::GetIndex("regexp"),
-              Top::global_context(),
-              Handle<Context>(Top::context()->runtime_context()));
-    SetupLazy(Handle<JSObject>(global_context()->json_object()),
-              Natives::GetIndex("json"),
-              Top::global_context(),
-              Handle<Context>(Top::context()->runtime_context()));
-
-  } else if (strlen(FLAG_natives_file) != 0) {
-    // Otherwise install natives from natives file if file exists and
-    // compiles.
-    bool exists;
-    Vector<const char> source = ReadFile(FLAG_natives_file, &exists);
-    Handle<String> source_string = Factory::NewStringFromAscii(source);
-    if (source.is_empty()) return false;
-    bool result = CompileNative(CStrVector(FLAG_natives_file), source_string);
-    if (!result) return false;
-
-  } else {
-    // Empty natives file name - do not install any natives.
+  if (FLAG_disable_native_files) {
     PrintF("Warning: Running without installed natives!\n");
     return true;
   }
 
+  // Install natives.
+  for (int i = Natives::GetDebuggerCount();
+       i < Natives::GetBuiltinsCount();
+       i++) {
+    Vector<const char> name = Natives::GetScriptName(i);
+    if (!CompileBuiltin(i)) return false;
+    // TODO(ager): We really only need to install the JS builtin
+    // functions on the builtins object after compiling and running
+    // runtime.js.
+    if (!InstallJSBuiltins(builtins)) return false;
+  }
+
   InstallNativeFunctions();
 
   // Install Function.prototype.call and apply.
@@ -1143,14 +1169,29 @@
 #ifdef DEBUG
   builtins->Verify();
 #endif
+
   return true;
 }
 
 
-bool Genesis::InstallSpecialObjects() {
+int BootstrapperActive::nesting_ = 0;
+
+
+bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
+                                     v8::ExtensionConfiguration* extensions) {
+  BootstrapperActive active;
+  SaveContext saved_context;
+  Top::set_context(*global_context);
+  if (!Genesis::InstallExtensions(global_context, extensions)) return false;
+  Genesis::InstallSpecialObjects(global_context);
+  return true;
+}
+
+
+void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
   HandleScope scope;
   Handle<JSGlobalObject> js_global(
-      JSGlobalObject::cast(global_context()->global()));
+      JSGlobalObject::cast(global_context->global()));
   // Expose the natives in global if a name for it is specified.
   if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
     Handle<String> natives_string =
@@ -1173,13 +1214,12 @@
   if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
     // If loading fails we just bail out without installing the
     // debugger but without tanking the whole context.
-    if (!Debug::Load())
-      return true;
+    if (!Debug::Load()) return;
     // Set the security token for the debugger context to the same as
     // the shell global context to allow calling between these (otherwise
     // exposing debug global object doesn't make much sense).
     Debug::debug_context()->set_security_token(
-        global_context()->security_token());
+        global_context->security_token());
 
     Handle<String> debug_string =
         Factory::LookupAsciiSymbol(FLAG_expose_debug_as);
@@ -1187,19 +1227,18 @@
         Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM);
   }
 #endif
-
-  return true;
 }
 
 
-bool Genesis::InstallExtensions(v8::ExtensionConfiguration* extensions) {
+bool Genesis::InstallExtensions(Handle<Context> global_context,
+                                v8::ExtensionConfiguration* extensions) {
   // Clear coloring of extension list
   v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
   while (current != NULL) {
     current->set_state(v8::UNVISITED);
     current = current->next();
   }
-  // Install auto extensions
+  // Install auto extensions.
   current = v8::RegisteredExtension::first_extension();
   while (current != NULL) {
     if (current->extension()->auto_enable())
@@ -1263,7 +1302,9 @@
   Handle<String> source_code = Factory::NewStringFromAscii(source);
   bool result = CompileScriptCached(CStrVector(extension->name()),
                                     source_code,
-                                    &extensions_cache, extension,
+                                    &extensions_cache,
+                                    extension,
+                                    Handle<Context>(Top::context()),
                                     false);
   ASSERT(Top::has_pending_exception() != result);
   if (!result) {
@@ -1294,7 +1335,7 @@
     v8::Handle<v8::ObjectTemplate> global_proxy_template) {
   Handle<JSObject> global_proxy(
       JSObject::cast(global_context()->global_proxy()));
-  Handle<JSObject> js_global(JSObject::cast(global_context()->global()));
+  Handle<JSObject> inner_global(JSObject::cast(global_context()->global()));
 
   if (!global_proxy_template.IsEmpty()) {
     // Configure the global proxy object.
@@ -1308,11 +1349,11 @@
     if (!proxy_constructor->prototype_template()->IsUndefined()) {
       Handle<ObjectTemplateInfo> inner_data(
           ObjectTemplateInfo::cast(proxy_constructor->prototype_template()));
-      if (!ConfigureApiObject(js_global, inner_data)) return false;
+      if (!ConfigureApiObject(inner_global, inner_data)) return false;
     }
   }
 
-  SetObjectPrototype(global_proxy, js_global);
+  SetObjectPrototype(global_proxy, inner_global);
   return true;
 }
 
@@ -1366,15 +1407,13 @@
           // If the property is already there we skip it
           if (result.IsProperty()) continue;
           HandleScope inner;
-          Handle<DescriptorArray> inst_descs =
-              Handle<DescriptorArray>(to->map()->instance_descriptors());
+          ASSERT(!to->HasFastProperties());
+          // Add to dictionary.
           Handle<String> key = Handle<String>(descs->GetKey(i));
-          Handle<Object> entry = Handle<Object>(descs->GetCallbacksObject(i));
-          inst_descs = Factory::CopyAppendProxyDescriptor(inst_descs,
-                                                          key,
-                                                          entry,
-                                                          details.attributes());
-          to->map()->set_instance_descriptors(*inst_descs);
+          Handle<Object> callbacks(descs->GetCallbacksObject(i));
+          PropertyDetails d =
+              PropertyDetails(details.attributes(), CALLBACKS, details.index());
+          SetNormalizedProperty(to, key, callbacks, d);
           break;
         }
         case MAP_TRANSITION:
@@ -1459,32 +1498,51 @@
 Genesis::Genesis(Handle<Object> global_object,
                  v8::Handle<v8::ObjectTemplate> global_template,
                  v8::ExtensionConfiguration* extensions) {
-  // Link this genesis object into the stacked genesis chain. This
-  // must be done before any early exits because the destructor
-  // will always do unlinking.
-  previous_ = current_;
-  current_  = this;
   result_ = Handle<Context>::null();
-
   // If V8 isn't running and cannot be initialized, just return.
   if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
 
   // Before creating the roots we must save the context and restore it
   // on all function exits.
   HandleScope scope;
-  SaveContext context;
+  SaveContext saved_context;
 
-  CreateRoots(global_template, global_object);
+  Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
+  if (!new_context.is_null()) {
+    global_context_ =
+      Handle<Context>::cast(GlobalHandles::Create(*new_context));
+    Top::set_context(*global_context_);
+    i::Counters::contexts_created_by_snapshot.Increment();
+    result_ = global_context_;
+    JSFunction* empty_function =
+        JSFunction::cast(result_->function_map()->prototype());
+    empty_function_ = Handle<JSFunction>(empty_function);
+    Handle<GlobalObject> inner_global;
+    Handle<JSGlobalProxy> global_proxy =
+        CreateNewGlobals(global_template,
+                         global_object,
+                         &inner_global);
 
-  if (!InstallNatives()) return;
+    HookUpGlobalProxy(inner_global, global_proxy);
+    HookUpInnerGlobal(inner_global);
 
-  MakeFunctionInstancePrototypeWritable();
+    if (!ConfigureGlobalObjects(global_template)) return;
+  } else {
+    // We get here if there was no context snapshot.
+    CreateRoots();
+    Handle<JSFunction> empty_function = CreateEmptyFunction();
+    Handle<GlobalObject> inner_global;
+    Handle<JSGlobalProxy> global_proxy =
+        CreateNewGlobals(global_template, global_object, &inner_global);
+    HookUpGlobalProxy(inner_global, global_proxy);
+    InitializeGlobal(inner_global, empty_function);
+    if (!InstallNatives()) return;
 
-  if (!ConfigureGlobalObjects(global_template)) return;
+    MakeFunctionInstancePrototypeWritable();
 
-  if (!InstallExtensions(extensions)) return;
-
-  if (!InstallSpecialObjects()) return;
+    if (!ConfigureGlobalObjects(global_template)) return;
+    i::Counters::contexts_created_from_scratch.Increment();
+  }
 
   result_ = global_context_;
 }
@@ -1494,46 +1552,46 @@
 
 // Reserve space for statics needing saving and restoring.
 int Bootstrapper::ArchiveSpacePerThread() {
-  return Genesis::ArchiveSpacePerThread();
+  return BootstrapperActive::ArchiveSpacePerThread();
 }
 
 
 // Archive statics that are thread local.
 char* Bootstrapper::ArchiveState(char* to) {
-  return Genesis::ArchiveState(to);
+  return BootstrapperActive::ArchiveState(to);
 }
 
 
 // Restore statics that are thread local.
 char* Bootstrapper::RestoreState(char* from) {
-  return Genesis::RestoreState(from);
+  return BootstrapperActive::RestoreState(from);
 }
 
 
 // Called when the top-level V8 mutex is destroyed.
 void Bootstrapper::FreeThreadResources() {
-  ASSERT(Genesis::current() == NULL);
+  ASSERT(!BootstrapperActive::IsActive());
 }
 
 
 // Reserve space for statics needing saving and restoring.
-int Genesis::ArchiveSpacePerThread() {
-  return sizeof(current_);
+int BootstrapperActive::ArchiveSpacePerThread() {
+  return sizeof(nesting_);
 }
 
 
 // Archive statics that are thread local.
-char* Genesis::ArchiveState(char* to) {
-  *reinterpret_cast<Genesis**>(to) = current_;
-  current_ = NULL;
-  return to + sizeof(current_);
+char* BootstrapperActive::ArchiveState(char* to) {
+  *reinterpret_cast<int*>(to) = nesting_;
+  nesting_ = 0;
+  return to + sizeof(nesting_);
 }
 
 
 // Restore statics that are thread local.
-char* Genesis::RestoreState(char* from) {
-  current_ = *reinterpret_cast<Genesis**>(from);
-  return from + sizeof(current_);
+char* BootstrapperActive::RestoreState(char* from) {
+  nesting_ = *reinterpret_cast<int*>(from);
+  return from + sizeof(nesting_);
 }
 
 } }  // namespace v8::internal
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index cc775b2..72b438a 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -32,6 +32,24 @@
 namespace v8 {
 namespace internal {
 
+
+class BootstrapperActive BASE_EMBEDDED {
+ public:
+  BootstrapperActive() { nesting_++; }
+  ~BootstrapperActive() { nesting_--; }
+
+  // Support for thread preemption.
+  static int ArchiveSpacePerThread();
+  static char* ArchiveState(char* to);
+  static char* RestoreState(char* from);
+
+ private:
+  static bool IsActive() { return nesting_ != 0; }
+  static int nesting_;
+  friend class Bootstrapper;
+};
+
+
 // The Boostrapper is the public interface for creating a JavaScript global
 // context.
 class Bootstrapper : public AllStatic {
@@ -53,14 +71,11 @@
   // Traverses the pointers for memory management.
   static void Iterate(ObjectVisitor* v);
 
-  // Accessors for the native scripts cache. Used in lazy loading.
+  // Accessor for the native scripts source code.
   static Handle<String> NativesSourceLookup(int index);
-  static bool NativesCacheLookup(Vector<const char> name,
-                                 Handle<JSFunction>* handle);
-  static void NativesCacheAdd(Vector<const char> name, Handle<JSFunction> fun);
 
   // Tells whether bootstrapping is active.
-  static bool IsActive();
+  static bool IsActive() { return BootstrapperActive::IsActive(); }
 
   // Encoding/decoding support for fixup flags.
   class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {};
@@ -75,6 +90,10 @@
   // This will allocate a char array that is deleted when V8 is shut down.
   // It should only be used for strictly finite allocations.
   static char* AllocateAutoDeletedArray(int bytes);
+
+  // Used for new context creation.
+  static bool InstallExtensions(Handle<Context> global_context,
+                                v8::ExtensionConfiguration* extensions);
 };
 
 
diff --git a/src/builtins.cc b/src/builtins.cc
index 91cb151..122fbba 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -443,6 +443,38 @@
 }
 
 
+static FixedArray* LeftTrimFixedArray(FixedArray* elms) {
+  // For now this trick is only applied to fixed arrays in new space.
+  // In large object space the object's start must coincide with chunk
+  // and thus the trick is just not applicable.
+  // In old space we do not use this trick to avoid dealing with
+  // remembered sets.
+  ASSERT(Heap::new_space()->Contains(elms));
+
+  Object** former_map =
+      HeapObject::RawField(elms, FixedArray::kMapOffset);
+  Object** former_length =
+      HeapObject::RawField(elms, FixedArray::kLengthOffset);
+  Object** former_first =
+      HeapObject::RawField(elms, FixedArray::kHeaderSize);
+  // Check that we don't forget to copy all the bits.
+  STATIC_ASSERT(FixedArray::kMapOffset + 2 * kPointerSize
+      == FixedArray::kHeaderSize);
+
+  int len = elms->length();
+
+  *former_first = reinterpret_cast<Object*>(len - 1);
+  *former_length = Heap::fixed_array_map();
+  // Technically in new space this write might be omitted (except for
+  // debug mode which iterates through the heap), but to play safer
+  // we still do it.
+  *former_map = Heap::raw_unchecked_one_pointer_filler_map();
+
+  ASSERT(elms->address() + kPointerSize == (elms + kPointerSize)->address());
+  return elms + kPointerSize;
+}
+
+
 BUILTIN(ArrayShift) {
   Object* receiver = *args.receiver();
   FixedArray* elms = NULL;
@@ -462,10 +494,14 @@
     first = Heap::undefined_value();
   }
 
-  // Shift the elements.
-  AssertNoAllocation no_gc;
-  MoveElements(&no_gc, elms, 0, elms, 1, len - 1);
-  elms->set(len - 1, Heap::the_hole_value());
+  if (Heap::new_space()->Contains(elms)) {
+    array->set_elements(LeftTrimFixedArray(elms));
+  } else {
+    // Shift the elements.
+    AssertNoAllocation no_gc;
+    MoveElements(&no_gc, elms, 0, elms, 1, len - 1);
+    elms->set(len - 1, Heap::the_hole_value());
+  }
 
   // Set the length.
   array->set_length(Smi::FromInt(len - 1));
diff --git a/src/circular-queue-inl.h b/src/circular-queue-inl.h
index ffe8fb0..962b069 100644
--- a/src/circular-queue-inl.h
+++ b/src/circular-queue-inl.h
@@ -82,11 +82,10 @@
 
 
 void* SamplingCircularQueue::Enqueue() {
-  Cell* enqueue_pos = reinterpret_cast<Cell*>(
-      Thread::GetThreadLocal(producer_key_));
-  WrapPositionIfNeeded(&enqueue_pos);
-  Thread::SetThreadLocal(producer_key_, enqueue_pos + record_size_);
-  return enqueue_pos;
+  WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
+  void* result = producer_pos_->enqueue_pos;
+  producer_pos_->enqueue_pos += record_size_;
+  return result;
 }
 
 
diff --git a/src/circular-queue.cc b/src/circular-queue.cc
index 5f7a33e..a7c2532 100644
--- a/src/circular-queue.cc
+++ b/src/circular-queue.cc
@@ -52,52 +52,44 @@
     buffer_[i] = kClear;
   }
   buffer_[buffer_size_] = kEnd;
+
+  // Layout producer and consumer position pointers each on their own
+  // cache lines to avoid cache lines thrashing due to simultaneous
+  // updates of positions by different processor cores.
+  const int positions_size =
+      RoundUp(1, kProcessorCacheLineSize) +
+      RoundUp(sizeof(ProducerPosition), kProcessorCacheLineSize) +
+      RoundUp(sizeof(ConsumerPosition), kProcessorCacheLineSize);
+  positions_ = NewArray<byte>(positions_size);
+
+  producer_pos_ = reinterpret_cast<ProducerPosition*>(
+      RoundUp(positions_, kProcessorCacheLineSize));
+  producer_pos_->enqueue_pos = buffer_;
+
+  consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
+      reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize);
+  ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
+         positions_ + positions_size);
+  consumer_pos_->dequeue_chunk_pos = buffer_;
+  consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
+  consumer_pos_->dequeue_pos = NULL;
 }
 
 
 SamplingCircularQueue::~SamplingCircularQueue() {
+  DeleteArray(positions_);
   DeleteArray(buffer_);
 }
 
 
-void SamplingCircularQueue::SetUpProducer() {
-  producer_key_ = Thread::CreateThreadLocalKey();
-  Thread::SetThreadLocal(producer_key_, buffer_);
-}
-
-
-void SamplingCircularQueue::TearDownProducer() {
-  Thread::DeleteThreadLocalKey(producer_key_);
-}
-
-
-void SamplingCircularQueue::SetUpConsumer() {
-  consumer_key_ = Thread::CreateThreadLocalKey();
-  ConsumerPosition* cp = new ConsumerPosition;
-  cp->dequeue_chunk_pos = buffer_;
-  cp->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
-  cp->dequeue_pos = NULL;
-  Thread::SetThreadLocal(consumer_key_, cp);
-}
-
-
-void SamplingCircularQueue::TearDownConsumer() {
-  delete reinterpret_cast<ConsumerPosition*>(
-      Thread::GetThreadLocal(consumer_key_));
-  Thread::DeleteThreadLocalKey(consumer_key_);
-}
-
-
 void* SamplingCircularQueue::StartDequeue() {
-  ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>(
-      Thread::GetThreadLocal(consumer_key_));
-  if (cp->dequeue_pos != NULL) {
-    return cp->dequeue_pos;
+  if (consumer_pos_->dequeue_pos != NULL) {
+    return consumer_pos_->dequeue_pos;
   } else {
-    if (*cp->dequeue_chunk_poll_pos != kClear) {
-      cp->dequeue_pos = cp->dequeue_chunk_pos;
-      cp->dequeue_end_pos = cp->dequeue_pos + chunk_size_;
-      return cp->dequeue_pos;
+    if (*consumer_pos_->dequeue_chunk_poll_pos != kClear) {
+      consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos;
+      consumer_pos_->dequeue_end_pos = consumer_pos_->dequeue_pos + chunk_size_;
+      return consumer_pos_->dequeue_pos;
     } else {
       return NULL;
     }
@@ -106,25 +98,21 @@
 
 
 void SamplingCircularQueue::FinishDequeue() {
-  ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>(
-      Thread::GetThreadLocal(consumer_key_));
-  cp->dequeue_pos += record_size_;
-  if (cp->dequeue_pos < cp->dequeue_end_pos) return;
+  consumer_pos_->dequeue_pos += record_size_;
+  if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return;
   // Move to next chunk.
-  cp->dequeue_pos = NULL;
-  *cp->dequeue_chunk_pos = kClear;
-  cp->dequeue_chunk_pos += chunk_size_;
-  WrapPositionIfNeeded(&cp->dequeue_chunk_pos);
-  cp->dequeue_chunk_poll_pos += chunk_size_;
-  WrapPositionIfNeeded(&cp->dequeue_chunk_poll_pos);
+  consumer_pos_->dequeue_pos = NULL;
+  *consumer_pos_->dequeue_chunk_pos = kClear;
+  consumer_pos_->dequeue_chunk_pos += chunk_size_;
+  WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos);
+  consumer_pos_->dequeue_chunk_poll_pos += chunk_size_;
+  WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos);
 }
 
 
 void SamplingCircularQueue::FlushResidualRecords() {
-  ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>(
-      Thread::GetThreadLocal(consumer_key_));
   // Eliminate producer / consumer distance.
-  cp->dequeue_chunk_poll_pos = cp->dequeue_chunk_pos;
+  consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos;
 }
 
 
diff --git a/src/circular-queue.h b/src/circular-queue.h
index 11159e0..dce7fc2 100644
--- a/src/circular-queue.h
+++ b/src/circular-queue.h
@@ -76,15 +76,11 @@
                         int buffer_size_in_chunks);
   ~SamplingCircularQueue();
 
-  // Executed on the producer (sampler) or application thread.
-  void SetUpProducer();
   // Enqueue returns a pointer to a memory location for storing the next
   // record.
   INLINE(void* Enqueue());
-  void TearDownProducer();
 
   // Executed on the consumer (analyzer) thread.
-  void SetUpConsumer();
   // StartDequeue returns a pointer to a memory location for retrieving
   // the next record. After the record had been read by a consumer,
   // FinishDequeue must be called. Until that moment, subsequent calls
@@ -95,7 +91,6 @@
   // the queue must be notified whether producing has been finished in order
   // to process remaining records from the buffer.
   void FlushResidualRecords();
-  void TearDownConsumer();
 
   typedef AtomicWord Cell;
   // Reserved values for the first cell of a record.
@@ -103,6 +98,9 @@
   static const Cell kEnd = -1;   // Marks the end of the buffer.
 
  private:
+  struct ProducerPosition {
+    Cell* enqueue_pos;
+  };
   struct ConsumerPosition {
     Cell* dequeue_chunk_pos;
     Cell* dequeue_chunk_poll_pos;
@@ -118,10 +116,9 @@
   const int buffer_size_;
   const int producer_consumer_distance_;
   Cell* buffer_;
-  // Store producer and consumer data in TLS to avoid modifying the
-  // same CPU cache line from two threads simultaneously.
-  Thread::LocalStorageKey consumer_key_;
-  Thread::LocalStorageKey producer_key_;
+  byte* positions_;
+  ProducerPosition* producer_pos_;
+  ConsumerPosition* consumer_pos_;
 };
 
 
diff --git a/src/codegen.cc b/src/codegen.cc
index f9913b9..a23b54f 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -336,8 +336,8 @@
           array->set_undefined(j++);
         }
       } else {
-        Handle<JSFunction> function =
-            Compiler::BuildBoilerplate(node->fun(), script(), this);
+        Handle<SharedFunctionInfo> function =
+            Compiler::BuildFunctionInfo(node->fun(), script(), this);
         // Check for stack-overflow exception.
         if (HasStackOverflow()) return;
         array->set(j++, *function);
diff --git a/src/codegen.h b/src/codegen.h
index 40ed6ce..c459280 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -58,7 +58,7 @@
 //   ProcessDeferred
 //   Generate
 //   ComputeLazyCompile
-//   BuildBoilerplate
+//   BuildFunctionInfo
 //   ComputeCallInitialize
 //   ComputeCallInitializeInLoop
 //   ProcessDeclarations
@@ -346,8 +346,13 @@
  public:
   CompareStub(Condition cc,
               bool strict,
-              NaNInformation nan_info = kBothCouldBeNaN) :
-      cc_(cc), strict_(strict), never_nan_nan_(nan_info == kCantBothBeNaN) { }
+              NaNInformation nan_info = kBothCouldBeNaN,
+              bool include_number_compare = true) :
+      cc_(cc),
+      strict_(strict),
+      never_nan_nan_(nan_info == kCantBothBeNaN),
+      include_number_compare_(include_number_compare),
+      name_(NULL) { }
 
   void Generate(MacroAssembler* masm);
 
@@ -360,6 +365,16 @@
   // generating the minor key for other comparisons to avoid creating more
   // stubs.
   bool never_nan_nan_;
+  // Do generate the number comparison code in the stub. Stubs without number
+  // comparison code is used when the number comparison has been inlined, and
+  // the stub will be called if one of the operands is not a number.
+  bool include_number_compare_;
+
+  // Encoding of the minor key CCCCCCCCCCCCCCNS.
+  class StrictField: public BitField<bool, 0, 1> {};
+  class NeverNanNanField: public BitField<bool, 1, 1> {};
+  class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
+  class ConditionField: public BitField<int, 3, 13> {};
 
   Major MajorKey() { return Compare; }
 
@@ -373,12 +388,16 @@
 
   // Unfortunately you have to run without snapshots to see most of these
   // names in the profile since most compare stubs end up in the snapshot.
+  char* name_;
   const char* GetName();
 #ifdef DEBUG
   void Print() {
-    PrintF("CompareStub (cc %d), (strict %s)\n",
+    PrintF("CompareStub (cc %d), (strict %s), "
+           "(never_nan_nan %s), (number_compare %s)\n",
            static_cast<int>(cc_),
-           strict_ ? "true" : "false");
+           strict_ ? "true" : "false",
+           never_nan_nan_ ? "true" : "false",
+           include_number_compare_ ? "included" : "not included");
   }
 #endif
 };
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 378a24e..f1ab87b 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "compilation-cache.h"
+#include "serialize.h"
 
 namespace v8 {
 namespace internal {
@@ -101,18 +102,18 @@
   explicit CompilationCacheScript(int generations)
       : CompilationSubCache(generations) { }
 
-  Handle<JSFunction> Lookup(Handle<String> source,
-                            Handle<Object> name,
-                            int line_offset,
-                            int column_offset);
-  void Put(Handle<String> source, Handle<JSFunction> boilerplate);
+  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+                                    Handle<Object> name,
+                                    int line_offset,
+                                    int column_offset);
+  void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
 
  private:
   // Note: Returns a new hash table if operation results in expansion.
-  Handle<CompilationCacheTable> TablePut(Handle<String> source,
-                                         Handle<JSFunction> boilerplate);
+  Handle<CompilationCacheTable> TablePut(
+      Handle<String> source, Handle<SharedFunctionInfo> function_info);
 
-  bool HasOrigin(Handle<JSFunction> boilerplate,
+  bool HasOrigin(Handle<SharedFunctionInfo> function_info,
                  Handle<Object> name,
                  int line_offset,
                  int column_offset);
@@ -127,17 +128,19 @@
   explicit CompilationCacheEval(int generations)
       : CompilationSubCache(generations) { }
 
-  Handle<JSFunction> Lookup(Handle<String> source, Handle<Context> context);
+  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+                                    Handle<Context> context);
 
   void Put(Handle<String> source,
            Handle<Context> context,
-           Handle<JSFunction> boilerplate);
+           Handle<SharedFunctionInfo> function_info);
 
  private:
   // Note: Returns a new hash table if operation results in expansion.
-  Handle<CompilationCacheTable> TablePut(Handle<String> source,
-                                         Handle<Context> context,
-                                         Handle<JSFunction> boilerplate);
+  Handle<CompilationCacheTable> TablePut(
+      Handle<String> source,
+      Handle<Context> context,
+      Handle<SharedFunctionInfo> function_info);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
 };
@@ -225,12 +228,13 @@
 // We only re-use a cached function for some script source code if the
 // script originates from the same place. This is to avoid issues
 // when reporting errors, etc.
-bool CompilationCacheScript::HasOrigin(Handle<JSFunction> boilerplate,
-                                       Handle<Object> name,
-                                       int line_offset,
-                                       int column_offset) {
+bool CompilationCacheScript::HasOrigin(
+    Handle<SharedFunctionInfo> function_info,
+    Handle<Object> name,
+    int line_offset,
+    int column_offset) {
   Handle<Script> script =
-      Handle<Script>(Script::cast(boilerplate->shared()->script()));
+      Handle<Script>(Script::cast(function_info->script()));
   // If the script name isn't set, the boilerplate script should have
   // an undefined name to have the same origin.
   if (name.is_null()) {
@@ -250,10 +254,10 @@
 // be cached in the same script generation. Currently the first use
 // will be cached, but subsequent code from different source / line
 // won't.
-Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
-                                                  Handle<Object> name,
-                                                  int line_offset,
-                                                  int column_offset) {
+Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
+                                                          Handle<Object> name,
+                                                          int line_offset,
+                                                          int column_offset) {
   Object* result = NULL;
   int generation;
 
@@ -263,12 +267,13 @@
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
       Handle<Object> probe(table->Lookup(*source));
-      if (probe->IsJSFunction()) {
-        Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(probe);
+      if (probe->IsSharedFunctionInfo()) {
+        Handle<SharedFunctionInfo> function_info =
+            Handle<SharedFunctionInfo>::cast(probe);
         // Break when we've found a suitable boilerplate function that
         // matches the origin.
-        if (HasOrigin(boilerplate, name, line_offset, column_offset)) {
-          result = *boilerplate;
+        if (HasOrigin(function_info, name, line_offset, column_offset)) {
+          result = *function_info;
           break;
         }
       }
@@ -290,38 +295,37 @@
   // to see if we actually found a cached script. If so, we return a
   // handle created in the caller's handle scope.
   if (result != NULL) {
-    Handle<JSFunction> boilerplate(JSFunction::cast(result));
-    ASSERT(HasOrigin(boilerplate, name, line_offset, column_offset));
+    Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
+    ASSERT(HasOrigin(shared, name, line_offset, column_offset));
     // If the script was found in a later generation, we promote it to
     // the first generation to let it survive longer in the cache.
-    if (generation != 0) Put(source, boilerplate);
+    if (generation != 0) Put(source, shared);
     Counters::compilation_cache_hits.Increment();
-    return boilerplate;
+    return shared;
   } else {
     Counters::compilation_cache_misses.Increment();
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 }
 
 
 Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
     Handle<String> source,
-    Handle<JSFunction> boilerplate) {
-  CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *boilerplate),
+    Handle<SharedFunctionInfo> function_info) {
+  CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *function_info),
                      CompilationCacheTable);
 }
 
 
 void CompilationCacheScript::Put(Handle<String> source,
-                                 Handle<JSFunction> boilerplate) {
+                                 Handle<SharedFunctionInfo> function_info) {
   HandleScope scope;
-  ASSERT(boilerplate->IsBoilerplate());
-  SetFirstTable(TablePut(source, boilerplate));
+  SetFirstTable(TablePut(source, function_info));
 }
 
 
-Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
-                                                Handle<Context> context) {
+Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
+    Handle<String> source, Handle<Context> context) {
   // Make sure not to leak the table into the surrounding handle
   // scope. Otherwise, we risk keeping old tables around even after
   // having cleared the cache.
@@ -331,21 +335,22 @@
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
       result = table->LookupEval(*source, *context);
-      if (result->IsJSFunction()) {
+      if (result->IsSharedFunctionInfo()) {
         break;
       }
     }
   }
-  if (result->IsJSFunction()) {
-    Handle<JSFunction> boilerplate(JSFunction::cast(result));
+  if (result->IsSharedFunctionInfo()) {
+    Handle<SharedFunctionInfo>
+        function_info(SharedFunctionInfo::cast(result));
     if (generation != 0) {
-      Put(source, context, boilerplate);
+      Put(source, context, function_info);
     }
     Counters::compilation_cache_hits.Increment();
-    return boilerplate;
+    return function_info;
   } else {
     Counters::compilation_cache_misses.Increment();
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 }
 
@@ -353,18 +358,19 @@
 Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
     Handle<String> source,
     Handle<Context> context,
-    Handle<JSFunction> boilerplate) {
-  CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source, *context, *boilerplate),
+    Handle<SharedFunctionInfo> function_info) {
+  CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source,
+                                              *context,
+                                              *function_info),
                      CompilationCacheTable);
 }
 
 
 void CompilationCacheEval::Put(Handle<String> source,
                                Handle<Context> context,
-                               Handle<JSFunction> boilerplate) {
+                               Handle<SharedFunctionInfo> function_info) {
   HandleScope scope;
-  ASSERT(boilerplate->IsBoilerplate());
-  SetFirstTable(TablePut(source, context, boilerplate));
+  SetFirstTable(TablePut(source, context, function_info));
 }
 
 
@@ -415,26 +421,26 @@
 }
 
 
-Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
-                                                  Handle<Object> name,
-                                                  int line_offset,
-                                                  int column_offset) {
+Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
+                                                          Handle<Object> name,
+                                                          int line_offset,
+                                                          int column_offset) {
   if (!IsEnabled()) {
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 
   return script.Lookup(source, name, line_offset, column_offset);
 }
 
 
-Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source,
-                                                Handle<Context> context,
-                                                bool is_global) {
+Handle<SharedFunctionInfo> CompilationCache::LookupEval(Handle<String> source,
+                                                        Handle<Context> context,
+                                                        bool is_global) {
   if (!IsEnabled()) {
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 
-  Handle<JSFunction> result;
+  Handle<SharedFunctionInfo> result;
   if (is_global) {
     result = eval_global.Lookup(source, context);
   } else {
@@ -455,30 +461,28 @@
 
 
 void CompilationCache::PutScript(Handle<String> source,
-                                 Handle<JSFunction> boilerplate) {
+                                 Handle<SharedFunctionInfo> function_info) {
   if (!IsEnabled()) {
     return;
   }
 
-  ASSERT(boilerplate->IsBoilerplate());
-  script.Put(source, boilerplate);
+  script.Put(source, function_info);
 }
 
 
 void CompilationCache::PutEval(Handle<String> source,
                                Handle<Context> context,
                                bool is_global,
-                               Handle<JSFunction> boilerplate) {
+                               Handle<SharedFunctionInfo> function_info) {
   if (!IsEnabled()) {
     return;
   }
 
   HandleScope scope;
-  ASSERT(boilerplate->IsBoilerplate());
   if (is_global) {
-    eval_global.Put(source, context, boilerplate);
+    eval_global.Put(source, context, function_info);
   } else {
-    eval_contextual.Put(source, context, boilerplate);
+    eval_contextual.Put(source, context, function_info);
   }
 }
 
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 3487c08..d231822 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -40,17 +40,17 @@
   // Finds the script function boilerplate for a source
   // string. Returns an empty handle if the cache doesn't contain a
   // script for the given source string with the right origin.
-  static Handle<JSFunction> LookupScript(Handle<String> source,
-                                         Handle<Object> name,
-                                         int line_offset,
-                                         int column_offset);
+  static Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
+                                                 Handle<Object> name,
+                                                 int line_offset,
+                                                 int column_offset);
 
   // Finds the function boilerplate for a source string for eval in a
   // given context.  Returns an empty handle if the cache doesn't
   // contain a script for the given source string.
-  static Handle<JSFunction> LookupEval(Handle<String> source,
-                                       Handle<Context> context,
-                                       bool is_global);
+  static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
+                                               Handle<Context> context,
+                                               bool is_global);
 
   // Returns the regexp data associated with the given regexp if it
   // is in cache, otherwise an empty handle.
@@ -60,14 +60,14 @@
   // Associate the (source, kind) pair to the boilerplate. This may
   // overwrite an existing mapping.
   static void PutScript(Handle<String> source,
-                        Handle<JSFunction> boilerplate);
+                        Handle<SharedFunctionInfo> function_info);
 
   // Associate the (source, context->closure()->shared(), kind) triple
   // with the boilerplate. This may overwrite an existing mapping.
   static void PutEval(Handle<String> source,
                       Handle<Context> context,
                       bool is_global,
-                      Handle<JSFunction> boilerplate);
+                      Handle<SharedFunctionInfo> function_info);
 
   // Associate the (source, flags) pair to the given regexp data.
   // This may overwrite an existing mapping.
diff --git a/src/compiler.cc b/src/compiler.cc
index 11098ba..a88f18a 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -89,23 +89,29 @@
   }
 
   if (FLAG_use_flow_graph) {
-    FlowGraphBuilder builder;
+    int variable_count =
+        function->num_parameters() + function->scope()->num_stack_slots();
+    FlowGraphBuilder builder(variable_count);
     builder.Build(function);
 
     if (!builder.HasStackOverflow()) {
-      int variable_count =
-          function->num_parameters() + function->scope()->num_stack_slots();
-      if (variable_count > 0 && builder.definitions()->length() > 0) {
+      if (variable_count > 0) {
         ReachingDefinitions rd(builder.postorder(),
-                               builder.definitions(),
+                               builder.body_definitions(),
                                variable_count);
         rd.Compute();
+
+        TypeAnalyzer ta(builder.postorder(),
+                        builder.body_definitions(),
+                        variable_count,
+                        function->num_parameters());
+        ta.Compute();
       }
     }
 
 #ifdef DEBUG
     if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
-      builder.graph()->PrintText(builder.postorder());
+      builder.graph()->PrintText(function, builder.postorder());
     }
 #endif
   }
@@ -156,13 +162,13 @@
 #endif
 
 
-static Handle<JSFunction> MakeFunction(bool is_global,
-                                       bool is_eval,
-                                       Compiler::ValidationState validate,
-                                       Handle<Script> script,
-                                       Handle<Context> context,
-                                       v8::Extension* extension,
-                                       ScriptDataImpl* pre_data) {
+static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
+    bool is_eval,
+    Compiler::ValidationState validate,
+    Handle<Script> script,
+    Handle<Context> context,
+    v8::Extension* extension,
+    ScriptDataImpl* pre_data) {
   CompilationZoneScope zone_scope(DELETE_ON_EXIT);
 
   PostponeInterruptsScope postpone;
@@ -204,7 +210,7 @@
   // Check for parse errors.
   if (lit == NULL) {
     ASSERT(Top::has_pending_exception());
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 
   // Measure how long it takes to do the compilation; only take the
@@ -222,7 +228,7 @@
   // Check for stack-overflow exceptions.
   if (code.is_null()) {
     Top::StackOverflow();
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 
 #if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
@@ -248,38 +254,39 @@
 #endif
 
   // Allocate function.
-  Handle<JSFunction> fun =
-      Factory::NewFunctionBoilerplate(lit->name(),
-                                      lit->materialized_literal_count(),
-                                      code);
+  Handle<SharedFunctionInfo> result =
+      Factory::NewSharedFunctionInfo(lit->name(),
+                                     lit->materialized_literal_count(),
+                                     code);
 
   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
-  Compiler::SetFunctionInfo(fun, lit, true, script);
+  Compiler::SetFunctionInfo(result, lit, true, script);
 
   // Hint to the runtime system used when allocating space for initial
   // property space by setting the expected number of properties for
   // the instances of the function.
-  SetExpectedNofPropertiesFromEstimate(fun, lit->expected_property_count());
+  SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger
   Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
 #endif
 
-  return fun;
+  return result;
 }
 
 
 static StaticResource<SafeStringInputBuffer> safe_string_input_buffer;
 
 
-Handle<JSFunction> Compiler::Compile(Handle<String> source,
-                                     Handle<Object> script_name,
-                                     int line_offset, int column_offset,
-                                     v8::Extension* extension,
-                                     ScriptDataImpl* input_pre_data,
-                                     Handle<Object> script_data,
-                                     NativesFlag natives) {
+Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
+                                             Handle<Object> script_name,
+                                             int line_offset,
+                                             int column_offset,
+                                             v8::Extension* extension,
+                                             ScriptDataImpl* input_pre_data,
+                                             Handle<Object> script_data,
+                                             NativesFlag natives) {
   int source_length = source->length();
   Counters::total_load_size.Increment(source_length);
   Counters::total_compile_size.Increment(source_length);
@@ -288,7 +295,7 @@
   VMState state(COMPILER);
 
   // Do a lookup in the compilation cache but not for extensions.
-  Handle<JSFunction> result;
+  Handle<SharedFunctionInfo> result;
   if (extension == NULL) {
     result = CompilationCache::LookupScript(source,
                                             script_name,
@@ -320,13 +327,13 @@
                                            : *script_data);
 
     // Compile the function and add it to the cache.
-    result = MakeFunction(true,
-                          false,
-                          DONT_VALIDATE_JSON,
-                          script,
-                          Handle<Context>::null(),
-                          extension,
-                          pre_data);
+    result = MakeFunctionInfo(true,
+                              false,
+                              DONT_VALIDATE_JSON,
+                              script,
+                              Handle<Context>::null(),
+                              extension,
+                              pre_data);
     if (extension == NULL && !result.is_null()) {
       CompilationCache::PutScript(source, result);
     }
@@ -342,10 +349,10 @@
 }
 
 
-Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
-                                         Handle<Context> context,
-                                         bool is_global,
-                                         ValidationState validate) {
+Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
+                                                 Handle<Context> context,
+                                                 bool is_global,
+                                                 ValidationState validate) {
   // Note that if validation is required then no path through this
   // function is allowed to return a value without validating that
   // the input is legal json.
@@ -361,20 +368,20 @@
   // invoke the compiler and add the result to the cache.  If we're
   // evaluating json we bypass the cache since we can't be sure a
   // potential value in the cache has been validated.
-  Handle<JSFunction> result;
+  Handle<SharedFunctionInfo> result;
   if (validate == DONT_VALIDATE_JSON)
     result = CompilationCache::LookupEval(source, context, is_global);
 
   if (result.is_null()) {
     // Create a script object describing the script to be compiled.
     Handle<Script> script = Factory::NewScript(source);
-    result = MakeFunction(is_global,
-                          true,
-                          validate,
-                          script,
-                          context,
-                          NULL,
-                          NULL);
+    result = MakeFunctionInfo(is_global,
+                              true,
+                              validate,
+                              script,
+                              context,
+                              NULL,
+                              NULL);
     if (!result.is_null() && validate != VALIDATE_JSON) {
       // For json it's unlikely that we'll ever see exactly the same
       // string again so we don't use the compilation cache.
@@ -459,9 +466,9 @@
 }
 
 
-Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
-                                              Handle<Script> script,
-                                              AstVisitor* caller) {
+Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
+                                                       Handle<Script> script,
+                                                       AstVisitor* caller) {
 #ifdef DEBUG
   // We should not try to compile the same function literal more than
   // once.
@@ -484,7 +491,7 @@
     // The bodies of function literals have not yet been visited by
     // the AST optimizer/analyzer.
     if (!Rewriter::Optimize(literal)) {
-      return Handle<JSFunction>::null();
+      return Handle<SharedFunctionInfo>::null();
     }
 
     if (literal->scope()->num_parameters() > 0 ||
@@ -492,28 +499,34 @@
       AssignedVariablesAnalyzer ava(literal);
       ava.Analyze();
       if (ava.HasStackOverflow()) {
-        return Handle<JSFunction>::null();
+        return Handle<SharedFunctionInfo>::null();
       }
     }
 
     if (FLAG_use_flow_graph) {
-      FlowGraphBuilder builder;
-      builder.Build(literal);
-
-    if (!builder.HasStackOverflow()) {
       int variable_count =
           literal->num_parameters() + literal->scope()->num_stack_slots();
-      if (variable_count > 0 && builder.definitions()->length() > 0) {
-        ReachingDefinitions rd(builder.postorder(),
-                               builder.definitions(),
-                               variable_count);
-        rd.Compute();
+      FlowGraphBuilder builder(variable_count);
+      builder.Build(literal);
+
+      if (!builder.HasStackOverflow()) {
+        if (variable_count > 0) {
+          ReachingDefinitions rd(builder.postorder(),
+                                 builder.body_definitions(),
+                                 variable_count);
+          rd.Compute();
+
+          TypeAnalyzer ta(builder.postorder(),
+                          builder.body_definitions(),
+                          variable_count,
+                          literal->num_parameters());
+          ta.Compute();
+        }
       }
-    }
 
 #ifdef DEBUG
       if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
-        builder.graph()->PrintText(builder.postorder());
+        builder.graph()->PrintText(literal, builder.postorder());
       }
 #endif
     }
@@ -553,7 +566,7 @@
     // Check for stack-overflow exception.
     if (code.is_null()) {
       caller->SetStackOverflow();
-      return Handle<JSFunction>::null();
+      return Handle<SharedFunctionInfo>::null();
     }
 
     // Function compilation complete.
@@ -569,22 +582,17 @@
   }
 
   // Create a boilerplate function.
-  Handle<JSFunction> function =
-      Factory::NewFunctionBoilerplate(literal->name(),
-                                      literal->materialized_literal_count(),
-                                      code);
-  SetFunctionInfo(function, literal, false, script);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Notify debugger that a new function has been added.
-  Debugger::OnNewFunction(function);
-#endif
+  Handle<SharedFunctionInfo> result =
+      Factory::NewSharedFunctionInfo(literal->name(),
+                                     literal->materialized_literal_count(),
+                                     code);
+  SetFunctionInfo(result, literal, false, script);
 
   // Set the expected number of properties for instances and return
   // the resulting function.
-  SetExpectedNofPropertiesFromEstimate(function,
+  SetExpectedNofPropertiesFromEstimate(result,
                                        literal->expected_property_count());
-  return function;
+  return result;
 }
 
 
@@ -592,23 +600,23 @@
 // The start_position points to the first '(' character after the function name
 // in the full script source. When counting characters in the script source the
 // the first character is number 0 (not 1).
-void Compiler::SetFunctionInfo(Handle<JSFunction> fun,
+void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
                                FunctionLiteral* lit,
                                bool is_toplevel,
                                Handle<Script> script) {
-  fun->shared()->set_length(lit->num_parameters());
-  fun->shared()->set_formal_parameter_count(lit->num_parameters());
-  fun->shared()->set_script(*script);
-  fun->shared()->set_function_token_position(lit->function_token_position());
-  fun->shared()->set_start_position(lit->start_position());
-  fun->shared()->set_end_position(lit->end_position());
-  fun->shared()->set_is_expression(lit->is_expression());
-  fun->shared()->set_is_toplevel(is_toplevel);
-  fun->shared()->set_inferred_name(*lit->inferred_name());
-  fun->shared()->SetThisPropertyAssignmentsInfo(
+  function_info->set_length(lit->num_parameters());
+  function_info->set_formal_parameter_count(lit->num_parameters());
+  function_info->set_script(*script);
+  function_info->set_function_token_position(lit->function_token_position());
+  function_info->set_start_position(lit->start_position());
+  function_info->set_end_position(lit->end_position());
+  function_info->set_is_expression(lit->is_expression());
+  function_info->set_is_toplevel(is_toplevel);
+  function_info->set_inferred_name(*lit->inferred_name());
+  function_info->SetThisPropertyAssignmentsInfo(
       lit->has_only_simple_this_property_assignments(),
       *lit->this_property_assignments());
-  fun->shared()->set_try_full_codegen(lit->try_full_codegen());
+  function_info->set_try_full_codegen(lit->try_full_codegen());
 }
 
 
diff --git a/src/compiler.h b/src/compiler.h
index 9492420..e08e26e 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -219,9 +219,9 @@
 // functions, they will be compiled and allocated as part of the compilation
 // of the source code.
 
-// Please note this interface returns function boilerplates.
-// This means you need to call Factory::NewFunctionFromBoilerplate
-// before you have a real function with context.
+// Please note this interface returns shared function infos.
+// This means you need to call Factory::NewFunctionFromSharedFunctionInfo
+// before you have a real function with a context.
 
 class Compiler : public AllStatic {
  public:
@@ -232,34 +232,35 @@
   // the return handle contains NULL.
 
   // Compile a String source within a context.
-  static Handle<JSFunction> Compile(Handle<String> source,
-                                    Handle<Object> script_name,
-                                    int line_offset, int column_offset,
-                                    v8::Extension* extension,
-                                    ScriptDataImpl* pre_data,
-                                    Handle<Object> script_data,
-                                    NativesFlag is_natives_code);
+  static Handle<SharedFunctionInfo> Compile(Handle<String> source,
+                                            Handle<Object> script_name,
+                                            int line_offset,
+                                            int column_offset,
+                                            v8::Extension* extension,
+                                            ScriptDataImpl* pre_data,
+                                            Handle<Object> script_data,
+                                            NativesFlag is_natives_code);
 
   // Compile a String source within a context for Eval.
-  static Handle<JSFunction> CompileEval(Handle<String> source,
-                                        Handle<Context> context,
-                                        bool is_global,
-                                        ValidationState validation);
+  static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
+                                                Handle<Context> context,
+                                                bool is_global,
+                                                ValidationState validation);
 
   // Compile from function info (used for lazy compilation). Returns
   // true on success and false if the compilation resulted in a stack
   // overflow.
   static bool CompileLazy(CompilationInfo* info);
 
-  // Compile a function boilerplate object (the function is possibly
+  // Compile a shared function info object (the function is possibly
   // lazily compiled). Called recursively from a backend code
-  // generator 'caller' to build the boilerplate.
-  static Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node,
-                                             Handle<Script> script,
-                                             AstVisitor* caller);
+  // generator 'caller' to build the shared function info.
+  static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
+                                                      Handle<Script> script,
+                                                      AstVisitor* caller);
 
   // Set the function info for a newly compiled function.
-  static void SetFunctionInfo(Handle<JSFunction> fun,
+  static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
                               FunctionLiteral* lit,
                               bool is_toplevel,
                               Handle<Script> script);
diff --git a/src/contexts.h b/src/contexts.h
index 4997741..44c90b6 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -86,7 +86,6 @@
   V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
   V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
     call_as_constructor_delegate) \
-  V(EMPTY_SCRIPT_INDEX, Script, empty_script) \
   V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
   V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
@@ -207,7 +206,6 @@
     RUNTIME_CONTEXT_INDEX,
     CALL_AS_FUNCTION_DELEGATE_INDEX,
     CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
-    EMPTY_SCRIPT_INDEX,
     SCRIPT_FUNCTION_INDEX,
     OPAQUE_REFERENCE_FUNCTION_INDEX,
     CONTEXT_EXTENSION_FUNCTION_INDEX,
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 3037085..bf02947 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -41,24 +41,6 @@
 namespace v8 {
 namespace internal {
 
-// The fast double-to-int conversion routine does not guarantee
-// rounding towards zero.
-static inline int FastD2I(double x) {
-#ifdef __USE_ISOC99
-  // The ISO C99 standard defines the lrint() function which rounds a
-  // double to an integer according to the current rounding direction.
-  return lrint(x);
-#else
-  // This is incredibly slow on Intel x86. The reason is that rounding
-  // towards zero is implied by the C standard. This means that the
-  // status register of the FPU has to be changed with the 'fldcw'
-  // instruction. This completely stalls the pipeline and takes many
-  // hundreds of clock cycles.
-  return static_cast<int>(x);
-#endif
-}
-
-
 // The fast double-to-unsigned-int conversion routine does not guarantee
 // rounding towards zero, or any reasonable value if the argument is larger
 // than what fits in an unsigned 32-bit integer.
diff --git a/src/conversions.h b/src/conversions.h
index bdc7e44..4aaf0c0 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -36,7 +36,12 @@
 // rounding towards zero.
 // The result is unspecified if x is infinite or NaN, or if the rounded
 // integer value is outside the range of type int.
-static inline int FastD2I(double x);
+static inline int FastD2I(double x) {
+  // The static_cast convertion from double to int used to be slow, but
+  // as new benchmarks show, now it is much faster than lrint().
+  return static_cast<int>(x);
+}
+
 static inline unsigned int FastD2UI(double x);
 
 
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index d36f511..d16c17f 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -176,7 +176,6 @@
 
 
 void ProfilerEventsProcessor::Run() {
-  ticks_buffer_.SetUpConsumer();
   unsigned dequeue_order = 0;
   running_ = true;
 
@@ -194,7 +193,6 @@
   ticks_buffer_.FlushResidualRecords();
   // Perform processing until we have tick events, skip remaining code events.
   while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
-  ticks_buffer_.TearDownConsumer();
 }
 
 
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index ccfac5c..8a7d2fd 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -154,14 +154,11 @@
   void FunctionMoveEvent(Address from, Address to);
   void FunctionDeleteEvent(Address from);
 
-  // Tick sampler registration. Called by sampler thread or signal handler.
-  inline void SetUpSamplesProducer() { ticks_buffer_.SetUpProducer(); }
   // Tick sample events are filled directly in the buffer of the circular
   // queue (because the structure is of fixed width, but usually not all
   // stack frame entries are filled.) This method returns a pointer to the
   // next record of the buffer.
   INLINE(TickSample* TickSampleEvent());
-  inline void TearDownSamplesProducer() { ticks_buffer_.TearDownProducer(); }
 
  private:
   union CodeEventsContainer {
diff --git a/src/d8.cc b/src/d8.cc
index dedbd55..73cce46 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -467,9 +467,12 @@
 
   // Mark the d8 shell script as native to avoid it showing up as normal source
   // in the debugger.
-  i::Handle<i::JSFunction> script_fun = Utils::OpenHandle(*script);
-  i::Handle<i::Script> script_object =
-      i::Handle<i::Script>(i::Script::cast(script_fun->shared()->script()));
+  i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
+  i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
+      ? i::Handle<i::Script>(i::Script::cast(
+          i::JSFunction::cast(*compiled_script)->shared()->script()))
+      : i::Handle<i::Script>(i::Script::cast(
+          i::SharedFunctionInfo::cast(*compiled_script)->script()));
   script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
 
   // Create the evaluation context
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 141718d..e327c57 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -195,6 +195,81 @@
 }
 
 
+// This function peels off one iteration of a for-loop. The return value
+// is either a block statement containing the peeled loop or NULL in case
+// there is a stack overflow.
+static Statement* PeelForLoop(ForStatement* stmt) {
+  // Mark this for-statement as processed.
+  stmt->set_peel_this_loop(false);
+
+  // Create new block containing the init statement of the for-loop and
+  // an if-statement containing the peeled iteration and the original
+  // loop without the init-statement.
+  Block* block = new Block(NULL, 2, false);
+  if (stmt->init() != NULL) {
+    Statement* init = stmt->init();
+    // The init statement gets the statement position of the for-loop
+    // to make debugging of peeled loops possible.
+    init->set_statement_pos(stmt->statement_pos());
+    block->AddStatement(init);
+  }
+
+  // Copy the condition.
+  CopyAstVisitor copy_visitor;
+  Expression* cond_copy = stmt->cond() != NULL
+      ? copy_visitor.DeepCopyExpr(stmt->cond())
+      : new Literal(Factory::true_value());
+  if (copy_visitor.HasStackOverflow()) return NULL;
+
+  // Construct a block with the peeled body and the rest of the for-loop.
+  Statement* body_copy = copy_visitor.DeepCopyStmt(stmt->body());
+  if (copy_visitor.HasStackOverflow()) return NULL;
+
+  Statement* next_copy = stmt->next() != NULL
+      ? copy_visitor.DeepCopyStmt(stmt->next())
+      : new EmptyStatement();
+  if (copy_visitor.HasStackOverflow()) return NULL;
+
+  Block* peeled_body = new Block(NULL, 3, false);
+  peeled_body->AddStatement(body_copy);
+  peeled_body->AddStatement(next_copy);
+  peeled_body->AddStatement(stmt);
+
+  // Remove the duplicated init statement from the for-statement.
+  stmt->set_init(NULL);
+
+  // Create new test at the top and add it to the newly created block.
+  IfStatement* test = new IfStatement(cond_copy,
+                                      peeled_body,
+                                      new EmptyStatement());
+  block->AddStatement(test);
+  return block;
+}
+
+
+void FlowGraphBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
+  for (int i = 0, len = stmts->length(); i < len; i++) {
+    stmts->at(i) = ProcessStatement(stmts->at(i));
+  }
+}
+
+
+Statement* FlowGraphBuilder::ProcessStatement(Statement* stmt) {
+  if (FLAG_loop_peeling &&
+      stmt->AsForStatement() != NULL &&
+      stmt->AsForStatement()->peel_this_loop()) {
+    Statement* tmp_stmt = PeelForLoop(stmt->AsForStatement());
+    if (tmp_stmt == NULL) {
+      SetStackOverflow();
+    } else {
+      stmt = tmp_stmt;
+    }
+  }
+  Visit(stmt);
+  return stmt;
+}
+
+
 void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
   UNREACHABLE();
 }
@@ -221,11 +296,11 @@
   BranchNode* branch = new BranchNode();
   FlowGraph original = graph_;
   graph_ = FlowGraph::Empty();
-  Visit(stmt->then_statement());
+  stmt->set_then_statement(ProcessStatement(stmt->then_statement()));
 
   FlowGraph left = graph_;
   graph_ = FlowGraph::Empty();
-  Visit(stmt->else_statement());
+  stmt->set_else_statement(ProcessStatement(stmt->else_statement()));
 
   if (HasStackOverflow()) return;
   JoinNode* join = new JoinNode();
@@ -275,7 +350,7 @@
 
 
 void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
-  if (stmt->init() != NULL) Visit(stmt->init());
+  if (stmt->init() != NULL) stmt->set_init(ProcessStatement(stmt->init()));
 
   JoinNode* join = new JoinNode();
   FlowGraph original = graph_;
@@ -285,9 +360,9 @@
   BranchNode* branch = new BranchNode();
   FlowGraph condition = graph_;
   graph_ = FlowGraph::Empty();
-  Visit(stmt->body());
+  stmt->set_body(ProcessStatement(stmt->body()));
 
-  if (stmt->next() != NULL) Visit(stmt->next());
+  if (stmt->next() != NULL) stmt->set_next(ProcessStatement(stmt->next()));
 
   if (HasStackOverflow()) return;
   original.Loop(join, &condition, branch, &graph_);
@@ -320,8 +395,8 @@
 }
 
 
-void FlowGraphBuilder::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FlowGraphBuilder::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   SetStackOverflow();
 }
 
@@ -376,8 +451,10 @@
     if (expr->is_compound()) Visit(expr->target());
     Visit(expr->value());
     if (var->IsStackAllocated()) {
-      expr->set_num(definitions_.length());
-      definitions_.Add(expr);
+      // The first definition in the body is numbered n, where n is the
+      // number of parameters and stack-allocated locals.
+      expr->set_num(body_definitions_.length() + variable_count_);
+      body_definitions_.Add(expr);
     }
 
   } else if (prop != NULL) {
@@ -454,8 +531,10 @@
   Visit(expr->expression());
   Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
   if (var != NULL && var->IsStackAllocated()) {
-    expr->set_num(definitions_.length());
-    definitions_.Add(expr);
+    // The first definition in the body is numbered n, where n is the number
+    // of parameters and stack-allocated locals.
+    expr->set_num(body_definitions_.length() + variable_count_);
+    body_definitions_.Add(expr);
   }
 
   if (HasStackOverflow()) return;
@@ -638,8 +717,8 @@
 }
 
 
-void AstLabeler::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void AstLabeler::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }
 
@@ -1015,8 +1094,8 @@
 }
 
 
-void AssignedVariablesAnalyzer::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void AssignedVariablesAnalyzer::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   // Nothing to do.
   ASSERT(av_.IsEmpty());
 }
@@ -1342,9 +1421,9 @@
 }
 
 
-void TextInstructionPrinter::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  PrintF("FunctionBoilerplateLiteral");
+void TextInstructionPrinter::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  PrintF("SharedFunctionInfoLiteral");
 }
 
 
@@ -1611,8 +1690,9 @@
 }
 
 
-void FlowGraph::PrintText(ZoneList<Node*>* postorder) {
+void FlowGraph::PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder) {
   PrintF("\n========\n");
+  PrintF("name = %s\n", *fun->name()->ToCString());
 
   // Number nodes and instructions in reverse postorder.
   node_count = 0;
@@ -1664,6 +1744,11 @@
   int variable_count = variables->length();
 
   rd_.Initialize(definition_count);
+  // The RD_in set for the entry node has a definition for each parameter
+  // and local.
+  if (predecessor_ == NULL) {
+    for (int i = 0; i < variable_count; i++) rd_.rd_in()->Add(i);
+  }
 
   for (int i = 0; i < instruction_count; i++) {
     Expression* expr = instructions_[i]->AsExpression();
@@ -1859,40 +1944,25 @@
 
 
 void ReachingDefinitions::Compute() {
-  ASSERT(!definitions_->is_empty());
-
-  int variable_count = variables_.length();
-  int definition_count = definitions_->length();
+  // The definitions in the body plus an implicit definition for each
+  // variable at function entry.
+  int definition_count = body_definitions_->length() + variable_count_;
   int node_count = postorder_->length();
 
-  // Step 1: For each variable, identify the set of all its definitions in
-  // the body.
-  for (int i = 0; i < definition_count; i++) {
-    Variable* var = definitions_->at(i)->AssignedVar();
-    variables_[IndexFor(var, variable_count)]->Add(i);
+  // Step 1: For each stack-allocated variable, identify the set of all its
+  // definitions.
+  List<BitVector*> variables;
+  for (int i = 0; i < variable_count_; i++) {
+    // Add the initial definition for each variable.
+    BitVector* initial = new BitVector(definition_count);
+    initial->Add(i);
+    variables.Add(initial);
   }
-
-  if (FLAG_print_graph_text) {
-    for (int i = 0; i < variable_count; i++) {
-      BitVector* def_set = variables_[i];
-      if (!def_set->IsEmpty()) {
-        // At least one definition.
-        bool first = true;
-        for (int j = 0; j < definition_count; j++) {
-          if (def_set->Contains(j)) {
-            if (first) {
-              Variable* var = definitions_->at(j)->AssignedVar();
-              ASSERT(var != NULL);
-              PrintF("Def[%s] = {%d", *var->name()->ToCString(), j);
-              first = false;
-            } else {
-              PrintF(",%d", j);
-            }
-          }
-        }
-        PrintF("}\n");
-      }
-    }
+  for (int i = 0, len = body_definitions_->length(); i < len; i++) {
+    // Account for each definition in the body as a definition of the
+    // defined variable.
+    Variable* var = body_definitions_->at(i)->AssignedVar();
+    variables[IndexFor(var, variable_count_)]->Add(i + variable_count_);
   }
 
   // Step 2: Compute KILL and GEN for each block node, initialize RD_in for
@@ -1902,7 +1972,7 @@
   WorkList<Node> worklist(node_count);
   for (int i = node_count - 1; i >= 0; i--) {
     postorder_->at(i)->InitializeReachingDefinitions(definition_count,
-                                                     &variables_,
+                                                     &variables,
                                                      &worklist,
                                                      mark);
   }
@@ -1919,9 +1989,61 @@
   // Step 4: Based on RD_in for block nodes, propagate reaching definitions
   // to all variable uses in the block.
   for (int i = 0; i < node_count; i++) {
-    postorder_->at(i)->PropagateReachingDefinitions(&variables_);
+    postorder_->at(i)->PropagateReachingDefinitions(&variables);
   }
 }
 
 
+bool TypeAnalyzer::IsPrimitiveDef(int def_num) {
+  if (def_num < param_count_) return false;
+  if (def_num < variable_count_) return true;
+  return body_definitions_->at(def_num - variable_count_)->IsPrimitive();
+}
+
+
+void TypeAnalyzer::Compute() {
+  bool changed;
+  int count = 0;
+
+  do {
+    changed = false;
+
+    if (FLAG_print_graph_text) {
+      PrintF("TypeAnalyzer::Compute - iteration %d\n", count++);
+    }
+
+    for (int i = postorder_->length() - 1; i >= 0; --i) {
+      Node* node = postorder_->at(i);
+      if (node->IsBlockNode()) {
+        BlockNode* block = BlockNode::cast(node);
+        for (int j = 0; j < block->instructions()->length(); j++) {
+          Expression* expr = block->instructions()->at(j)->AsExpression();
+          if (expr != NULL) {
+            // For variable uses: Compute new type from reaching definitions.
+            VariableProxy* proxy = expr->AsVariableProxy();
+            if (proxy != NULL && proxy->reaching_definitions() != NULL) {
+              BitVector* rd = proxy->reaching_definitions();
+              bool prim_type = true;
+              // TODO(fsc): A sparse set representation of reaching
+              // definitions would speed up iterating here.
+              for (int k = 0; k < rd->length(); k++) {
+                if (rd->Contains(k) && !IsPrimitiveDef(k)) {
+                  prim_type = false;
+                  break;
+                }
+              }
+              // Set the changed flag if new type information was computed.
+              if (prim_type != proxy->IsPrimitive()) {
+                changed = true;
+                proxy->SetIsPrimitive(prim_type);
+              }
+            }
+          }
+        }
+      }
+    }
+  } while (changed);
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/data-flow.h b/src/data-flow.h
index 74a370c..9e7e6a4 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -305,6 +305,8 @@
 
   bool is_empty() { return instructions_.is_empty(); }
 
+  ZoneList<AstNode*>* instructions() { return &instructions_; }
+
   void AddPredecessor(Node* predecessor) {
     ASSERT(predecessor_ == NULL && predecessor != NULL);
     predecessor_ = predecessor;
@@ -470,7 +472,7 @@
             FlowGraph* body);
 
 #ifdef DEBUG
-  void PrintText(ZoneList<Node*>* postorder);
+  void PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder);
 #endif
 
  private:
@@ -485,23 +487,28 @@
 // traversal orders as a byproduct.
 class FlowGraphBuilder: public AstVisitor {
  public:
-  FlowGraphBuilder()
+  explicit FlowGraphBuilder(int variable_count)
       : graph_(FlowGraph::Empty()),
         global_exit_(NULL),
         preorder_(4),
         postorder_(4),
-        definitions_(4) {
+        variable_count_(variable_count),
+        body_definitions_(4) {
   }
 
   void Build(FunctionLiteral* lit);
 
   FlowGraph* graph() { return &graph_; }
   ZoneList<Node*>* postorder() { return &postorder_; }
-  ZoneList<Expression*>* definitions() { return &definitions_; }
+  ZoneList<Expression*>* body_definitions() { return &body_definitions_; }
 
  private:
   ExitNode* global_exit() { return global_exit_; }
 
+  // Helpers to allow transforming the AST during flow graph construction.
+  void VisitStatements(ZoneList<Statement*>* stmts);
+  Statement* ProcessStatement(Statement* stmt);
+
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
@@ -512,11 +519,13 @@
   ZoneList<Node*> preorder_;
   ZoneList<Node*> postorder_;
 
-  // The flow graph builder collects a list of definitions (assignments and
-  // count operations) to stack-allocated variables to use for reaching
-  // definitions analysis.  AST node numbers in the AST are used to refer
-  // into this list.
-  ZoneList<Expression*> definitions_;
+  // The flow graph builder collects a list of explicit definitions
+  // (assignments and count operations) to stack-allocated variables to use
+  // for reaching definitions analysis.  It does not count the implicit
+  // definition at function entry.  AST node numbers in the AST are used to
+  // refer into this list.
+  int variable_count_;
+  ZoneList<Expression*> body_definitions_;
 
   DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
 };
@@ -589,15 +598,11 @@
 class ReachingDefinitions BASE_EMBEDDED {
  public:
   ReachingDefinitions(ZoneList<Node*>* postorder,
-                      ZoneList<Expression*>* definitions,
+                      ZoneList<Expression*>* body_definitions,
                       int variable_count)
       : postorder_(postorder),
-        definitions_(definitions),
-        variables_(variable_count) {
-    int definition_count = definitions->length();
-    for (int i = 0; i < variable_count; i++) {
-      variables_.Add(new BitVector(definition_count));
-    }
+        body_definitions_(body_definitions),
+        variable_count_(variable_count) {
   }
 
   static int IndexFor(Variable* var, int variable_count);
@@ -609,15 +614,39 @@
   ZoneList<Node*>* postorder_;
 
   // A list of all the definitions in the body.
-  ZoneList<Expression*>* definitions_;
+  ZoneList<Expression*>* body_definitions_;
 
-  // For each variable, the set of all its definitions.
-  List<BitVector*> variables_;
+  int variable_count_;
 
   DISALLOW_COPY_AND_ASSIGN(ReachingDefinitions);
 };
 
 
+
+class TypeAnalyzer BASE_EMBEDDED {
+ public:
+  TypeAnalyzer(ZoneList<Node*>* postorder,
+               ZoneList<Expression*>* body_definitions,
+               int variable_count,
+               int param_count)
+      : postorder_(postorder),
+        body_definitions_(body_definitions),
+        variable_count_(variable_count),
+        param_count_(param_count) {}
+
+  void Compute();
+
+ private:
+  // Get the primitivity of definition number i. Definitions are numbered
+  // by the flow graph builder.
+  bool IsPrimitiveDef(int def_num);
+
+  ZoneList<Node*>* postorder_;
+  ZoneList<Expression*>* body_definitions_;
+  int variable_count_;
+  int param_count_;
+};
+
 } }  // namespace v8::internal
 
 
diff --git a/src/date-delay.js b/src/date.js
similarity index 98%
rename from src/date-delay.js
rename to src/date.js
index 6c27d69..c7c3940 100644
--- a/src/date-delay.js
+++ b/src/date.js
@@ -223,6 +223,10 @@
 }
 
 function LocalTimeNoCheck(time) {
+  if (time < -MAX_TIME_MS || time > MAX_TIME_MS) {
+    return $NaN;
+  }
+
   // Inline the DST offset cache checks for speed.
   var cache = DST_offset_cache;
   if (cache.start <= time && time <= cache.end) {
@@ -265,8 +269,7 @@
 
 function YearFromTime(t) {
   if (t !== ymd_from_time_cached_time) {
-    // Limits according to ECMA 262 15.9.1.1
-    if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
+    if (!$isFinite(t)) {
       return $NaN;
     }
 
@@ -279,8 +282,7 @@
 
 function MonthFromTime(t) {
   if (t !== ymd_from_time_cached_time) {
-    // Limits according to ECMA 262 15.9.1.1
-    if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
+    if (!$isFinite(t)) {
       return $NaN;
     }
     %DateYMDFromTime(t, ymd_from_time_cache);
@@ -292,8 +294,7 @@
 
 function DateFromTime(t) {
   if (t !== ymd_from_time_cached_time) {
-    // Limits according to ECMA 262 15.9.1.1
-    if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
+    if (!$isFinite(t)) {
       return $NaN;
     }
 
diff --git a/src/debug-delay.js b/src/debug-debugger.js
similarity index 100%
rename from src/debug-delay.js
rename to src/debug-debugger.js
diff --git a/src/debug.cc b/src/debug.cc
index 2a7a9c8..e5d42b9 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -685,29 +685,26 @@
   // Compile the script.
   bool allow_natives_syntax = FLAG_allow_natives_syntax;
   FLAG_allow_natives_syntax = true;
-  Handle<JSFunction> boilerplate;
-  boilerplate = Compiler::Compile(source_code,
-                                  script_name,
-                                  0,
-                                  0,
-                                  NULL,
-                                  NULL,
-                                  Handle<String>::null(),
-                                  NATIVES_CODE);
+  Handle<SharedFunctionInfo> function_info;
+  function_info = Compiler::Compile(source_code,
+                                    script_name,
+                                    0, 0, NULL, NULL,
+                                    Handle<String>::null(),
+                                    NATIVES_CODE);
   FLAG_allow_natives_syntax = allow_natives_syntax;
 
   // Silently ignore stack overflows during compilation.
-  if (boilerplate.is_null()) {
+  if (function_info.is_null()) {
     ASSERT(Top::has_pending_exception());
     Top::clear_pending_exception();
     return false;
   }
 
-  // Execute the boilerplate function in the debugger context.
+  // Execute the shared function in the debugger context.
   Handle<Context> context = Top::global_context();
   bool caught_exception = false;
   Handle<JSFunction> function =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromSharedFunctionInfo(function_info, context);
   Handle<Object> result =
       Execution::TryCall(function, Handle<Object>(context->global()),
                          0, NULL, &caught_exception);
@@ -1685,7 +1682,7 @@
 
   // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
   // rid of all the cached script wrappers and the second gets rid of the
-  // scripts which is no longer referenced.
+  // scripts which are no longer referenced.
   Heap::CollectAllGarbage(false);
   Heap::CollectAllGarbage(false);
 
@@ -1999,7 +1996,7 @@
   // If debugging there might be script break points registered for this
   // script. Make sure that these break points are set.
 
-  // Get the function UpdateScriptBreakPoints (defined in debug-delay.js).
+  // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
   Handle<Object> update_script_break_points =
       Handle<Object>(Debug::debug_context()->global()->GetProperty(
           *Factory::LookupAsciiSymbol("UpdateScriptBreakPoints")));
@@ -2042,31 +2039,6 @@
 }
 
 
-void Debugger::OnNewFunction(Handle<JSFunction> function) {
-  return;
-  HandleScope scope;
-
-  // Bail out based on state or if there is no listener for this event
-  if (Debug::InDebugger()) return;
-  if (compiling_natives()) return;
-  if (!Debugger::EventActive(v8::NewFunction)) return;
-
-  // Enter the debugger.
-  EnterDebugger debugger;
-  if (debugger.FailedToEnter()) return;
-
-  // Create the event object.
-  bool caught_exception = false;
-  Handle<Object> event_data = MakeNewFunctionEvent(function, &caught_exception);
-  // Bail out and don't call debugger if exception.
-  if (caught_exception) {
-    return;
-  }
-  // Process debug event.
-  ProcessDebugEvent(v8::NewFunction, Handle<JSObject>::cast(event_data), true);
-}
-
-
 void Debugger::OnScriptCollected(int id) {
   HandleScope scope;
 
diff --git a/src/factory.cc b/src/factory.cc
index 45124e6..dbcb4ec 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -282,31 +282,26 @@
 }
 
 
-Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
-    Handle<JSFunction> boilerplate,
+Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
+    Handle<SharedFunctionInfo> function_info,
     Handle<Map> function_map,
     PretenureFlag pretenure) {
-  ASSERT(boilerplate->IsBoilerplate());
-  ASSERT(!boilerplate->has_initial_map());
-  ASSERT(!boilerplate->has_prototype());
-  ASSERT(boilerplate->properties() == Heap::empty_fixed_array());
-  ASSERT(boilerplate->elements() == Heap::empty_fixed_array());
   CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
-                                            boilerplate->shared(),
+                                            *function_info,
                                             Heap::the_hole_value(),
                                             pretenure),
                      JSFunction);
 }
 
 
-Handle<JSFunction> Factory::NewFunctionFromBoilerplate(
-    Handle<JSFunction> boilerplate,
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+    Handle<SharedFunctionInfo> function_info,
     Handle<Context> context,
     PretenureFlag pretenure) {
-  Handle<JSFunction> result = BaseNewFunctionFromBoilerplate(
-      boilerplate, Top::function_map(), pretenure);
+  Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
+      function_info, Top::function_map(), pretenure);
   result->set_context(*context);
-  int number_of_literals = boilerplate->NumberOfLiterals();
+  int number_of_literals = function_info->num_literals();
   Handle<FixedArray> literals =
       Factory::NewFixedArray(number_of_literals, pretenure);
   if (number_of_literals > 0) {
@@ -490,36 +485,6 @@
 }
 
 
-Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name,
-                                                   int number_of_literals,
-                                                   Handle<Code> code) {
-  Handle<JSFunction> function = NewFunctionBoilerplate(name);
-  function->set_code(*code);
-  int literals_array_size = number_of_literals;
-  // If the function contains object, regexp or array literals,
-  // allocate extra space for a literals array prefix containing the
-  // object, regexp and array constructor functions.
-  if (number_of_literals > 0) {
-    literals_array_size += JSFunction::kLiteralsPrefixSize;
-  }
-  Handle<FixedArray> literals =
-      Factory::NewFixedArray(literals_array_size, TENURED);
-  function->set_literals(*literals);
-  ASSERT(!function->has_initial_map());
-  ASSERT(!function->has_prototype());
-  return function;
-}
-
-
-Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name) {
-  Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
-  CALL_HEAP_FUNCTION(Heap::AllocateFunction(Heap::boilerplate_function_map(),
-                                            *shared,
-                                            Heap::the_hole_value()),
-                     JSFunction);
-}
-
-
 Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
                                                      InstanceType type,
                                                      int instance_size,
@@ -686,6 +651,22 @@
 }
 
 
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
+    Handle<String> name, int number_of_literals, Handle<Code> code) {
+  Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
+  shared->set_code(*code);
+  int literals_array_size = number_of_literals;
+  // If the function contains object, regexp or array literals,
+  // allocate extra space for a literals array prefix containing the
+  // context.
+  if (number_of_literals > 0) {
+    literals_array_size += JSFunction::kLiteralsPrefixSize;
+  }
+  shared->set_num_literals(literals_array_size);
+  return shared;
+}
+
+
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
   CALL_HEAP_FUNCTION(Heap::AllocateSharedFunctionInfo(*name),
                      SharedFunctionInfo);
diff --git a/src/factory.h b/src/factory.h
index cc96e11..4307289 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -218,8 +218,13 @@
 
   static Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
 
-  static Handle<JSFunction> NewFunctionFromBoilerplate(
-      Handle<JSFunction> boilerplate,
+  static Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
+      Handle<SharedFunctionInfo> function_info,
+      Handle<Map> function_map,
+      PretenureFlag pretenure);
+
+  static Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+      Handle<SharedFunctionInfo> function_info,
       Handle<Context> context,
       PretenureFlag pretenure = TENURED);
 
@@ -273,12 +278,6 @@
                                         Handle<Code> code,
                                         bool force_initial_map);
 
-  static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name,
-                                                   int number_of_literals,
-                                                   Handle<Code> code);
-
-  static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name);
-
   static Handle<JSFunction> NewFunction(Handle<Map> function_map,
       Handle<SharedFunctionInfo> shared, Handle<Object> prototype);
 
@@ -337,6 +336,8 @@
     return Handle<String>(&Heap::hidden_symbol_);
   }
 
+  static Handle<SharedFunctionInfo> NewSharedFunctionInfo(
+      Handle<String> name, int number_of_literals, Handle<Code> code);
   static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
 
   static Handle<NumberDictionary> DictionaryAtNumberPut(
@@ -377,11 +378,6 @@
       Handle<DescriptorArray> array,
       Handle<Object> descriptors);
 
-  static Handle<JSFunction> BaseNewFunctionFromBoilerplate(
-      Handle<JSFunction> boilerplate,
-      Handle<Map> function_map,
-      PretenureFlag pretenure);
-
   // Create a new map cache.
   static Handle<MapCache> NewMapCache(int at_least_space_for);
 
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index 5d0b9c1..832cf74 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -195,9 +195,9 @@
 }
 
 
-void FastCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
+void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
 }
 
 
@@ -560,8 +560,8 @@
 }
 
 
-void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FastCodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 021362d..89c4ca4 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -122,9 +122,9 @@
 // bootstrapper.cc
 DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
 DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
-DEFINE_string(natives_file, NULL, "alternative natives file")
 DEFINE_bool(expose_gc, false, "expose gc extension")
 DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
+DEFINE_bool(disable_native_files, false, "disable builtin natives files")
 
 // builtins-ia32.cc
 DEFINE_bool(inline_new, true, "use fast inline allocation")
@@ -160,6 +160,9 @@
 // compilation-cache.cc
 DEFINE_bool(compilation_cache, true, "enable compilation cache")
 
+// data-flow.cc
+DEFINE_bool(loop_peeling, false, "Peel off the first iteration of loops.")
+
 // debug.cc
 DEFINE_bool(remote_debugging, false, "enable remote debugging")
 DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
diff --git a/src/frames.cc b/src/frames.cc
index 24550a2..5e81a54 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -346,6 +346,7 @@
 
 void StackFrame::Cook() {
   Code* code = this->code();
+  ASSERT(code->IsCode());
   for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
     it.handler()->Cook(code);
   }
@@ -356,6 +357,7 @@
 
 void StackFrame::Uncook() {
   Code* code = this->code();
+  ASSERT(code->IsCode());
   for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
     it.handler()->Uncook(code);
   }
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 6e9a3ff..2d6deb3 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -212,9 +212,9 @@
 }
 
 
-void FullCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
+void FullCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
 }
 
 
@@ -524,8 +524,8 @@
             array->set_undefined(j++);
           }
         } else {
-          Handle<JSFunction> function =
-              Compiler::BuildBoilerplate(decl->fun(), script(), this);
+          Handle<SharedFunctionInfo> function =
+              Compiler::BuildFunctionInfo(decl->fun(), script(), this);
           // Check for stack-overflow exception.
           if (HasStackOverflow()) return;
           array->set(j++, *function);
@@ -998,8 +998,8 @@
 }
 
 
-void FullCodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }
 
diff --git a/src/globals.h b/src/globals.h
index cb7f27e..b85e19d 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -174,6 +174,15 @@
 const int kBitsPerPointer = kPointerSize * kBitsPerByte;
 const int kBitsPerInt = kIntSize * kBitsPerByte;
 
+// IEEE 754 single precision floating point number bit layout.
+const uint32_t kBinary32SignMask = 0x80000000u;
+const uint32_t kBinary32ExponentMask = 0x7f800000u;
+const uint32_t kBinary32MantissaMask = 0x007fffffu;
+const int kBinary32ExponentBias = 127;
+const int kBinary32MaxExponent  = 0xFE;
+const int kBinary32MinExponent  = 0x01;
+const int kBinary32MantissaBits = 23;
+const int kBinary32ExponentShift = 23;
 
 // Zap-value: The value used for zapping dead objects.
 // Should be a recognizable hex value tagged as a heap object pointer.
@@ -195,6 +204,10 @@
 // gives 8K bytes per page.
 const int kPageSizeBits = 13;
 
+// On Intel architecture, cache line size is 64 bytes.
+// On ARM it may be less (32 bytes), but as far as this constant is
+// used for aligning data, it doesn't hurt to align on a greater value.
+const int kProcessorCacheLineSize = 64;
 
 // Constants relevant to double precision floating point numbers.
 
@@ -321,7 +334,6 @@
 
 enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
 
-
 // Flag indicating whether code is built into the VM (one of the natives files).
 enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
 
@@ -404,7 +416,7 @@
 // Type of properties.
 // Order of properties is significant.
 // Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-delay.js.
+// A copy of this is in mirror-debugger.js.
 enum PropertyType {
   NORMAL              = 0,  // only in slow mode
   FIELD               = 1,  // only in fast mode
diff --git a/src/handles.cc b/src/handles.cc
index 4ebeaa7..f8a679b 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -174,13 +174,6 @@
 }
 
 
-void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
-                                          int estimate) {
-  SetExpectedNofProperties(
-      func, ExpectedNofPropertiesFromEstimate(estimate));
-}
-
-
 void NormalizeProperties(Handle<JSObject> object,
                          PropertyNormalizationMode mode,
                          int expected_additional_properties) {
@@ -243,6 +236,15 @@
 }
 
 
+Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+                                     Handle<String> key,
+                                     Handle<Object> value,
+                                     PropertyDetails details) {
+  CALL_HEAP_FUNCTION(object->SetNormalizedProperty(*key, *value, details),
+                     Object);
+}
+
+
 Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
                                    Handle<Object> key) {
   CALL_HEAP_FUNCTION(Runtime::ForceDeleteObjectProperty(object, key), Object);
@@ -784,88 +786,4 @@
   }
 }
 
-
-void LoadLazy(Handle<JSObject> obj, bool* pending_exception) {
-  HandleScope scope;
-  Handle<FixedArray> info(FixedArray::cast(obj->map()->constructor()));
-  int index = Smi::cast(info->get(0))->value();
-  ASSERT(index >= 0);
-  Handle<Context> compile_context(Context::cast(info->get(1)));
-  Handle<Context> function_context(Context::cast(info->get(2)));
-  Handle<Object> receiver(compile_context->global()->builtins());
-
-  Vector<const char> name = Natives::GetScriptName(index);
-
-  Handle<JSFunction> boilerplate;
-
-  if (!Bootstrapper::NativesCacheLookup(name, &boilerplate)) {
-    Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
-    Handle<String> script_name = Factory::NewStringFromAscii(name);
-    bool allow_natives_syntax = FLAG_allow_natives_syntax;
-    FLAG_allow_natives_syntax = true;
-    boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL,
-                                    Handle<String>::null(), NATIVES_CODE);
-    FLAG_allow_natives_syntax = allow_natives_syntax;
-    // If the compilation failed (possibly due to stack overflows), we
-    // should never enter the result in the natives cache. Instead we
-    // return from the function without marking the function as having
-    // been lazily loaded.
-    if (boilerplate.is_null()) {
-      *pending_exception = true;
-      return;
-    }
-    Bootstrapper::NativesCacheAdd(name, boilerplate);
-  }
-
-  // We shouldn't get here if compiling the script failed.
-  ASSERT(!boilerplate.is_null());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // When the debugger running in its own context touches lazy loaded
-  // functions loading can be triggered. In that case ensure that the
-  // execution of the boilerplate is in the correct context.
-  SaveContext save;
-  if (!Debug::debug_context().is_null() &&
-      Top::context() == *Debug::debug_context()) {
-    Top::set_context(*compile_context);
-  }
-#endif
-
-  // Reset the lazy load data before running the script to make sure
-  // not to get recursive lazy loading.
-  obj->map()->set_needs_loading(false);
-  obj->map()->set_constructor(info->get(3));
-
-  // Run the script.
-  Handle<JSFunction> script_fun(
-      Factory::NewFunctionFromBoilerplate(boilerplate, function_context));
-  Execution::Call(script_fun, receiver, 0, NULL, pending_exception);
-
-  // If lazy loading failed, restore the unloaded state of obj.
-  if (*pending_exception) {
-    obj->map()->set_needs_loading(true);
-    obj->map()->set_constructor(*info);
-  }
-}
-
-
-void SetupLazy(Handle<JSObject> obj,
-               int index,
-               Handle<Context> compile_context,
-               Handle<Context> function_context) {
-  Handle<FixedArray> arr = Factory::NewFixedArray(4);
-  arr->set(0, Smi::FromInt(index));
-  arr->set(1, *compile_context);  // Compile in this context
-  arr->set(2, *function_context);  // Set function context to this
-  arr->set(3, obj->map()->constructor());  // Remember the constructor
-  Handle<Map> old_map(obj->map());
-  Handle<Map> new_map = Factory::CopyMapDropTransitions(old_map);
-  obj->set_map(*new_map);
-  new_map->set_needs_loading(true);
-  // Store the lazy loading info in the constructor field.  We'll
-  // reestablish the constructor from the fixed array after loading.
-  new_map->set_constructor(*arr);
-  ASSERT(!obj->IsLoaded());
-}
-
 } }  // namespace v8::internal
diff --git a/src/handles.h b/src/handles.h
index f241da2..54c3b45 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -210,6 +210,11 @@
                                 Handle<Object> value,
                                 PropertyAttributes attributes);
 
+Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+                                     Handle<String> key,
+                                     Handle<Object> value,
+                                     PropertyDetails details);
+
 Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
                                    Handle<Object> key);
 
@@ -307,8 +312,6 @@
 // Sets the expected number of properties based on estimate from compiler.
 void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
                                           int estimate);
-void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
-                                          int estimate);
 
 
 Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
@@ -340,13 +343,6 @@
 // Returns the lazy compilation stub for argc arguments.
 Handle<Code> ComputeLazyCompile(int argc);
 
-// These deal with lazily loaded properties.
-void SetupLazy(Handle<JSObject> obj,
-               int index,
-               Handle<Context> compile_context,
-               Handle<Context> function_context);
-void LoadLazy(Handle<JSObject> obj, bool* pending_exception);
-
 class NoHandleAllocation BASE_EMBEDDED {
  public:
 #ifndef DEBUG
diff --git a/src/heap-inl.h b/src/heap-inl.h
index c4676fd..892c289 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -283,11 +283,11 @@
   const int length = str->length();
   Object* obj = str->TryFlatten();
   if (length <= kMaxAlwaysFlattenLength ||
-      unflattended_strings_length_ >= kFlattenLongThreshold) {
+      unflattened_strings_length_ >= kFlattenLongThreshold) {
     return obj;
   }
   if (obj->IsFailure()) {
-    unflattended_strings_length_ += length;
+    unflattened_strings_length_ += length;
   }
   return str;
 }
diff --git a/src/heap.cc b/src/heap.cc
index b477b45..13ffa29 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -98,6 +98,9 @@
 // set up by ConfigureHeap otherwise.
 int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
 
+List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
+List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
+
 GCCallback Heap::global_gc_prologue_callback_ = NULL;
 GCCallback Heap::global_gc_epilogue_callback_ = NULL;
 
@@ -114,7 +117,7 @@
 int Heap::mc_count_ = 0;
 int Heap::gc_count_ = 0;
 
-int Heap::unflattended_strings_length_ = 0;
+int Heap::unflattened_strings_length_ = 0;
 
 int Heap::always_allocate_scope_depth_ = 0;
 int Heap::linear_allocation_scope_depth_ = 0;
@@ -304,7 +307,7 @@
 void Heap::GarbageCollectionPrologue() {
   TranscendentalCache::Clear();
   gc_count_++;
-  unflattended_strings_length_ = 0;
+  unflattened_strings_length_ = 0;
 #ifdef DEBUG
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   allow_allocation(false);
@@ -547,6 +550,16 @@
     GCTracer::ExternalScope scope(tracer);
     global_gc_prologue_callback_();
   }
+
+  GCType gc_type =
+      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
+
+  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
+      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
+    }
+  }
+
   EnsureFromSpaceIsCommitted();
 
   // Perform mark-sweep with optional compaction.
@@ -585,6 +598,15 @@
         amount_of_external_allocated_memory_;
   }
 
+  GCCallbackFlags callback_flags = tracer->is_compacting()
+      ? kGCCallbackFlagCompacted
+      : kNoGCCallbackFlags;
+  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
+      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
+    }
+  }
+
   if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
     ASSERT(!allocation_allowed_);
     GCTracer::ExternalScope scope(tracer);
@@ -1269,7 +1291,7 @@
   if (obj->IsFailure()) return false;
   set_oddball_map(Map::cast(obj));
 
-  // Allocate the empty array
+  // Allocate the empty array.
   obj = AllocateEmptyFixedArray();
   if (obj->IsFailure()) return false;
   set_empty_fixed_array(FixedArray::cast(obj));
@@ -1415,7 +1437,8 @@
   if (obj->IsFailure()) return false;
   set_boilerplate_function_map(Map::cast(obj));
 
-  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
+  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
+                    SharedFunctionInfo::kAlignedSize);
   if (obj->IsFailure()) return false;
   set_shared_function_info_map(Map::cast(obj));
 
@@ -3786,6 +3809,46 @@
 #endif
 
 
+void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+  ASSERT(callback != NULL);
+  GCPrologueCallbackPair pair(callback, gc_type);
+  ASSERT(!gc_prologue_callbacks_.Contains(pair));
+  return gc_prologue_callbacks_.Add(pair);
+}
+
+
+void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+  ASSERT(callback != NULL);
+  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+    if (gc_prologue_callbacks_[i].callback == callback) {
+      gc_prologue_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
+void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+  ASSERT(callback != NULL);
+  GCEpilogueCallbackPair pair(callback, gc_type);
+  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
+  return gc_epilogue_callbacks_.Add(pair);
+}
+
+
+void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+  ASSERT(callback != NULL);
+  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+    if (gc_epilogue_callbacks_[i].callback == callback) {
+      gc_epilogue_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
 #ifdef DEBUG
 
 class PrintHandleVisitor: public ObjectVisitor {
diff --git a/src/heap.h b/src/heap.h
index 1aee874..d37641c 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -108,6 +108,7 @@
   V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
   V(FixedArray, natives_source_cache, NativesSourceCache)                      \
   V(Object, last_script_id, LastScriptId)                                      \
+  V(Script, empty_script, EmptyScript)                                         \
   V(Smi, real_stack_limit, RealStackLimit)                                     \
 
 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
@@ -673,10 +674,20 @@
   static bool GarbageCollectionGreedyCheck();
 #endif
 
+  static void AddGCPrologueCallback(
+      GCPrologueCallback callback, GCType gc_type_filter);
+  static void RemoveGCPrologueCallback(GCPrologueCallback callback);
+
+  static void AddGCEpilogueCallback(
+      GCEpilogueCallback callback, GCType gc_type_filter);
+  static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+
   static void SetGlobalGCPrologueCallback(GCCallback callback) {
+    ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
     global_gc_prologue_callback_ = callback;
   }
   static void SetGlobalGCEpilogueCallback(GCCallback callback) {
+    ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
     global_gc_epilogue_callback_ = callback;
   }
 
@@ -758,6 +769,10 @@
     roots_[kNonMonomorphicCacheRootIndex] = value;
   }
 
+  static void public_set_empty_script(Script* script) {
+    roots_[kEmptyScriptRootIndex] = script;
+  }
+
   // Update the next script id.
   static inline void SetLastScriptId(Object* last_script_id);
 
@@ -965,7 +980,7 @@
   static int gc_count_;  // how many gc happened
 
   // Total length of the strings we failed to flatten since the last GC.
-  static int unflattended_strings_length_;
+  static int unflattened_strings_length_;
 
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
   static inline void set_##name(type* value) {                                 \
@@ -1041,6 +1056,30 @@
 
   // GC callback function, called before and after mark-compact GC.
   // Allocations in the callback function are disallowed.
+  struct GCPrologueCallbackPair {
+    GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
+        : callback(callback), gc_type(gc_type) {
+    }
+    bool operator==(const GCPrologueCallbackPair& pair) const {
+      return pair.callback == callback;
+    }
+    GCPrologueCallback callback;
+    GCType gc_type;
+  };
+  static List<GCPrologueCallbackPair> gc_prologue_callbacks_;
+
+  struct GCEpilogueCallbackPair {
+    GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
+        : callback(callback), gc_type(gc_type) {
+    }
+    bool operator==(const GCEpilogueCallbackPair& pair) const {
+      return pair.callback == callback;
+    }
+    GCEpilogueCallback callback;
+    GCType gc_type;
+  };
+  static List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
+
   static GCCallback global_gc_prologue_callback_;
   static GCCallback global_gc_epilogue_callback_;
 
@@ -1583,6 +1622,7 @@
 
   // Sets the flag that this is a compacting full GC.
   void set_is_compacting() { is_compacting_ = true; }
+  bool is_compacting() const { return is_compacting_; }
 
   // Increment and decrement the count of marked objects.
   void increment_marked_count() { ++marked_count_; }
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 67c9cc1..7c115c5 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -911,6 +911,7 @@
   // operand in register number. Returns operand as floating point number
   // on FPU stack.
   static void LoadFloatOperand(MacroAssembler* masm, Register number);
+
   // Code pattern for loading floating point values. Input values must
   // be either smi or heap number objects (fp values). Requirements:
   // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
@@ -929,6 +930,7 @@
   static void CheckFloatOperands(MacroAssembler* masm,
                                  Label* non_float,
                                  Register scratch);
+
   // Takes the operands in edx and eax and loads them as integers in eax
   // and ecx.
   static void LoadAsIntegers(MacroAssembler* masm,
@@ -947,6 +949,7 @@
   // into xmm0 and xmm1 if they are. Operands are in edx and eax.
   // Leaves operands unchanged.
   static void LoadSSE2Operands(MacroAssembler* masm);
+
   // Test if operands are numbers (smi or HeapNumber objects), and load
   // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
   // either operand is not a number.  Operands are in edx and eax.
@@ -2361,6 +2364,22 @@
 }
 
 
+// Convert from signed to unsigned comparison to match the way EFLAGS are set
+// by FPU and XMM compare instructions.
+static Condition DoubleCondition(Condition cc) {
+  switch (cc) {
+    case less:          return below;
+    case equal:         return equal;
+    case less_equal:    return below_equal;
+    case greater:       return above;
+    case greater_equal: return above_equal;
+    default:            UNREACHABLE();
+  }
+  UNREACHABLE();
+  return equal;
+}
+
+
 void CodeGenerator::Comparison(AstNode* node,
                                Condition cc,
                                bool strict,
@@ -2431,7 +2450,7 @@
         left_side = right_side;
         right_side = temp;
         cc = ReverseCondition(cc);
-        // This may reintroduce greater or less_equal as the value of cc.
+        // This may re-introduce greater or less_equal as the value of cc.
         // CompareStub and the inline code both support all values of cc.
       }
       // Implement comparison against a constant Smi, inlining the case
@@ -2453,11 +2472,11 @@
         __ test(left_side.reg(), Immediate(kSmiTagMask));
         is_smi.Branch(zero, taken);
 
-        bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
-            && node->AsCompareOperation()->is_for_loop_condition();
-        if (!is_for_loop_compare
-            && CpuFeatures::IsSupported(SSE2)
-            && right_val->IsSmi()) {
+        bool is_loop_condition = (node->AsExpression() != NULL) &&
+            node->AsExpression()->is_loop_condition();
+        if (!is_loop_condition &&
+            CpuFeatures::IsSupported(SSE2) &&
+            right_val->IsSmi()) {
           // Right side is a constant smi and left side has been checked
           // not to be a smi.
           CpuFeatures::Scope use_sse2(SSE2);
@@ -2480,16 +2499,7 @@
           // Jump to builtin for NaN.
           not_number.Branch(parity_even, &left_side);
           left_side.Unuse();
-          Condition double_cc = cc;
-          switch (cc) {
-            case less:          double_cc = below;       break;
-            case equal:         double_cc = equal;       break;
-            case less_equal:    double_cc = below_equal; break;
-            case greater:       double_cc = above;       break;
-            case greater_equal: double_cc = above_equal; break;
-            default: UNREACHABLE();
-          }
-          dest->true_target()->Branch(double_cc);
+          dest->true_target()->Branch(DoubleCondition(cc));
           dest->false_target()->Jump();
           not_number.Bind(&left_side);
         }
@@ -2688,21 +2698,53 @@
       dest->Split(cc);
     }
   } else {
-    // Neither side is a constant Smi or null.
-    // If either side is a non-smi constant, skip the smi check.
+    // Neither side is a constant Smi, constant 1-char string or constant null.
+    // If either side is a non-smi constant, or known to be a heap number skip
+    // the smi check.
     bool known_non_smi =
         (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
-        (right_side.is_constant() && !right_side.handle()->IsSmi());
+        (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
+        left_side.number_info().IsHeapNumber() ||
+        right_side.number_info().IsHeapNumber();
     NaNInformation nan_info =
         (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
         kBothCouldBeNaN :
         kCantBothBeNaN;
+
+    // Inline number comparison handling any combination of smi's and heap
+    // numbers if:
+    //   code is in a loop
+    //   the compare operation is different from equal
+    //   compare is not a for-loop comparison
+    // The reason for excluding equal is that it will most likely be done
+    // with smi's (not heap numbers) and the code for comparing smi's is inlined
+    // separately. The same reason applies for for-loop comparison which will
+    // also most likely be smi comparisons.
+    bool is_loop_condition = (node->AsExpression() != NULL)
+        && node->AsExpression()->is_loop_condition();
+    bool inline_number_compare =
+        loop_nesting() > 0 && cc != equal && !is_loop_condition;
+
+    // Left and right needed in registers for the following code.
     left_side.ToRegister();
     right_side.ToRegister();
 
     if (known_non_smi) {
-      // When non-smi, call out to the compare stub.
-      CompareStub stub(cc, strict, nan_info);
+      // Inline the equality check if both operands can't be a NaN. If both
+      // objects are the same they are equal.
+      if (nan_info == kCantBothBeNaN && cc == equal) {
+        __ cmp(left_side.reg(), Operand(right_side.reg()));
+        dest->true_target()->Branch(equal);
+      }
+
+      // Inline number comparison.
+      if (inline_number_compare) {
+        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+      }
+
+      // End of in-line compare, call out to the compare stub. Don't include
+      // number comparison in the stub if it was inlined.
+      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
       if (cc == equal) {
         __ test(answer.reg(), Operand(answer.reg()));
@@ -2721,6 +2763,7 @@
       Register left_reg = left_side.reg();
       Register right_reg = right_side.reg();
 
+      // In-line check for comparing two smis.
       Result temp = allocator_->Allocate();
       ASSERT(temp.is_valid());
       __ mov(temp.reg(), left_side.reg());
@@ -2728,8 +2771,22 @@
       __ test(temp.reg(), Immediate(kSmiTagMask));
       temp.Unuse();
       is_smi.Branch(zero, taken);
-      // When non-smi, call out to the compare stub.
-      CompareStub stub(cc, strict, nan_info);
+
+      // Inline the equality check if both operands can't be a NaN. If both
+      // objects are the same they are equal.
+      if (nan_info == kCantBothBeNaN && cc == equal) {
+        __ cmp(left_side.reg(), Operand(right_side.reg()));
+        dest->true_target()->Branch(equal);
+      }
+
+      // Inline number comparison.
+      if (inline_number_compare) {
+        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+      }
+
+      // End of in-line compare, call out to the compare stub. Don't include
+      // number comparison in the stub if it was inlined.
+      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
       if (cc == equal) {
         __ test(answer.reg(), Operand(answer.reg()));
@@ -2752,6 +2809,150 @@
 }
 
 
+// Check that the comparison operand is a number. Jump to not_numbers jump
+// target passing the left and right result if the operand is not a number.
+static void CheckComparisonOperand(MacroAssembler* masm_,
+                                   Result* operand,
+                                   Result* left_side,
+                                   Result* right_side,
+                                   JumpTarget* not_numbers) {
+  // Perform check if operand is not known to be a number.
+  if (!operand->number_info().IsNumber()) {
+    Label done;
+    __ test(operand->reg(), Immediate(kSmiTagMask));
+    __ j(zero, &done);
+    __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
+           Immediate(Factory::heap_number_map()));
+    not_numbers->Branch(not_equal, left_side, right_side, not_taken);
+    __ bind(&done);
+  }
+}
+
+
+// Load a comparison operand to the FPU stack. This assumes that the operand has
+// already been checked and is a number.
+static void LoadComparisonOperand(MacroAssembler* masm_,
+                                  Result* operand,
+                                  Result* left_side,
+                                  Result* right_side) {
+  Label done;
+  if (operand->number_info().IsHeapNumber()) {
+    // Operand is known to be a heap number, just load it.
+    __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+  } else if (operand->number_info().IsSmi()) {
+    // Operand is known to be a smi. Convert it to double and keep the original
+    // smi.
+    __ SmiUntag(operand->reg());
+    __ push(operand->reg());
+    __ fild_s(Operand(esp, 0));
+    __ pop(operand->reg());
+    __ SmiTag(operand->reg());
+  } else {
+    // Operand type not known, check for smi otherwise assume heap number.
+    Label smi;
+    __ test(operand->reg(), Immediate(kSmiTagMask));
+    __ j(zero, &smi);
+    __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+    __ jmp(&done);
+    __ bind(&smi);
+    __ SmiUntag(operand->reg());
+    __ push(operand->reg());
+    __ fild_s(Operand(esp, 0));
+    __ pop(operand->reg());
+    __ SmiTag(operand->reg());
+    __ jmp(&done);
+  }
+  __ bind(&done);
+}
+
+
+// Load a comparison operand into an XMM register. Jump to not_numbers jump
+// target passing the left and right result if the operand is not a number.
+static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
+                                      Result* operand,
+                                      XMMRegister reg,
+                                      Result* left_side,
+                                      Result* right_side,
+                                      JumpTarget* not_numbers) {
+  Label done;
+  if (operand->number_info().IsHeapNumber()) {
+    // Operand is known to be a heap number, just load it.
+    __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+  } else if (operand->number_info().IsSmi()) {
+    // Operand is known to be a smi. Convert it to double and keep the original
+    // smi.
+    __ SmiUntag(operand->reg());
+    __ cvtsi2sd(reg, Operand(operand->reg()));
+    __ SmiTag(left_side->reg());
+  } else {
+    // Operand type not known, check for smi or heap number.
+    Label smi;
+    __ test(operand->reg(), Immediate(kSmiTagMask));
+    __ j(zero, &smi);
+    if (!operand->number_info().IsNumber()) {
+      __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
+             Immediate(Factory::heap_number_map()));
+      not_numbers->Branch(not_equal, left_side, right_side, taken);
+    }
+    __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
+    __ jmp(&done);
+
+    __ bind(&smi);
+    // Convert smi to double and keep the original smi.
+    __ SmiUntag(operand->reg());
+    __ cvtsi2sd(reg, Operand(operand->reg()));
+    __ SmiTag(operand->reg());
+    __ jmp(&done);
+  }
+  __ bind(&done);
+}
+
+
+void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
+                                                   Result* right_side,
+                                                   Condition cc,
+                                                   ControlDestination* dest) {
+  ASSERT(left_side->is_register());
+  ASSERT(right_side->is_register());
+
+  JumpTarget not_numbers;
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+
+    // Load left and right operand into registers xmm0 and xmm1 and compare.
+    LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side,
+                              &not_numbers);
+    LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
+                              &not_numbers);
+    __ comisd(xmm0, xmm1);
+  } else {
+    Label check_right, compare;
+
+    // Make sure that both comparison operands are numbers.
+    CheckComparisonOperand(masm_, left_side, left_side, right_side,
+                           &not_numbers);
+    CheckComparisonOperand(masm_, right_side, left_side, right_side,
+                           &not_numbers);
+
+    // Load right and left operand to FPU stack and compare.
+    LoadComparisonOperand(masm_, right_side, left_side, right_side);
+    LoadComparisonOperand(masm_, left_side, left_side, right_side);
+    __ FCmp();
+  }
+
+  // Bail out if a NaN is involved.
+  not_numbers.Branch(parity_even, left_side, right_side, not_taken);
+
+  // Split to destination targets based on comparison.
+  left_side->Unuse();
+  right_side->Unuse();
+  dest->true_target()->Branch(DoubleCondition(cc));
+  dest->false_target()->Jump();
+
+  not_numbers.Bind(left_side, right_side);
+}
+
+
 // Call the function just below TOS on the stack with the given
 // arguments. The receiver is the TOS.
 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
@@ -4421,9 +4622,8 @@
 }
 
 
-Result CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
-  ASSERT(boilerplate->IsBoilerplate());
-
+Result CodeGenerator::InstantiateFunction(
+    Handle<SharedFunctionInfo> function_info) {
   // The inevitable call will sync frame elements to memory anyway, so
   // we do it eagerly to allow us to push the arguments directly into
   // place.
@@ -4431,15 +4631,15 @@
 
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
-  if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+  if (scope()->is_function_scope() && function_info->num_literals() == 0) {
     FastNewClosureStub stub;
-    frame()->EmitPush(Immediate(boilerplate));
+    frame()->EmitPush(Immediate(function_info));
     return frame()->CallStub(&stub, 1);
   } else {
     // Call the runtime to instantiate the function boilerplate
     // object.
     frame()->EmitPush(esi);
-    frame()->EmitPush(Immediate(boilerplate));
+    frame()->EmitPush(Immediate(function_info));
     return frame()->CallRuntime(Runtime::kNewClosure, 2);
   }
 }
@@ -4448,21 +4648,21 @@
 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
   Comment cmnt(masm_, "[ FunctionLiteral");
   ASSERT(!in_safe_int32_mode());
-  // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script(), this);
+  // Build the function info and instantiate it.
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) return;
-  Result result = InstantiateBoilerplate(boilerplate);
+  Result result = InstantiateFunction(function_info);
   frame()->Push(&result);
 }
 
 
-void CodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void CodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
-  Result result = InstantiateBoilerplate(node->boilerplate());
+  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
+  Result result = InstantiateFunction(node->shared_function_info());
   frame()->Push(&result);
 }
 
@@ -7023,15 +7223,39 @@
     case Token::BIT_OR:
     case Token::BIT_XOR:
     case Token::BIT_AND:
-      left.ToRegister();
-      right.ToRegister();
-      if (op == Token::BIT_OR) {
-        __ or_(left.reg(), Operand(right.reg()));
-      } else if (op == Token::BIT_XOR) {
-        __ xor_(left.reg(), Operand(right.reg()));
+      if (left.is_constant() || right.is_constant()) {
+        int32_t value;  // Put constant in value, non-constant in left.
+        // Constants are known to be int32 values, from static analysis,
+        // or else will be converted to int32 by implicit ECMA [[ToInt32]].
+        if (left.is_constant()) {
+          ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
+          value = NumberToInt32(*left.handle());
+          left = right;
+        } else {
+          ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
+          value = NumberToInt32(*right.handle());
+        }
+
+        left.ToRegister();
+        if (op == Token::BIT_OR) {
+          __ or_(Operand(left.reg()), Immediate(value));
+        } else if (op == Token::BIT_XOR) {
+          __ xor_(Operand(left.reg()), Immediate(value));
+        } else {
+          ASSERT(op == Token::BIT_AND);
+          __ and_(Operand(left.reg()), Immediate(value));
+        }
       } else {
-        ASSERT(op == Token::BIT_AND);
-        __ and_(left.reg(), Operand(right.reg()));
+        ASSERT(left.is_register());
+        ASSERT(right.is_register());
+        if (op == Token::BIT_OR) {
+          __ or_(left.reg(), Operand(right.reg()));
+        } else if (op == Token::BIT_XOR) {
+          __ xor_(left.reg(), Operand(right.reg()));
+        } else {
+          ASSERT(op == Token::BIT_AND);
+          __ and_(left.reg(), Operand(right.reg()));
+        }
       }
       frame_->Push(&left);
       right.Unuse();
@@ -7090,16 +7314,39 @@
     case Token::ADD:
     case Token::SUB:
     case Token::MUL:
-      left.ToRegister();
-      right.ToRegister();
-      if (op == Token::ADD) {
-        __ add(left.reg(), Operand(right.reg()));
-      } else if (op == Token::SUB) {
-        __ sub(left.reg(), Operand(right.reg()));
+      if ((left.is_constant() && op != Token::SUB) || right.is_constant()) {
+        int32_t value;  // Put constant in value, non-constant in left.
+        if (right.is_constant()) {
+          ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
+          value = NumberToInt32(*right.handle());
+        } else {
+          ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
+          value = NumberToInt32(*left.handle());
+          left = right;
+        }
+
+        left.ToRegister();
+        if (op == Token::ADD) {
+          __ add(Operand(left.reg()), Immediate(value));
+        } else if (op == Token::SUB) {
+          __ sub(Operand(left.reg()), Immediate(value));
+        } else {
+          ASSERT(op == Token::MUL);
+          __ imul(left.reg(), left.reg(), value);
+        }
       } else {
-        ASSERT(op == Token::MUL);
-        // We have statically verified that a negative zero can be ignored.
-        __ imul(left.reg(), Operand(right.reg()));
+        left.ToRegister();
+        ASSERT(left.is_register());
+        ASSERT(right.is_register());
+        if (op == Token::ADD) {
+          __ add(left.reg(), Operand(right.reg()));
+        } else if (op == Token::SUB) {
+          __ sub(left.reg(), Operand(right.reg()));
+        } else {
+          ASSERT(op == Token::MUL);
+          // We have statically verified that a negative zero can be ignored.
+          __ imul(left.reg(), Operand(right.reg()));
+        }
       }
       right.Unuse();
       frame_->Push(&left);
@@ -7133,6 +7380,15 @@
       __ cdq();  // Sign-extend eax into edx:eax
       __ idiv(right_reg);
       if (op == Token::MOD) {
+        // Negative zero can arise as a negative divident with a zero result.
+        if (!node->no_negative_zero()) {
+          Label not_negative_zero;
+          __ test(edx, Operand(edx));
+          __ j(not_zero, &not_negative_zero);
+          __ test(eax, Operand(eax));
+          unsafe_bailout_->Branch(negative);
+          __ bind(&not_negative_zero);
+        }
         Result edx_result(edx, NumberInfo::Integer32());
         edx_result.set_untagged_int32(true);
         frame_->Push(&edx_result);
@@ -8205,12 +8461,12 @@
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
-  // Clone the boilerplate in new space. Set the context to the
-  // current context in esi.
+  // Create a new closure from the given function info in new
+  // space. Set the context to the current context in esi.
   Label gc;
   __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
 
-  // Get the boilerplate function from the stack.
+  // Get the function info from the stack.
   __ mov(edx, Operand(esp, 1 * kPointerSize));
 
   // Compute the function map in the current global context and set that
@@ -8220,18 +8476,16 @@
   __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
   __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
 
-  // Clone the rest of the boilerplate fields. We don't have to update
-  // the write barrier because the allocated object is in new space.
-  for (int offset = kPointerSize;
-       offset < JSFunction::kSize;
-       offset += kPointerSize) {
-    if (offset == JSFunction::kContextOffset) {
-      __ mov(FieldOperand(eax, offset), esi);
-    } else {
-      __ mov(ebx, FieldOperand(edx, offset));
-      __ mov(FieldOperand(eax, offset), ebx);
-    }
-  }
+  // Initialize the rest of the function. We don't have to update the
+  // write barrier because the allocated object is in new space.
+  __ mov(ebx, Immediate(Factory::empty_fixed_array()));
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
+  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
+         Immediate(Factory::the_hole_value()));
+  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
+  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
+  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
 
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
@@ -10824,63 +11078,70 @@
   __ push(edx);
   __ push(ecx);
 
-  // Inlined floating point compare.
-  // Call builtin if operands are not floating point or smi.
-  Label check_for_symbols;
-  Label unordered;
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    CpuFeatures::Scope use_cmov(CMOV);
+  // Generate the number comparison code.
+  if (include_number_compare_) {
+    Label non_number_comparison;
+    Label unordered;
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope use_sse2(SSE2);
+      CpuFeatures::Scope use_cmov(CMOV);
 
-    FloatingPointHelper::LoadSSE2Operands(masm, &check_for_symbols);
-    __ comisd(xmm0, xmm1);
+      FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+      __ comisd(xmm0, xmm1);
 
-    // Jump to builtin for NaN.
-    __ j(parity_even, &unordered, not_taken);
-    __ mov(eax, 0);  // equal
-    __ mov(ecx, Immediate(Smi::FromInt(1)));
-    __ cmov(above, eax, Operand(ecx));
-    __ mov(ecx, Immediate(Smi::FromInt(-1)));
-    __ cmov(below, eax, Operand(ecx));
-    __ ret(2 * kPointerSize);
-  } else {
-    FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
-    FloatingPointHelper::LoadFloatOperands(masm, ecx);
-    __ FCmp();
+      // Don't base result on EFLAGS when a NaN is involved.
+      __ j(parity_even, &unordered, not_taken);
+      // Return a result of -1, 0, or 1, based on EFLAGS.
+      __ mov(eax, 0);  // equal
+      __ mov(ecx, Immediate(Smi::FromInt(1)));
+      __ cmov(above, eax, Operand(ecx));
+      __ mov(ecx, Immediate(Smi::FromInt(-1)));
+      __ cmov(below, eax, Operand(ecx));
+      __ ret(2 * kPointerSize);
+    } else {
+      FloatingPointHelper::CheckFloatOperands(
+          masm, &non_number_comparison, ebx);
+      FloatingPointHelper::LoadFloatOperands(masm, ecx);
+      __ FCmp();
 
-    // Jump to builtin for NaN.
-    __ j(parity_even, &unordered, not_taken);
+      // Don't base result on EFLAGS when a NaN is involved.
+      __ j(parity_even, &unordered, not_taken);
 
-    Label below_lbl, above_lbl;
-    // Return a result of -1, 0, or 1, to indicate result of comparison.
-    __ j(below, &below_lbl, not_taken);
-    __ j(above, &above_lbl, not_taken);
+      Label below_label, above_label;
+      // Return a result of -1, 0, or 1, based on EFLAGS. In all cases remove
+      // two arguments from the stack as they have been pushed in preparation
+      // of a possible runtime call.
+      __ j(below, &below_label, not_taken);
+      __ j(above, &above_label, not_taken);
 
-    __ xor_(eax, Operand(eax));  // equal
-    // Both arguments were pushed in case a runtime call was needed.
-    __ ret(2 * kPointerSize);
+      __ xor_(eax, Operand(eax));
+      __ ret(2 * kPointerSize);
 
-    __ bind(&below_lbl);
-    __ mov(eax, Immediate(Smi::FromInt(-1)));
-    __ ret(2 * kPointerSize);
+      __ bind(&below_label);
+      __ mov(eax, Immediate(Smi::FromInt(-1)));
+      __ ret(2 * kPointerSize);
 
-    __ bind(&above_lbl);
-    __ mov(eax, Immediate(Smi::FromInt(1)));
+      __ bind(&above_label);
+      __ mov(eax, Immediate(Smi::FromInt(1)));
+      __ ret(2 * kPointerSize);
+    }
+
+    // If one of the numbers was NaN, then the result is always false.
+    // The cc is never not-equal.
+    __ bind(&unordered);
+    ASSERT(cc_ != not_equal);
+    if (cc_ == less || cc_ == less_equal) {
+      __ mov(eax, Immediate(Smi::FromInt(1)));
+    } else {
+      __ mov(eax, Immediate(Smi::FromInt(-1)));
+    }
     __ ret(2 * kPointerSize);  // eax, edx were pushed
+
+    // The number comparison code did not provide a valid result.
+    __ bind(&non_number_comparison);
   }
-  // If one of the numbers was NaN, then the result is always false.
-  // The cc is never not-equal.
-  __ bind(&unordered);
-  ASSERT(cc_ != not_equal);
-  if (cc_ == less || cc_ == less_equal) {
-    __ mov(eax, Immediate(Smi::FromInt(1)));
-  } else {
-    __ mov(eax, Immediate(Smi::FromInt(-1)));
-  }
-  __ ret(2 * kPointerSize);  // eax, edx were pushed
 
   // Fast negative check for symbol-to-symbol equality.
-  __ bind(&check_for_symbols);
   Label check_for_strings;
   if (cc_ == equal) {
     BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
@@ -11490,55 +11751,59 @@
 }
 
 
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
-  switch (cc_) {
-    case less: return "CompareStub_LT";
-    case greater: return "CompareStub_GT";
-    case less_equal: return "CompareStub_LE";
-    case greater_equal: return "CompareStub_GE";
-    case not_equal: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_NE_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_NO_NAN";
-        } else {
-          return "CompareStub_NE";
-        }
-      }
-    }
-    case equal: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_EQ_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_NO_NAN";
-        } else {
-          return "CompareStub_EQ";
-        }
-      }
-    }
-    default: return "CompareStub";
-  }
+int CompareStub::MinorKey() {
+  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+  // stubs the never NaN NaN condition is only taken into account if the
+  // condition is equals.
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
+  return ConditionField::encode(static_cast<unsigned>(cc_))
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+         | IncludeNumberCompareField::encode(include_number_compare_);
 }
 
 
-int CompareStub::MinorKey() {
-  // Encode the three parameters in a unique 16 bit value.
-  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
-  int nnn_value = (never_nan_nan_ ? 2 : 0);
-  if (cc_ != equal) nnn_value = 0;  // Avoid duplicate stubs.
-  return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
+  switch (cc_) {
+    case less: cc_name = "LT"; break;
+    case greater: cc_name = "GT"; break;
+    case less_equal: cc_name = "LE"; break;
+    case greater_equal: cc_name = "GE"; break;
+    case equal: cc_name = "EQ"; break;
+    case not_equal: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
+  }
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == equal || cc_ == not_equal)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s",
+               cc_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name);
+  return name_;
 }
 
 
@@ -12172,6 +12437,9 @@
   Label result_not_equal;
   Label result_greater;
   Label compare_lengths;
+
+  __ IncrementCounter(&Counters::string_compare_native, 1);
+
   // Find minimum length.
   Label left_shorter;
   __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
@@ -12269,7 +12537,6 @@
   __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
 
   // Compare flat ascii strings.
-  __ IncrementCounter(&Counters::string_compare_native, 1);
   GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
 
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index c7ff2e8..e661a41 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -528,6 +528,10 @@
                   Condition cc,
                   bool strict,
                   ControlDestination* destination);
+  void GenerateInlineNumberComparison(Result* left_side,
+                                      Result* right_side,
+                                      Condition cc,
+                                      ControlDestination* dest);
 
   // To prevent long attacker-controlled byte sequences, integer constants
   // from the JavaScript source are loaded in two parts if they are larger
@@ -574,8 +578,8 @@
   // name/value pairs.
   void DeclareGlobals(Handle<FixedArray> pairs);
 
-  // Instantiate the function boilerplate.
-  Result InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+  // Instantiate the function based on the shared function info.
+  Result InstantiateFunction(Handle<SharedFunctionInfo> function_info);
 
   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
index 01190a5..61e2b5e 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -195,9 +195,9 @@
 }
 
 
-void FastCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
+void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
 }
 
 
@@ -764,8 +764,8 @@
 }
 
 
-void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FastCodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index cedf9c9..3cc56e1 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -777,15 +777,13 @@
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script(), this);
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(expr, script(), this);
   if (HasStackOverflow()) return;
 
-  ASSERT(boilerplate->IsBoilerplate());
-
   // Create a new closure.
   __ push(esi);
-  __ push(Immediate(boilerplate));
+  __ push(Immediate(function_info));
   __ CallRuntime(Runtime::kNewClosure, 2);
   Apply(context_, eax);
 }
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 3928661..8d6c346 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -254,23 +254,6 @@
 }
 
 
-// Helper function used to check that a value is either not an object
-// or is loaded if it is an object.
-static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
-                                           Register value, Register scratch) {
-  Label done;
-  // Check if the value is a Smi.
-  __ test(value, Immediate(kSmiTagMask));
-  __ j(zero, &done, not_taken);
-  // Check if the object has been loaded.
-  __ mov(scratch, FieldOperand(value, JSFunction::kMapOffset));
-  __ mov(scratch, FieldOperand(scratch, Map::kBitField2Offset));
-  __ test(scratch, Immediate(1 << Map::kNeedsLoading));
-  __ j(not_zero, miss, not_taken);
-  __ bind(&done);
-}
-
-
 // The offset from the inlined patch site to the start of the
 // inlined load instruction.  It is 7 bytes (test eax, imm) plus
 // 6 bytes (jne slow_label).
@@ -495,7 +478,6 @@
                          ecx,
                          edi,
                          DICTIONARY_CHECK_DONE);
-  GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, ebx);
   __ mov(eax, ecx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
@@ -1146,11 +1128,6 @@
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
   __ j(not_equal, miss, not_taken);
 
-  // Check that the function has been loaded.  eax holds function's map.
-  __ mov(eax, FieldOperand(eax, Map::kBitField2Offset));
-  __ test(eax, Immediate(1 << Map::kNeedsLoading));
-  __ j(not_zero, miss, not_taken);
-
   // Patch the receiver on stack with the global proxy if necessary.
   if (is_global_object) {
     __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
@@ -1341,7 +1318,6 @@
                          edi,
                          ebx,
                          CHECK_DICTIONARY);
-  GenerateCheckNonObjectOrLoaded(masm, &miss, edi, edx);
   __ mov(eax, edi);
   __ ret(0);
 
diff --git a/src/ic.cc b/src/ic.cc
index 2b97a8b..a6d2020 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -436,7 +436,7 @@
   }
 
   // Lookup is valid: Update inline cache and stub cache.
-  if (FLAG_use_ic && lookup.IsLoaded()) {
+  if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, object, name);
   }
 
@@ -484,7 +484,6 @@
                           State state,
                           Handle<Object> object,
                           Handle<String> name) {
-  ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
@@ -647,7 +646,6 @@
       FLAG_use_ic &&
       state == PREMONOMORPHIC &&
       lookup.IsProperty() &&
-      lookup.IsLoaded() &&
       lookup.IsCacheable() &&
       lookup.holder() == *object &&
       lookup.type() == FIELD &&
@@ -669,7 +667,7 @@
   }
 
   // Update inline cache and stub cache.
-  if (FLAG_use_ic && lookup.IsLoaded()) {
+  if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, object, name);
   }
 
@@ -695,7 +693,6 @@
                           State state,
                           Handle<Object> object,
                           Handle<String> name) {
-  ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
@@ -857,7 +854,7 @@
       }
     }
 
-    if (FLAG_use_ic && lookup.IsLoaded()) {
+    if (FLAG_use_ic) {
       UpdateCaches(&lookup, state, object, name);
     }
 
@@ -912,7 +909,6 @@
 
 void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
                                Handle<Object> object, Handle<String> name) {
-  ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
@@ -993,8 +989,6 @@
   // state.
   if (lookup->IsReadOnly()) return false;
 
-  if (!lookup->IsLoaded()) return false;
-
   return true;
 }
 
@@ -1073,7 +1067,6 @@
                            Handle<JSObject> receiver,
                            Handle<String> name,
                            Handle<Object> value) {
-  ASSERT(lookup->IsLoaded());
   // Skip JSGlobalProxy.
   ASSERT(!receiver->IsJSGlobalProxy());
 
@@ -1181,7 +1174,7 @@
     receiver->LocalLookup(*name, &lookup);
 
     // Update inline cache and stub cache.
-    if (FLAG_use_ic && lookup.IsLoaded()) {
+    if (FLAG_use_ic) {
       UpdateCaches(&lookup, state, receiver, name, value);
     }
 
@@ -1215,8 +1208,6 @@
                                 Handle<JSObject> receiver,
                                 Handle<String> name,
                                 Handle<Object> value) {
-  ASSERT(lookup->IsLoaded());
-
   // Skip JSGlobalProxy.
   if (receiver->IsJSGlobalProxy()) return;
 
diff --git a/src/json-delay.js b/src/json.js
similarity index 100%
rename from src/json-delay.js
rename to src/json.js
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index f3c801c..5a0a482 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -66,11 +66,6 @@
                                                Handle<String> pattern,
                                                Handle<String> flags,
                                                bool* has_pending_exception) {
-  // Ensure that the constructor function has been loaded.
-  if (!constructor->IsLoaded()) {
-    LoadLazy(constructor, has_pending_exception);
-    if (*has_pending_exception) return Handle<Object>();
-  }
   // Call the construct code with 2 arguments.
   Object** argv[2] = { Handle<Object>::cast(pattern).location(),
                        Handle<Object>::cast(flags).location() };
diff --git a/src/jump-target.cc b/src/jump-target.cc
index 7b1ced7..8e949fb 100644
--- a/src/jump-target.cc
+++ b/src/jump-target.cc
@@ -290,6 +290,25 @@
 }
 
 
+void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
+  ASSERT(cgen()->has_valid_frame());
+
+  // We want to check that non-frame registers at the call site stay in
+  // the same registers on the fall-through branch.
+  DECLARE_ARGCHECK_VARS(arg0);
+  DECLARE_ARGCHECK_VARS(arg1);
+
+  cgen()->frame()->Push(arg0);
+  cgen()->frame()->Push(arg1);
+  DoBranch(cc, hint);
+  *arg1 = cgen()->frame()->Pop();
+  *arg0 = cgen()->frame()->Pop();
+
+  ASSERT_ARGCHECK(arg0);
+  ASSERT_ARGCHECK(arg1);
+}
+
+
 void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
   ASSERT(cgen()->has_valid_frame());
 
@@ -331,6 +350,17 @@
 }
 
 
+void JumpTarget::Bind(Result* arg0, Result* arg1) {
+  if (cgen()->has_valid_frame()) {
+    cgen()->frame()->Push(arg0);
+    cgen()->frame()->Push(arg1);
+  }
+  DoBind();
+  *arg1 = cgen()->frame()->Pop();
+  *arg0 = cgen()->frame()->Pop();
+}
+
+
 void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
   ASSERT(reaching_frames_.length() == merge_labels_.length());
   ASSERT(entry_frame_ == NULL);
diff --git a/src/jump-target.h b/src/jump-target.h
index db7c115..db523b5 100644
--- a/src/jump-target.h
+++ b/src/jump-target.h
@@ -117,12 +117,17 @@
   // the target and the fall-through.
   virtual void Branch(Condition cc, Hint hint = no_hint);
   virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
+  virtual void Branch(Condition cc,
+                      Result* arg0,
+                      Result* arg1,
+                      Hint hint = no_hint);
 
   // Bind a jump target.  If there is no current frame at the binding
   // site, there must be at least one frame reaching via a forward
   // jump.
   virtual void Bind();
   virtual void Bind(Result* arg);
+  virtual void Bind(Result* arg0, Result* arg1);
 
   // Emit a call to a jump target.  There must be a current frame at
   // the call.  The frame at the target is the same as the current
diff --git a/src/liveedit-delay.js b/src/liveedit-debugger.js
similarity index 99%
rename from src/liveedit-delay.js
rename to src/liveedit-debugger.js
index 41f894a..e336db7 100644
--- a/src/liveedit-delay.js
+++ b/src/liveedit-debugger.js
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // LiveEdit feature implementation. The script should be executed after
-// debug-delay.js.
+// debug-debugger.js.
 
 
 // Changes script text and recompiles all relevant functions if possible.
diff --git a/src/liveedit.h b/src/liveedit.h
index efbcd74..2a9cc62 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -91,7 +91,7 @@
   static void PatchFunctionPositions(Handle<JSArray> shared_info_array,
                                      Handle<JSArray> position_change_array);
 
-  // A copy of this is in liveedit-delay.js.
+  // A copy of this is in liveedit-debugger.js.
   enum FunctionPatchabilityStatus {
     FUNCTION_AVAILABLE_FOR_PATCH = 0,
     FUNCTION_BLOCKED_ON_STACK = 1
diff --git a/src/macros.py b/src/macros.py
index 122b057..414b4c0 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -120,10 +120,6 @@
 # Macros implemented in Python.
 python macro CHAR_CODE(str) = ord(str[1]);
 
-# Accessors for original global properties that ensure they have been loaded.
-const ORIGINAL_REGEXP = (global.RegExp, $RegExp);
-const ORIGINAL_DATE   = (global.Date, $Date);
-
 # Constants used on an array to implement the properties of the RegExp object.
 const REGEXP_NUMBER_OF_CAPTURES = 0;
 const REGEXP_FIRST_CAPTURE = 3;
@@ -132,6 +128,9 @@
 # REGEXP_NUMBER_OF_CAPTURES
 macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
 
+# Limit according to ECMA 262 15.9.1.1
+const MAX_TIME_MS = 8640000000000000;
+
 # Gets the value of a Date object. If arg is not a Date object
 # a type error is thrown.
 macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
diff --git a/src/messages.js b/src/messages.js
index cb392ff..b8a1070 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -27,6 +27,16 @@
 
 
 // -------------------------------------------------------------------
+//
+// Matches Script::Type from objects.h
+var TYPE_NATIVE = 0;
+var TYPE_EXTENSION = 1;
+var TYPE_NORMAL = 2;
+
+// Matches Script::CompilationType from objects.h
+var COMPILATION_TYPE_HOST = 0;
+var COMPILATION_TYPE_EVAL = 1;
+var COMPILATION_TYPE_JSON = 2;
 
 // Lazily initialized.
 var kVowelSounds = 0;
@@ -634,7 +644,7 @@
 
 CallSite.prototype.isEval = function () {
   var script = %FunctionGetScript(this.fun);
-  return script && script.compilation_type == 1;
+  return script && script.compilation_type == COMPILATION_TYPE_EVAL;
 };
 
 CallSite.prototype.getEvalOrigin = function () {
@@ -656,7 +666,7 @@
   }
   // Maybe this is an evaluation?
   var script = %FunctionGetScript(this.fun);
-  if (script && script.compilation_type == 1)
+  if (script && script.compilation_type == COMPILATION_TYPE_EVAL)
     return "eval";
   return null;
 };
@@ -712,7 +722,7 @@
 
 CallSite.prototype.isNative = function () {
   var script = %FunctionGetScript(this.fun);
-  return script ? (script.type == 0) : false;
+  return script ? (script.type == TYPE_NATIVE) : false;
 };
 
 CallSite.prototype.getPosition = function () {
@@ -736,7 +746,7 @@
   
   var eval_from_script = script.eval_from_script;
   if (eval_from_script) {
-    if (eval_from_script.compilation_type == 1) {
+    if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
       // eval script originated from another eval.
       eval_origin += " (eval at " + FormatEvalOrigin(eval_from_script) + ")";
     } else {
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 3bd42ed..04bcfeb 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -74,7 +74,99 @@
 
 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                              bool is_construct) {
-  UNIMPLEMENTED_MIPS();
+  // Called from JSEntryStub::GenerateBody
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: reveiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // arguments slots
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  // Clear the context before we push it when entering the JS frame.
+  __ li(cp, Operand(0));
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Set up the context from the function argument.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Set up the roots register.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ li(s6, Operand(roots_address));
+
+  // Push the function and the receiver onto the stack.
+  __ MultiPushReversed(a1.bit() | a2.bit());
+
+  // Copy arguments to the stack in a loop.
+  // a3: argc
+  // s0: argv, ie points to first arg
+  Label loop, entry;
+  __ sll(t0, a3, kPointerSizeLog2);
+  __ add(t2, s0, t0);
+  __ b(&entry);
+  __ nop();   // Branch delay slot nop.
+  // t2 points past last arg.
+  __ bind(&loop);
+  __ lw(t0, MemOperand(s0));  // Read next parameter.
+  __ addiu(s0, s0, kPointerSize);
+  __ lw(t0, MemOperand(t0));  // Dereference handle.
+  __ Push(t0);  // Push parameter.
+  __ bind(&entry);
+  __ Branch(ne, &loop, s0, Operand(t2));
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: reveiver_pointer
+  // a3: argc
+  // s0: argv
+  // s6: roots_address
+  //
+  // Stack:
+  // arguments
+  // receiver
+  // function
+  // arguments slots
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  // Initialize all JavaScript callee-saved registers, since they will be seen
+  // by the garbage collector as part of handlers.
+  __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
+  __ mov(s1, t4);
+  __ mov(s2, t4);
+  __ mov(s3, t4);
+  __ mov(s4, s4);
+  __ mov(s5, t4);
+  // s6 holds the root address. Do not clobber.
+  // s7 is cp. Do not init.
+
+  // Invoke the code and pass argc as a0.
+  __ mov(a0, a3);
+  if (is_construct) {
+    UNIMPLEMENTED_MIPS();
+    __ break_(0x164);
+  } else {
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, actual, CALL_FUNCTION);
+  }
+
+  __ LeaveInternalFrame();
+
+  __ Jump(ra);
 }
 
 
@@ -100,6 +192,7 @@
 
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
+  __ break_(0x201);
 }
 
 
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
index 904dd74..3a511b8 100644
--- a/src/mips/codegen-mips-inl.h
+++ b/src/mips/codegen-mips-inl.h
@@ -36,7 +36,31 @@
 
 // Platform-specific inline functions.
 
-void DeferredCode::Jump() { __ b(&entry_label_); }
+void DeferredCode::Jump() {
+  __ b(&entry_label_);
+  __ nop();
+}
+
+
+void Reference::GetValueAndSpill() {
+  GetValue();
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+  Visit(statement);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+  VisitStatements(statements);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression) {
+  Load(expression);
+}
+
 
 #undef __
 
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 7b32180..0936a6d 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -30,13 +30,14 @@
 
 #include "bootstrapper.h"
 #include "codegen-inl.h"
+#include "compiler.h"
 #include "debug.h"
 #include "ic-inl.h"
 #include "parser.h"
 #include "register-allocator-inl.h"
 #include "runtime.h"
 #include "scopes.h"
-#include "compiler.h"
+#include "virtual-frame-inl.h"
 
 
 
@@ -47,7 +48,7 @@
 
 
 
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
 // Platform-specific DeferredCode functions.
 
 
@@ -61,13 +62,41 @@
 }
 
 
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
+// CodeGenState implementation.
+
+CodeGenState::CodeGenState(CodeGenerator* owner)
+    : owner_(owner),
+      true_target_(NULL),
+      false_target_(NULL),
+      previous_(NULL) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::CodeGenState(CodeGenerator* owner,
+                           JumpTarget* true_target,
+                           JumpTarget* false_target)
+    : owner_(owner),
+      true_target_(true_target),
+      false_target_(false_target),
+      previous_(owner->state()) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::~CodeGenState() {
+  ASSERT(owner_->state() == this);
+  owner_->set_state(previous_);
+}
+
+
+// -----------------------------------------------------------------------------
 // CodeGenerator implementation
 
 CodeGenerator::CodeGenerator(MacroAssembler* masm)
     : deferred_(8),
       masm_(masm),
-      scope_(NULL),
       frame_(NULL),
       allocator_(NULL),
       cc_reg_(cc_always),
@@ -77,18 +106,362 @@
 
 
 // Calling conventions:
-// s8_fp: caller's frame pointer
+// fp: caller's frame pointer
 // sp: stack pointer
 // a1: called JS function
 // cp: callee's context
 
-void CodeGenerator::Generate(CompilationInfo* infomode) {
-  UNIMPLEMENTED_MIPS();
+void CodeGenerator::Generate(CompilationInfo* info) {
+  // Record the position for debugging purposes.
+  CodeForFunctionPosition(info->function());
+
+  // Initialize state.
+  info_ = info;
+  ASSERT(allocator_ == NULL);
+  RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
+  ASSERT(frame_ == NULL);
+  frame_ = new VirtualFrame();
+  cc_reg_ = cc_always;
+
+  {
+    CodeGenState state(this);
+
+    // Registers:
+    // a1: called JS function
+    // ra: return address
+    // fp: caller's frame pointer
+    // sp: stack pointer
+    // cp: callee's context
+    //
+    // Stack:
+    // arguments
+    // receiver
+
+    frame_->Enter();
+
+    // Allocate space for locals and initialize them.
+    frame_->AllocateStackSlots();
+
+    // Initialize the function return target.
+    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+    function_return_is_shadowed_ = false;
+
+    VirtualFrame::SpilledScope spilled_scope;
+    if (scope()->num_heap_slots() > 0) {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    {
+      Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+      // Note that iteration order is relevant here! If we have the same
+      // parameter twice (e.g., function (x, y, x)), and that parameter
+      // needs to be copied into the context, it must be the last argument
+      // passed to the parameter that needs to be copied. This is a rare
+      // case so we don't check for it, instead we rely on the copying
+      // order: such a parameter is copied repeatedly into the same
+      // context location and thus the last value is what is seen inside
+      // the function.
+      for (int i = 0; i < scope()->num_parameters(); i++) {
+        UNIMPLEMENTED_MIPS();
+      }
+    }
+
+    // Store the arguments object.  This must happen after context
+    // initialization because the arguments object may be stored in the
+    // context.
+    if (scope()->arguments() != NULL) {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    // Generate code to 'execute' declarations and initialize functions
+    // (source elements). In case of an illegal redeclaration we need to
+    // handle that instead of processing the declarations.
+    if (scope()->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ illegal redeclarations");
+      scope()->VisitIllegalRedeclaration(this);
+    } else {
+      Comment cmnt(masm_, "[ declarations");
+      ProcessDeclarations(scope()->declarations());
+      // Bail out if a stack-overflow exception occurred when processing
+      // declarations.
+      if (HasStackOverflow()) return;
+    }
+
+    if (FLAG_trace) {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    // Compile the body of the function in a vanilla state. Don't
+    // bother compiling all the code if the scope has an illegal
+    // redeclaration.
+    if (!scope()->HasIllegalRedeclaration()) {
+      Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+      bool is_builtin = Bootstrapper::IsActive();
+      bool should_trace =
+          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+      if (should_trace) {
+        UNIMPLEMENTED_MIPS();
+      }
+#endif
+      VisitStatementsAndSpill(info->function()->body());
+    }
+  }
+
+  if (has_valid_frame() || function_return_.is_linked()) {
+    if (!function_return_.is_linked()) {
+      CodeForReturnPosition(info->function());
+    }
+    // Registers:
+    // v0: result
+    // sp: stack pointer
+    // fp: frame pointer
+    // cp: callee's context
+
+    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+
+    function_return_.Bind();
+    if (FLAG_trace) {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    masm_->bind(&check_exit_codesize);
+
+    masm_->mov(sp, fp);
+    masm_->lw(fp, MemOperand(sp, 0));
+    masm_->lw(ra, MemOperand(sp, 4));
+    masm_->addiu(sp, sp, 8);
+
+    // Here we use masm_-> instead of the __ macro to avoid the code coverage
+    // tool from instrumenting as we rely on the code size here.
+    // TODO(MIPS): Should we be able to use more than 0x1ffe parameters?
+    masm_->addiu(sp, sp, (scope()->num_parameters() + 1) * kPointerSize);
+    masm_->Jump(ra);
+    // The Jump automatically generates a nop in the branch delay slot.
+
+    // Check that the size of the code used for returning matches what is
+    // expected by the debugger.
+    ASSERT_EQ(kJSReturnSequenceLength,
+              masm_->InstructionsGeneratedSince(&check_exit_codesize));
+  }
+
+  // Code generation state must be reset.
+  ASSERT(!has_cc());
+  ASSERT(state_ == NULL);
+  ASSERT(!function_return_is_shadowed_);
+  function_return_.Unuse();
+  DeleteFrame();
+
+  // Process any deferred code using the register allocator.
+  if (!HasStackOverflow()) {
+    ProcessDeferred();
+  }
+
+  allocator_ = NULL;
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ LoadReference");
+  Expression* e = ref->expression();
+  Property* property = e->AsProperty();
+  Variable* var = e->AsVariableProxy()->AsVariable();
+
+  if (property != NULL) {
+    UNIMPLEMENTED_MIPS();
+  } else if (var != NULL) {
+    // The expression is a variable proxy that does not rewrite to a
+    // property.  Global variables are treated as named property references.
+    if (var->is_global()) {
+      LoadGlobal();
+      ref->set_type(Reference::NAMED);
+    } else {
+      ASSERT(var->slot() != NULL);
+      ref->set_type(Reference::SLOT);
+    }
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+  VirtualFrame::SpilledScope spilled_scope;
+  // Pop a reference from the stack while preserving TOS.
+  Comment cmnt(masm_, "[ UnloadReference");
+  int size = ref->size();
+  if (size > 0) {
+    frame_->EmitPop(a0);
+    frame_->Drop(size);
+    frame_->EmitPush(a0);
+  }
+  ref->set_unloaded();
+}
+
+
+MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+  // Currently, this assertion will fail if we try to assign to
+  // a constant variable that is constant because it is read-only
+  // (such as the variable referring to a named function expression).
+  // We need to implement assignments to read-only variables.
+  // Ideally, we should do this during AST generation (by converting
+  // such assignments into expression statements); however, in general
+  // we may not be able to make the decision until past AST generation,
+  // that is when the entire program is known.
+  ASSERT(slot != NULL);
+  int index = slot->index();
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+      UNIMPLEMENTED_MIPS();
+      return MemOperand(no_reg, 0);
+
+    case Slot::LOCAL:
+      return frame_->LocalAt(index);
+
+    case Slot::CONTEXT: {
+      UNIMPLEMENTED_MIPS();
+      return MemOperand(no_reg, 0);
+    }
+
+    default:
+      UNREACHABLE();
+      return MemOperand(no_reg, 0);
+  }
+}
+
+
+// Loads a value on TOS. If it is a boolean value, the result may have been
+// (partially) translated into branches, or it may have set the condition
+// code register. If force_cc is set, the value is forced to set the
+// condition code register and no value is pushed. If the condition code
+// register was set, has_cc() is true and cc_reg_ contains the condition to
+// test for 'true'.
+void CodeGenerator::LoadCondition(Expression* x,
+                                  JumpTarget* true_target,
+                                  JumpTarget* false_target,
+                                  bool force_cc) {
+  ASSERT(!has_cc());
+  int original_height = frame_->height();
+
+  { CodeGenState new_state(this, true_target, false_target);
+    Visit(x);
+
+    // If we hit a stack overflow, we may not have actually visited
+    // the expression. In that case, we ensure that we have a
+    // valid-looking frame state because we will continue to generate
+    // code as we unwind the C++ stack.
+    //
+    // It's possible to have both a stack overflow and a valid frame
+    // state (eg, a subexpression overflowed, visiting it returned
+    // with a dummied frame state, and visiting this expression
+    // returned with a normal-looking state).
+    if (HasStackOverflow() &&
+        has_valid_frame() &&
+        !has_cc() &&
+        frame_->height() == original_height) {
+      true_target->Jump();
+    }
+  }
+  if (force_cc && frame_ != NULL && !has_cc()) {
+    // Convert the TOS value to a boolean in the condition code register.
+    UNIMPLEMENTED_MIPS();
+  }
+  ASSERT(!force_cc || !has_valid_frame() || has_cc());
+  ASSERT(!has_valid_frame() ||
+         (has_cc() && frame_->height() == original_height) ||
+         (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+void CodeGenerator::Load(Expression* x) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  JumpTarget true_target;
+  JumpTarget false_target;
+  LoadCondition(x, &true_target, &false_target, false);
+
+  if (has_cc()) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  if (true_target.is_linked() || false_target.is_linked()) {
+    UNIMPLEMENTED_MIPS();
+  }
+  ASSERT(has_valid_frame());
+  ASSERT(!has_cc());
+  ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadGlobal() {
+  VirtualFrame::SpilledScope spilled_scope;
+  __ lw(a0, GlobalObject());
+  frame_->EmitPush(a0);
+}
+
+
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+  VirtualFrame::SpilledScope spilled_scope;
+  if (slot->type() == Slot::LOOKUP) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    __ lw(a0, SlotOperand(slot, a2));
+    frame_->EmitPush(a0);
+    if (slot->var()->mode() == Variable::CONST) {
+      UNIMPLEMENTED_MIPS();
+    }
+  }
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+  ASSERT(slot != NULL);
+  if (slot->type() == Slot::LOOKUP) {
+      UNIMPLEMENTED_MIPS();
+  } else {
+    ASSERT(!slot->var()->is_dynamic());
+
+    JumpTarget exit;
+    if (init_state == CONST_INIT) {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    // We must execute the store. Storing a variable must keep the
+    // (new) value on the stack. This is necessary for compiling
+    // assignment expressions.
+    //
+    // Note: We will reach here even with slot->var()->mode() ==
+    // Variable::CONST because of const declarations which will
+    // initialize consts to 'the hole' value and by doing so, end up
+    // calling this code. a2 may be loaded with context; used below in
+    // RecordWrite.
+    frame_->EmitPop(a0);
+    __ sw(a0, SlotOperand(slot, a2));
+    frame_->EmitPush(a0);
+    if (slot->type() == Slot::CONTEXT) {
+      UNIMPLEMENTED_MIPS();
+    }
+    // If we definitely did not jump over the assignment, we do not need
+    // to bind the exit label. Doing so can defeat peephole
+    // optimization.
+    if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
+      exit.Bind();
+    }
+  }
 }
 
 
 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-  UNIMPLEMENTED_MIPS();
+  VirtualFrame::SpilledScope spilled_scope;
+  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
+    VisitAndSpill(statements->at(i));
+  }
 }
 
 
@@ -98,7 +471,14 @@
 
 
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  UNIMPLEMENTED_MIPS();
+  VirtualFrame::SpilledScope spilled_scope;
+  frame_->EmitPush(cp);
+  __ li(t0, Operand(pairs));
+  frame_->EmitPush(t0);
+  __ li(t0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+  frame_->EmitPush(t0);
+  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+  // The result is discarded.
 }
 
 
@@ -108,7 +488,17 @@
 
 
 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  CodeForStatementPosition(node);
+  Expression* expression = node->expression();
+  expression->MarkAsStatement();
+  LoadAndSpill(expression);
+  frame_->Drop();
+  ASSERT(frame_->height() == original_height);
 }
 
 
@@ -133,7 +523,22 @@
 
 
 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  UNIMPLEMENTED_MIPS();
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ReturnStatement");
+
+  CodeForStatementPosition(node);
+  LoadAndSpill(node->expression());
+  if (function_return_is_shadowed_) {
+    frame_->EmitPop(v0);
+    function_return_.Jump();
+  } else {
+    // Pop the result from the frame and prepare the frame for
+    // returning thus making it easier to merge.
+    frame_->EmitPop(v0);
+    frame_->PrepareForReturn();
+
+    function_return_.Jump();
+  }
 }
 
 
@@ -192,8 +597,8 @@
 }
 
 
-void CodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void CodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   UNIMPLEMENTED_MIPS();
 }
 
@@ -204,17 +609,45 @@
 
 
 void CodeGenerator::VisitSlot(Slot* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Slot");
+  LoadFromSlot(node, typeof_state());
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ VariableProxy");
+
+  Variable* var = node->var();
+  Expression* expr = var->rewrite();
+  if (expr != NULL) {
+    Visit(expr);
+  } else {
+    ASSERT(var->is_global());
+    Reference ref(this, node);
+    ref.GetValueAndSpill();
+  }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
 void CodeGenerator::VisitLiteral(Literal* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Literal");
+  __ li(t0, Operand(node->handle()));
+  frame_->EmitPush(t0);
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
@@ -239,7 +672,47 @@
 
 
 void CodeGenerator::VisitAssignment(Assignment* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Assignment");
+
+  { Reference target(this, node->target());
+    if (target.is_illegal()) {
+      // Fool the virtual frame into thinking that we left the assignment's
+      // value on the frame.
+      frame_->EmitPush(zero_reg);
+      ASSERT(frame_->height() == original_height + 1);
+      return;
+    }
+
+    if (node->op() == Token::ASSIGN ||
+        node->op() == Token::INIT_VAR ||
+        node->op() == Token::INIT_CONST) {
+      LoadAndSpill(node->value());
+    } else {
+      UNIMPLEMENTED_MIPS();
+    }
+
+    Variable* var = node->target()->AsVariableProxy()->AsVariable();
+    if (var != NULL &&
+        (var->mode() == Variable::CONST) &&
+        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+      // Assignment ignored - leave the value on the stack.
+    } else {
+      CodeForSourcePosition(node->position());
+      if (node->op() == Token::INIT_CONST) {
+        // Dynamic constant initializations must use the function context
+        // and initialize the actual constant declared. Dynamic variable
+        // initializations are simply assignments and use SetValue.
+        target.SetValue(CONST_INIT);
+      } else {
+        target.SetValue(NOT_CONST_INIT);
+      }
+    }
+  }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
@@ -254,7 +727,73 @@
 
 
 void CodeGenerator::VisitCall(Call* node) {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ Call");
+
+  Expression* function = node->expression();
+  ZoneList<Expression*>* args = node->arguments();
+
+  // Standard function call.
+  // Check if the function is a variable or a property.
+  Variable* var = function->AsVariableProxy()->AsVariable();
+  Property* property = function->AsProperty();
+
+  // ------------------------------------------------------------------------
+  // Fast-case: Use inline caching.
+  // ---
+  // According to ECMA-262, section 11.2.3, page 44, the function to call
+  // must be resolved after the arguments have been evaluated. The IC code
+  // automatically handles this by loading the arguments before the function
+  // is resolved in cache misses (this also holds for megamorphic calls).
+  // ------------------------------------------------------------------------
+
+  if (var != NULL && var->is_possibly_eval()) {
+    UNIMPLEMENTED_MIPS();
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
+    // ----------------------------------
+
+    int arg_count = args->length();
+
+    // We need sp to be 8 bytes aligned when calling the stub.
+    __ SetupAlignedCall(t0, arg_count);
+
+    // Pass the global object as the receiver and let the IC stub
+    // patch the stack to use the global proxy as 'this' in the
+    // invoked function.
+    LoadGlobal();
+
+    // Load the arguments.
+    for (int i = 0; i < arg_count; i++) {
+      LoadAndSpill(args->at(i));
+    }
+
+    // Setup the receiver register and call the IC initialization code.
+    __ li(a2, Operand(var->name()));
+    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+    Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
+    CodeForSourcePosition(node->position());
+    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
+                           arg_count + 1);
+    __ ReturnFromAlignedCall();
+    __ lw(cp, frame_->Context());
+    // Remove the function from the stack.
+    frame_->EmitPush(v0);
+
+  } else if (var != NULL && var->slot() != NULL &&
+             var->slot()->type() == Slot::LOOKUP) {
+    UNIMPLEMENTED_MIPS();
+  } else if (property != NULL) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
@@ -439,13 +978,108 @@
 #undef __
 #define __ ACCESS_MASM(masm)
 
+// -----------------------------------------------------------------------------
+// Reference support
 
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  return Handle<Code>::null();
+Reference::Reference(CodeGenerator* cgen,
+                     Expression* expression,
+                     bool persist_after_get)
+    : cgen_(cgen),
+      expression_(expression),
+      type_(ILLEGAL),
+      persist_after_get_(persist_after_get) {
+  cgen->LoadReference(this);
 }
 
 
-// On entry a0 and a1 are the things to be compared.  On exit v0 is 0,
+Reference::~Reference() {
+  ASSERT(is_unloaded() || is_illegal());
+}
+
+
+Handle<String> Reference::GetName() {
+  ASSERT(type_ == NAMED);
+  Property* property = expression_->AsProperty();
+  if (property == NULL) {
+    // Global variable reference treated as a named property reference.
+    VariableProxy* proxy = expression_->AsVariableProxy();
+    ASSERT(proxy->AsVariable() != NULL);
+    ASSERT(proxy->AsVariable()->is_global());
+    return proxy->name();
+  } else {
+    Literal* raw_name = property->key()->AsLiteral();
+    ASSERT(raw_name != NULL);
+    return Handle<String>(String::cast(*raw_name->handle()));
+  }
+}
+
+
+void Reference::GetValue() {
+  ASSERT(cgen_->HasValidEntryRegisters());
+  ASSERT(!is_illegal());
+  ASSERT(!cgen_->has_cc());
+  Property* property = expression_->AsProperty();
+  if (property != NULL) {
+    cgen_->CodeForSourcePosition(property->position());
+  }
+
+  switch (type_) {
+    case SLOT: {
+      UNIMPLEMENTED_MIPS();
+      break;
+    }
+
+    case NAMED: {
+      UNIMPLEMENTED_MIPS();
+      break;
+    }
+
+    case KEYED: {
+      UNIMPLEMENTED_MIPS();
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+  ASSERT(!is_illegal());
+  ASSERT(!cgen_->has_cc());
+  MacroAssembler* masm = cgen_->masm();
+  Property* property = expression_->AsProperty();
+  if (property != NULL) {
+    cgen_->CodeForSourcePosition(property->position());
+  }
+
+  switch (type_) {
+    case SLOT: {
+      Comment cmnt(masm, "[ Store to Slot");
+      Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+      cgen_->StoreToSlot(slot, init_state);
+      cgen_->UnloadReference(this);
+      break;
+    }
+
+    case NAMED: {
+      UNIMPLEMENTED_MIPS();
+      break;
+    }
+
+    case KEYED: {
+      UNIMPLEMENTED_MIPS();
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
 // positive or negative to indicate the result of the comparison.
 void CompareStub::Generate(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
@@ -453,6 +1087,12 @@
 }
 
 
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  UNIMPLEMENTED_MIPS();
+  return Handle<Code>::null();
+}
+
+
 void StackCheckStub::Generate(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
   __ break_(0x790);
@@ -477,55 +1117,274 @@
                               Label* throw_out_of_memory_exception,
                               bool do_gc,
                               bool always_allocate) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x826);
+  // s0: number of arguments including receiver (C callee-saved)
+  // s1: pointer to the first argument          (C callee-saved)
+  // s2: pointer to builtin function            (C callee-saved)
+
+  if (do_gc) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth();
+  if (always_allocate) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  // Call C built-in.
+  // a0 = argc, a1 = argv
+  __ mov(a0, s0);
+  __ mov(a1, s1);
+
+  __ CallBuiltin(s2);
+
+  if (always_allocate) {
+    UNIMPLEMENTED_MIPS();
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  __ addiu(a2, v0, 1);
+  __ andi(t0, a2, kFailureTagMask);
+  __ Branch(eq, &failure_returned, t0, Operand(zero_reg));
+
+  // Exit C frame and return.
+  // v0:v1: result
+  // sp: stack pointer
+  // fp: frame pointer
+  __ LeaveExitFrame(mode_);
+
+  // Check if we should retry or throw exception.
+  Label retry;
+  __ bind(&failure_returned);
+  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
+  __ Branch(eq, &retry, t0, Operand(zero_reg));
+
+  // Special handling of out of memory exceptions.
+  Failure* out_of_memory = Failure::OutOfMemoryException();
+  __ Branch(eq, throw_out_of_memory_exception,
+            v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+
+  // Retrieve the pending exception and clear the variable.
+  __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
+  __ lw(a3, MemOperand(t0));
+  __ LoadExternalReference(t0,
+      ExternalReference(Top::k_pending_exception_address));
+  __ lw(v0, MemOperand(t0));
+  __ sw(a3, MemOperand(t0));
+
+  // Special handling of termination exceptions which are uncatchable
+  // by javascript code.
+  __ Branch(eq, throw_termination_exception,
+            v0, Operand(Factory::termination_exception()));
+
+  // Handle normal exception.
+  __ b(throw_normal_exception);
+  __ nop();   // Branch delay slot nop.
+
+  __ bind(&retry);  // pass last failure (v0) as parameter (v0) when retrying
 }
 
 void CEntryStub::Generate(MacroAssembler* masm) {
-  UNIMPLEMENTED_MIPS();
-  __ break_(0x831);
+  // Called from JavaScript; parameters are on stack as if calling JS function
+  // a0: number of arguments including receiver
+  // a1: pointer to builtin function
+  // fp: frame pointer    (restored after C call)
+  // sp: stack pointer    (restored as callee's sp after C call)
+  // cp: current context  (C callee-saved)
+
+  // NOTE: Invocations of builtins may return failure objects
+  // instead of a proper result. The builtin entry handles
+  // this by performing a garbage collection and retrying the
+  // builtin once.
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  __ EnterExitFrame(mode_, s0, s1, s2);
+
+  // s0: number of arguments (C callee-saved)
+  // s1: pointer to first argument (C callee-saved)
+  // s2: pointer to builtin function (C callee-saved)
+
+  Label throw_normal_exception;
+  Label throw_termination_exception;
+  Label throw_out_of_memory_exception;
+
+  // Call into the runtime system.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               false,
+               false);
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  Failure* failure = Failure::InternalError();
+  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_termination_exception,
+               &throw_out_of_memory_exception,
+               true,
+               true);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+
+  __ bind(&throw_termination_exception);
+  GenerateThrowUncatchable(masm, TERMINATION);
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
 }
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  UNIMPLEMENTED_MIPS();
+  Label invoke, exit;
+
+  // Registers:
+  // a0: entry address
+  // a1: function
+  // a2: receiver
+  // a3: argc
+  //
+  // Stack:
+  // 4 args slots
+  // args
 
   // Save callee saved registers on the stack.
-  __ MultiPush(kCalleeSaved | ra.bit());
+  __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
 
-  // ********** State **********
-  //
-  // * Registers:
+  // We build an EntryFrame.
+  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  __ li(t2, Operand(Smi::FromInt(marker)));
+  __ li(t1, Operand(Smi::FromInt(marker)));
+  __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+  __ lw(t0, MemOperand(t0));
+  __ MultiPush(t0.bit() | t1.bit() | t2.bit() | t3.bit());
+
+  // Setup frame pointer for the frame to be pushed.
+  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Load argv in s0 register.
+  __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize +
+                           StandardFrameConstants::kCArgsSlotsSize));
+
+  // Registers:
   // a0: entry_address
   // a1: function
   // a2: reveiver_pointer
   // a3: argc
+  // s0: argv
   //
-  // * Stack:
-  // ---------------------------
-  // args
-  // ---------------------------
-  // 4 args slots
-  // ---------------------------
+  // Stack:
+  // caller fp          |
+  // function slot      | entry frame
+  // context slot       |
+  // bad fp (0xff...f)  |
   // callee saved registers + ra
-  // ---------------------------
-  //
-  // ***************************
+  // 4 args slots
+  // args
 
-  __ break_(0x1234);
+  // Call a faked try-block that does the invoke.
+  __ bal(&invoke);
+  __ nop();   // Branch delay slot nop.
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  // Coming in here the fp will be invalid because the PushTryHandler below
+  // sets it to 0 to signal the existence of the JSEntry frame.
+  __ LoadExternalReference(t0,
+      ExternalReference(Top::k_pending_exception_address));
+  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
+  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+  __ b(&exit);
+  __ nop();   // Branch delay slot nop.
+
+  // Invoke: Link this frame into the handler chain.
+  __ bind(&invoke);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the bal(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+
+  // Clear any pending exceptions.
+  __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
+  __ lw(t1, MemOperand(t0));
+  __ LoadExternalReference(t0,
+      ExternalReference(Top::k_pending_exception_address));
+  __ sw(t1, MemOperand(t0));
+
+  // Invoke the function by calling through JS entry trampoline builtin.
+  // Notice that we cannot store a reference to the trampoline code directly in
+  // this stub, because runtime stubs are not traversed when doing GC.
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // 4 args slots
+  // args
+
+  if (is_construct) {
+    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+    __ LoadExternalReference(t0, construct_entry);
+  } else {
+    ExternalReference entry(Builtins::JSEntryTrampoline);
+    __ LoadExternalReference(t0, entry);
+  }
+  __ lw(t9, MemOperand(t0));  // deref address
+
+  // Call JSEntryTrampoline.
+  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+  __ CallBuiltin(t9);
+
+  // Unlink this frame from the handler chain. When reading the
+  // address of the next handler, there is no need to use the address
+  // displacement since the current stack pointer (sp) points directly
+  // to the stack handler.
+  __ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
+  __ LoadExternalReference(t0, ExternalReference(Top::k_handler_address));
+  __ sw(t1, MemOperand(t0));
+
+  // This restores sp to its position before PushTryHandler.
+  __ addiu(sp, sp, StackHandlerConstants::kSize);
+
+  __ bind(&exit);  // v0 holds result
+  // Restore the top frame descriptors from the stack.
+  __ Pop(t1);
+  __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+  __ sw(t1, MemOperand(t0));
+
+  // Reset the stack to the callee saved registers.
+  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
 
   // Restore callee saved registers from the stack.
-  __ MultiPop(kCalleeSaved | ra.bit());
-
-  // Load a result.
-  __ li(v0, Operand(0x1234));
-  __ jr(ra);
-  // Return
-  __ nop();
+  __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
+  // Return.
+  __ Jump(ra);
 }
 
 
 // This stub performs an instanceof, calling the builtin function if
-// necessary.  Uses a1 for the object, a0 for the function that it may
+// necessary. Uses a1 for the object, a0 for the function that it may
 // be an instance of (these are fetched from the stack).
 void InstanceofStub::Generate(MacroAssembler* masm) {
   UNIMPLEMENTED_MIPS();
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 3f78fcd..44a4a62 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -42,7 +42,77 @@
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+class Reference BASE_EMBEDDED {
+ public:
+  // The values of the types is important, see size().
+  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+  Reference(CodeGenerator* cgen,
+            Expression* expression,
+            bool persist_after_get = false);
+  ~Reference();
+
+  Expression* expression() const { return expression_; }
+  Type type() const { return type_; }
+  void set_type(Type value) {
+    ASSERT_EQ(ILLEGAL, type_);
+    type_ = value;
+  }
+
+  void set_unloaded() {
+    ASSERT_NE(ILLEGAL, type_);
+    ASSERT_NE(UNLOADED, type_);
+    type_ = UNLOADED;
+  }
+  // The size the reference takes up on the stack.
+  int size() const {
+    return (type_ < SLOT) ? 0 : type_;
+  }
+
+  bool is_illegal() const { return type_ == ILLEGAL; }
+  bool is_slot() const { return type_ == SLOT; }
+  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+  bool is_unloaded() const { return type_ == UNLOADED; }
+
+  // Return the name. Only valid for named property references.
+  Handle<String> GetName();
+
+  // Generate code to push the value of the reference on top of the
+  // expression stack.  The reference is expected to be already on top of
+  // the expression stack, and it is consumed by the call unless the
+  // reference is for a compound assignment.
+  // If the reference is not consumed, it is left in place under its value.
+  void GetValue();
+
+  // Generate code to pop a reference, push the value of the reference,
+  // and then spill the stack frame.
+  inline void GetValueAndSpill();
+
+  // Generate code to store the value on top of the expression stack in the
+  // reference.  The reference is expected to be immediately below the value
+  // on the expression stack.  The  value is stored in the location specified
+  // by the reference, and is left on top of the stack, after the reference
+  // is popped from beneath it (unloaded).
+  void SetValue(InitState init_state);
+
+ private:
+  CodeGenerator* cgen_;
+  Expression* expression_;
+  Type type_;
+  // Keep the reference on the stack after get, so it can be used by set later.
+  bool persist_after_get_;
+};
+
+
+// -----------------------------------------------------------------------------
 // Code generation state
 
 // The state is passed down the AST by the code generator (and back up, in
@@ -89,7 +159,7 @@
 
 
 
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
 // CodeGenerator
 
 class CodeGenerator: public AstVisitor {
@@ -152,7 +222,7 @@
 
   // Number of instructions used for the JS return sequence. The constant is
   // used by the debugger to patch the JS return sequence.
-  static const int kJSReturnSequenceLength = 6;
+  static const int kJSReturnSequenceLength = 7;
 
   // If the name is an inline runtime function call return the number of
   // expected arguments. Otherwise return -1.
@@ -186,9 +256,51 @@
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
+  // Visit a statement and then spill the virtual frame if control flow can
+  // reach the end of the statement (ie, it does not exit via break,
+  // continue, return, or throw).  This function is used temporarily while
+  // the code generator is being transformed.
+  inline void VisitAndSpill(Statement* statement);
+
+  // Visit a list of statements and then spill the virtual frame if control
+  // flow can reach the end of the list.
+  inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+
   // Main code generation function
   void Generate(CompilationInfo* info);
 
+  // The following are used by class Reference.
+  void LoadReference(Reference* ref);
+  void UnloadReference(Reference* ref);
+
+  MemOperand ContextOperand(Register context, int index) const {
+    return MemOperand(context, Context::SlotOffset(index));
+  }
+
+  MemOperand SlotOperand(Slot* slot, Register tmp);
+
+  // Expressions
+  MemOperand GlobalObject() const  {
+    return ContextOperand(cp, Context::GLOBAL_INDEX);
+  }
+
+  void LoadCondition(Expression* x,
+                     JumpTarget* true_target,
+                     JumpTarget* false_target,
+                     bool force_cc);
+  void Load(Expression* x);
+  void LoadGlobal();
+
+  // Generate code to push the value of an expression on top of the frame
+  // and then spill the frame fully to memory.  This function is used
+  // temporarily while the code generator is being transformed.
+  inline void LoadAndSpill(Expression* expression);
+
+  // Read a value from a slot and leave it on top of the expression stack.
+  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  // Store the value on top of the stack to a slot.
+  void StoreToSlot(Slot* slot, InitState init_state);
+
   struct InlineRuntimeLUT {
     void (CodeGenerator::*method)(ZoneList<Expression*>*);
     const char* name;
@@ -290,7 +402,6 @@
   CompilationInfo* info_;
 
   // Code generation state
-  Scope* scope_;
   VirtualFrame* frame_;
   RegisterAllocator* allocator_;
   Condition cc_reg_;
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index d2c717c..cdc880d 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -91,8 +91,7 @@
 
 
 Address InternalFrame::GetCallerStackPointer() const {
-  UNIMPLEMENTED_MIPS();
-  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
+  return fp() + StandardFrameConstants::kCallerSPOffset;
 }
 
 
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index ec1949d..06e9979 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -104,7 +104,7 @@
   static const int kCallerPCOffset = +1 * kPointerSize;
 
   // FP-relative displacement of the caller's SP.
-  static const int kCallerSPDisplacement = +4 * kPointerSize;
+  static const int kCallerSPDisplacement = +3 * kPointerSize;
 };
 
 
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 6056166..8c90921 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -74,6 +74,47 @@
 
 void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
   UNIMPLEMENTED_MIPS();
+  // Registers:
+  // a2: name
+  // ra: return address
+
+  // Get the receiver of the function from the stack.
+  __ lw(a3, MemOperand(sp, argc*kPointerSize));
+
+  __ EnterInternalFrame();
+
+  // Push the receiver and the name of the function.
+  __ MultiPush(a2.bit() | a3.bit());
+
+  // Call the entry.
+  __ li(a0, Operand(2));
+  __ li(a1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
+
+  CEntryStub stub(1);
+  __ CallStub(&stub);
+
+  // Move result to a1 and leave the internal frame.
+  __ mov(a1, v0);
+  __ LeaveInternalFrame();
+
+  // Check if the receiver is a global object of some sort.
+  Label invoke, global;
+  __ lw(a2, MemOperand(sp, argc * kPointerSize));
+  __ andi(t0, a2, kSmiTagMask);
+  __ Branch(eq, &invoke, t0, Operand(zero_reg));
+  __ GetObjectType(a2, a3, a3);
+  __ Branch(eq, &global, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ Branch(ne, &invoke, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
+
+  // Patch the receiver on the stack.
+  __ bind(&global);
+  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+  __ sw(a2, MemOperand(sp, argc * kPointerSize));
+
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ bind(&invoke);
+  __ InvokeFunction(a1, actual, JUMP_FUNCTION);
 }
 
 // Defined in ic.cc.
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
index e8398a8..4bd9102 100644
--- a/src/mips/jump-target-mips.cc
+++ b/src/mips/jump-target-mips.cc
@@ -42,7 +42,37 @@
 #define __ ACCESS_MASM(cgen()->masm())
 
 void JumpTarget::DoJump() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(cgen()->has_valid_frame());
+  // Live non-frame registers are not allowed at unconditional jumps
+  // because we have no way of invalidating the corresponding results
+  // which are still live in the C++ code.
+  ASSERT(cgen()->HasValidEntryRegisters());
+
+  if (is_bound()) {
+    // Backward jump.  There is already a frame expectation at the target.
+    ASSERT(direction_ == BIDIRECTIONAL);
+    cgen()->frame()->MergeTo(entry_frame_);
+    cgen()->DeleteFrame();
+  } else {
+    // Use the current frame as the expected one at the target if necessary.
+    if (entry_frame_ == NULL) {
+      entry_frame_ = cgen()->frame();
+      RegisterFile empty;
+      cgen()->SetFrame(NULL, &empty);
+    } else {
+      cgen()->frame()->MergeTo(entry_frame_);
+      cgen()->DeleteFrame();
+    }
+
+    // The predicate is_linked() should be made true.  Its implementation
+    // detects the presence of a frame pointer in the reaching_frames_ list.
+    if (!is_linked()) {
+      reaching_frames_.Add(NULL);
+      ASSERT(is_linked());
+    }
+  }
+  __ b(&entry_label_);
+  __ nop();   // Branch delay slot nop.
 }
 
 
@@ -57,12 +87,47 @@
 
 
 void JumpTarget::DoBind() {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(!is_bound());
+
+  // Live non-frame registers are not allowed at the start of a basic
+  // block.
+  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+  if (cgen()->has_valid_frame()) {
+    // If there is a current frame we can use it on the fall through.
+    if (entry_frame_ == NULL) {
+      entry_frame_ = new VirtualFrame(cgen()->frame());
+    } else {
+      ASSERT(cgen()->frame()->Equals(entry_frame_));
+    }
+  } else {
+    // If there is no current frame we must have an entry frame which we can
+    // copy.
+    ASSERT(entry_frame_ != NULL);
+    RegisterFile empty;
+    cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+  }
+
+  // The predicate is_linked() should be made false.  Its implementation
+  // detects the presence (or absence) of frame pointers in the
+  // reaching_frames_ list.  If we inserted a bogus frame to make
+  // is_linked() true, remove it now.
+  if (is_linked()) {
+    reaching_frames_.Clear();
+  }
+
+  __ bind(&entry_label_);
 }
 
 
 void BreakTarget::Jump() {
-  UNIMPLEMENTED_MIPS();
+  // On MIPS we do not currently emit merge code for jumps, so we need to do
+  // it explicitly here.  The only merging necessary is to drop extra
+  // statement state from the stack.
+  ASSERT(cgen()->has_valid_frame());
+  int count = cgen()->frame()->height() - expected_height_;
+  cgen()->frame()->Drop(count);
+  DoJump();
 }
 
 
@@ -72,7 +137,26 @@
 
 
 void BreakTarget::Bind() {
-  UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+  // All the forward-reaching frames should have been adjusted at the
+  // jumps to this target.
+  for (int i = 0; i < reaching_frames_.length(); i++) {
+    ASSERT(reaching_frames_[i] == NULL ||
+           reaching_frames_[i]->height() == expected_height_);
+  }
+#endif
+  // Drop leftover statement state from the frame before merging, even
+  // on the fall through.  This is so we can bind the return target
+  // with state on the frame.
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    // On MIPS we do not currently emit merge code at binding sites, so we need
+    // to do it explicitly here.  The only merging necessary is to drop extra
+    // statement state from the stack.
+    cgen()->frame()->Drop(count);
+  }
+
+  DoBind();
 }
 
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e49858b..c276af5 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -55,7 +55,7 @@
 
 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond, Register r1, const Operand& r2) {
-  Jump(Operand(target), cond, r1, r2);
+  Jump(Operand(target, rmode), cond, r1, r2);
 }
 
 
@@ -81,7 +81,7 @@
 
 void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond, Register r1, const Operand& r2) {
-  Call(Operand(target), cond, r1, r2);
+  Call(Operand(target, rmode), cond, r1, r2);
 }
 
 
@@ -106,7 +106,7 @@
 
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index) {
-  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
 }
 
 void MacroAssembler::LoadRoot(Register destination,
@@ -114,8 +114,7 @@
                               Condition cond,
                               Register src1, const Operand& src2) {
   Branch(NegateCondition(cond), 2, src1, src2);
-  nop();
-  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
 }
 
 
@@ -320,7 +319,6 @@
 }
 
 
-// load wartd in a register
 void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
   ASSERT(!j.is_reg());
 
@@ -372,7 +370,7 @@
   int16_t NumToPush = NumberOfBitsSet(regs);
 
   addiu(sp, sp, -4 * NumToPush);
-  for (int16_t i = 0; i < kNumRegisters; i++) {
+  for (int16_t i = kNumRegisters; i > 0; i--) {
     if ((regs & (1 << i)) != 0) {
       sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
     }
@@ -385,7 +383,7 @@
   int16_t NumToPush = NumberOfBitsSet(regs);
 
   addiu(sp, sp, -4 * NumToPush);
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = 0; i < kNumRegisters; i++) {
     if ((regs & (1 << i)) != 0) {
       sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
     }
@@ -396,7 +394,7 @@
 void MacroAssembler::MultiPop(RegList regs) {
   int16_t NumSaved = 0;
 
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = 0; i < kNumRegisters; i++) {
     if ((regs & (1 << i)) != 0) {
       lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
     }
@@ -408,7 +406,7 @@
 void MacroAssembler::MultiPopReversed(RegList regs) {
   int16_t NumSaved = 0;
 
-  for (int16_t i = 0; i < kNumRegisters; i++) {
+  for (int16_t i = kNumRegisters; i > 0; i--) {
     if ((regs & (1 << i)) != 0) {
       lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
     }
@@ -484,6 +482,8 @@
     default:
       UNREACHABLE();
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 
@@ -550,6 +550,8 @@
     default:
       UNREACHABLE();
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 
@@ -629,6 +631,8 @@
     default:
       UNREACHABLE();
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 
@@ -704,6 +708,8 @@
     default:
       UNREACHABLE();
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 
@@ -714,7 +720,6 @@
       jr(target.rm());
     } else {
       Branch(NegateCondition(cond), 2, rs, rt);
-      nop();
       jr(target.rm());
     }
   } else {    // !target.is_reg()
@@ -723,20 +728,20 @@
         j(target.imm32_);
       } else {
         Branch(NegateCondition(cond), 2, rs, rt);
-        nop();
-        j(target.imm32_);  // will generate only one instruction.
+        j(target.imm32_);  // Will generate only one instruction.
       }
     } else {  // MustUseAt(target)
-      li(at, rt);
+      li(at, target);
       if (cond == cc_always) {
         jr(at);
       } else {
         Branch(NegateCondition(cond), 2, rs, rt);
-        nop();
-        jr(at);  // will generate only one instruction.
+        jr(at);  // Will generate only one instruction.
       }
     }
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 
@@ -747,7 +752,6 @@
       jalr(target.rm());
     } else {
       Branch(NegateCondition(cond), 2, rs, rt);
-      nop();
       jalr(target.rm());
     }
   } else {    // !target.is_reg()
@@ -756,20 +760,20 @@
         jal(target.imm32_);
       } else {
         Branch(NegateCondition(cond), 2, rs, rt);
-        nop();
-        jal(target.imm32_);  // will generate only one instruction.
+        jal(target.imm32_);  // Will generate only one instruction.
       }
     } else {  // MustUseAt(target)
-      li(at, rt);
+      li(at, target);
       if (cond == cc_always) {
         jalr(at);
       } else {
         Branch(NegateCondition(cond), 2, rs, rt);
-        nop();
-        jalr(at);  // will generate only one instruction.
+        jalr(at);  // Will generate only one instruction.
       }
     }
   }
+  // Emit a nop in the branch delay slot.
+  nop();
 }
 
 void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
@@ -802,7 +806,58 @@
 
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
-  UNIMPLEMENTED_MIPS();
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  // The return address is passed in register ra.
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      li(t0, Operand(StackHandler::TRY_CATCH));
+    } else {
+      li(t0, Operand(StackHandler::TRY_FINALLY));
+    }
+    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+           && StackHandlerConstants::kPCOffset == 3 * kPointerSize
+           && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+    // Save the current handler as the next handler.
+    LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+    lw(t1, MemOperand(t2));
+
+    addiu(sp, sp, -StackHandlerConstants::kSize);
+    sw(ra, MemOperand(sp, 12));
+    sw(fp, MemOperand(sp, 8));
+    sw(t0, MemOperand(sp, 4));
+    sw(t1, MemOperand(sp, 0));
+
+    // Link this handler as the new current one.
+    sw(sp, MemOperand(t2));
+
+  } else {
+    // Must preserve a0-a3, and s0 (argv).
+    ASSERT(try_location == IN_JS_ENTRY);
+    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+           && StackHandlerConstants::kPCOffset == 3 * kPointerSize
+           && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for fp. We expect the code throwing an exception to check fp
+    // before dereferencing it to restore the context.
+    li(t0, Operand(StackHandler::ENTRY));
+
+    // Save the current handler as the next handler.
+    LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+    lw(t1, MemOperand(t2));
+
+    addiu(sp, sp, -StackHandlerConstants::kSize);
+    sw(ra, MemOperand(sp, 12));
+    sw(zero_reg, MemOperand(sp, 8));
+    sw(t0, MemOperand(sp, 4));
+    sw(t1, MemOperand(sp, 0));
+
+    // Link this handler as the new current one.
+    sw(sp, MemOperand(t2));
+  }
 }
 
 
@@ -812,12 +867,233 @@
 
 
 
-// ---------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
 // Activation frames
 
+void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) {
+  Label extra_push, end;
+
+  andi(scratch, sp, 7);
+
+  // We check for args and receiver size on the stack, all of them word sized.
+  // We add one for sp, that we also want to store on the stack.
+  if (((arg_count + 1) % kPointerSizeLog2) == 0) {
+    Branch(ne, &extra_push, at, Operand(zero_reg));
+  } else {  // ((arg_count + 1) % 2) == 1
+    Branch(eq, &extra_push, at, Operand(zero_reg));
+  }
+
+  // Save sp on the stack.
+  mov(scratch, sp);
+  Push(scratch);
+  b(&end);
+
+  // Align before saving sp on the stack.
+  bind(&extra_push);
+  mov(scratch, sp);
+  addiu(sp, sp, -8);
+  sw(scratch, MemOperand(sp));
+
+  // The stack is aligned and sp is stored on the top.
+  bind(&end);
+}
+
+
+void MacroAssembler::ReturnFromAlignedCall() {
+  lw(sp, MemOperand(sp));
+}
+
+
+// -----------------------------------------------------------------------------
+// JavaScript invokes
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_reg,
+                                    Label* done,
+                                    InvokeFlag flag) {
+  bool definitely_matches = false;
+  Label regular_invoke;
+
+  // Check whether the expected and actual arguments count match. If not,
+  // setup registers according to contract with ArgumentsAdaptorTrampoline:
+  //  a0: actual arguments count
+  //  a1: function (passed through to callee)
+  //  a2: expected arguments count
+  //  a3: callee code entry
+
+  // The code below is made a lot easier because the calling code already sets
+  // up actual and expected registers according to the contract if values are
+  // passed in registers.
+  ASSERT(actual.is_immediate() || actual.reg().is(a0));
+  ASSERT(expected.is_immediate() || expected.reg().is(a2));
+  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
+
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      li(a0, Operand(actual.immediate()));
+      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+      if (expected.immediate() == sentinel) {
+        // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip adaption code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        li(a2, Operand(expected.immediate()));
+      }
+    }
+  } else if (actual.is_immediate()) {
+    Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
+    li(a0, Operand(actual.immediate()));
+  } else {
+    Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg()));
+  }
+
+  if (!definitely_matches) {
+    if (!code_constant.is_null()) {
+      li(a3, Operand(code_constant));
+      addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
+    }
+
+    ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
+    if (flag == CALL_FUNCTION) {
+      CallBuiltin(adaptor);
+      b(done);
+      nop();
+    } else {
+      JumpToBuiltin(adaptor);
+    }
+    bind(&regular_invoke);
+  }
+}
+
+void MacroAssembler::InvokeCode(Register code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                InvokeFlag flag) {
+  Label done;
+
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    Call(code);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    Jump(code);
+  }
+  // Continue here if InvokePrologue does handle the invocation due to
+  // mismatched parameter counts.
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                RelocInfo::Mode rmode,
+                                InvokeFlag flag) {
+  Label done;
+
+  InvokePrologue(expected, actual, code, no_reg, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    Call(code, rmode);
+  } else {
+    Jump(code, rmode);
+  }
+  // Continue here if InvokePrologue does handle the invocation due to
+  // mismatched parameter counts.
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  // Contract with called JS functions requires that function is passed in a1.
+  ASSERT(function.is(a1));
+  Register expected_reg = a2;
+  Register code_reg = a3;
+
+  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  lw(expected_reg,
+      FieldMemOperand(code_reg,
+                      SharedFunctionInfo::kFormalParameterCountOffset));
+  lw(code_reg,
+      MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+  addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
+
+  ParameterCount expected(expected_reg);
+  InvokeCode(code_reg, expected, actual, flag);
+}
+
+
+// ---------------------------------------------------------------------------
+// Support functions.
+
+  void MacroAssembler::GetObjectType(Register function,
+                                     Register map,
+                                     Register type_reg) {
+    lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
+    lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  }
+
+
+  void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
+    // Load builtin address.
+    LoadExternalReference(t9, builtin_entry);
+    lw(t9, MemOperand(t9));  // Deref address.
+    addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+    // Call and allocate argument slots.
+    jalr(t9);
+    // Use the branch delay slot to allocate the argument slots.
+    addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+    addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+  }
+
+
+  void MacroAssembler::CallBuiltin(Register target) {
+    // Target already holds target address.
+    // Call and allocate argument slots.
+    jalr(target);
+    // Use the branch delay slot to allocate the argument slots.
+    addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+    addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+  }
+
+
+  void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
+    // Load builtin address.
+    LoadExternalReference(t9, builtin_entry);
+    lw(t9, MemOperand(t9));  // Deref address.
+    addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+    // Jump and allocate argument slots.
+    jr(t9);
+    // Use the branch delay slot to allocate the argument slots.
+    addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+  }
+
+
+  void MacroAssembler::JumpToBuiltin(Register target) {
+    // t9 already holds target address.
+    // Jump and allocate argument slots.
+    jr(t9);
+    // Use the branch delay slot to allocate the argument slots.
+    addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+  }
+
+
+// -----------------------------------------------------------------------------
+// Runtime calls
+
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                               Register r1, const Operand& r2) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
 }
 
 
@@ -826,13 +1102,38 @@
 }
 
 
+void MacroAssembler::IllegalOperation(int num_arguments) {
+  if (num_arguments > 0) {
+    addiu(sp, sp, num_arguments * kPointerSize);
+  }
+  LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+}
+
+
 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
-  UNIMPLEMENTED_MIPS();
+  // All parameters are on the stack. v0 has the return value after call.
+
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments match the
+  // expectation.
+  if (f->nargs >= 0 && f->nargs != num_arguments) {
+    IllegalOperation(num_arguments);
+    return;
+  }
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  li(a0, num_arguments);
+  LoadExternalReference(a1, ExternalReference(f));
+  CEntryStub stub(1);
+  CallStub(&stub);
 }
 
 
 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
-  UNIMPLEMENTED_MIPS();
+  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
 }
 
 
@@ -891,6 +1192,8 @@
 }
 
 
+// -----------------------------------------------------------------------------
+// Debugging
 
 void MacroAssembler::Assert(Condition cc, const char* msg,
                             Register rs, Operand rt) {
@@ -908,5 +1211,113 @@
   UNIMPLEMENTED_MIPS();
 }
 
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+  addiu(sp, sp, -5 * kPointerSize);
+  li(t0, Operand(Smi::FromInt(type)));
+  li(t1, Operand(CodeObject()));
+  sw(ra, MemOperand(sp, 4 * kPointerSize));
+  sw(fp, MemOperand(sp, 3 * kPointerSize));
+  sw(cp, MemOperand(sp, 2 * kPointerSize));
+  sw(t0, MemOperand(sp, 1 * kPointerSize));
+  sw(t1, MemOperand(sp, 0 * kPointerSize));
+  addiu(fp, sp, 3 * kPointerSize);
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+  mov(sp, fp);
+  lw(fp, MemOperand(sp, 0 * kPointerSize));
+  lw(ra, MemOperand(sp, 1 * kPointerSize));
+  addiu(sp, sp, 2 * kPointerSize);
+}
+
+
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
+                                    Register hold_argc,
+                                    Register hold_argv,
+                                    Register hold_function) {
+  // Compute the argv pointer and keep it in a callee-saved register.
+  // a0 is argc.
+  sll(t0, a0, kPointerSizeLog2);
+  add(hold_argv, sp, t0);
+  addi(hold_argv, hold_argv, -kPointerSize);
+
+  // Compute callee's stack pointer before making changes and save it as
+  // t1 register so that it is restored as sp register on exit, thereby
+  // popping the args.
+  // t1 = sp + kPointerSize * #args
+  add(t1, sp, t0);
+
+  // Align the stack at this point.
+  AlignStack(0);
+
+  // Save registers.
+  addiu(sp, sp, -12);
+  sw(t1, MemOperand(sp, 8));
+  sw(ra, MemOperand(sp, 4));
+  sw(fp, MemOperand(sp, 0));
+  mov(fp, sp);  // Setup new frame pointer.
+
+  // Push debug marker.
+  if (mode == ExitFrame::MODE_DEBUG) {
+    Push(zero_reg);
+  } else {
+    li(t0, Operand(CodeObject()));
+    Push(t0);
+  }
+
+  // Save the frame pointer and the context in top.
+  LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+  sw(fp, MemOperand(t0));
+  LoadExternalReference(t0, ExternalReference(Top::k_context_address));
+  sw(cp, MemOperand(t0));
+
+  // Setup argc and the builtin function in callee-saved registers.
+  mov(hold_argc, a0);
+  mov(hold_function, a1);
+}
+
+
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
+  // Clear top frame.
+  LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+  sw(zero_reg, MemOperand(t0));
+
+  // Restore current context from top and clear it in debug mode.
+  LoadExternalReference(t0, ExternalReference(Top::k_context_address));
+  lw(cp, MemOperand(t0));
+#ifdef DEBUG
+  sw(a3, MemOperand(t0));
+#endif
+
+  // Pop the arguments, restore registers, and return.
+  mov(sp, fp);  // Respect ABI stack constraint.
+  lw(fp, MemOperand(sp, 0));
+  lw(ra, MemOperand(sp, 4));
+  lw(sp, MemOperand(sp, 8));
+  jr(ra);
+  nop();  // Branch delay slot nop.
+}
+
+
+void MacroAssembler::AlignStack(int offset) {
+  // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
+  //     and an offset of 1 aligns to 4 modulo 8 bytes.
+  int activation_frame_alignment = OS::ActivationFrameAlignment();
+  if (activation_frame_alignment != kPointerSize) {
+    // This code needs to be made more general if this assert doesn't hold.
+    ASSERT(activation_frame_alignment == 2 * kPointerSize);
+    if (offset == 0) {
+      andi(t0, sp, activation_frame_alignment - 1);
+      Push(zero_reg, eq, t0, zero_reg);
+    } else {
+      andi(t0, sp, activation_frame_alignment - 1);
+      addiu(t0, t0, -4);
+      Push(zero_reg, eq, t0, zero_reg);
+    }
+  }
+}
+
 } }  // namespace v8::internal
 
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index b34488c..0f0365b 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -41,6 +41,7 @@
 // unless we know exactly what we do.
 
 // Registers aliases
+// cp is assumed to be a callee saved register.
 const Register cp = s7;     // JavaScript context pointer
 const Register fp = s8_fp;  // Alias fp
 
@@ -102,10 +103,10 @@
   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it used by v8, for example in
   // CodeGenerator::ProcessDeferred().
+  // Currently the branch delay slot is filled by the MacroAssembler.
   // Use rather b(Label) for code generation.
   void jmp(Label* L) {
     Branch(cc_always, L);
-    nop();
   }
 
   // Load an object from the root table.
@@ -115,6 +116,11 @@
                 Heap::RootListIndex index,
                 Condition cond, Register src1, const Operand& src2);
 
+  // Load an external reference.
+  void LoadExternalReference(Register reg, ExternalReference ext) {
+    li(reg, Operand(ext));
+  }
+
   // Sets the remembered set bit for [address+offset].
   void RecordWrite(Register object, Register offset, Register scratch);
 
@@ -191,7 +197,6 @@
   void Push(Register src, Condition cond, Register tst1, Register tst2) {
     // Since we don't have conditionnal execution we use a Branch.
     Branch(cond, 3, tst1, Operand(tst2));
-    nop();
     Addu(sp, sp, Operand(-kPointerSize));
     sw(src, MemOperand(sp, 0));
   }
@@ -209,6 +214,53 @@
   }
 
 
+  // ---------------------------------------------------------------------------
+  // Activation frames
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  // Enter specific kind of exit frame; either EXIT or
+  // EXIT_DEBUG. Expects the number of arguments in register a0 and
+  // the builtin function to call in register a1.
+  // On output hold_argc, hold_function, and hold_argv are setup.
+  void EnterExitFrame(ExitFrame::Mode mode,
+                      Register hold_argc,
+                      Register hold_argv,
+                      Register hold_function);
+
+  // Leave the current exit frame. Expects the return value in v0.
+  void LeaveExitFrame(ExitFrame::Mode mode);
+
+  // Align the stack by optionally pushing a Smi zero.
+  void AlignStack(int offset);
+
+  void SetupAlignedCall(Register scratch, int arg_count = 0);
+  void ReturnFromAlignedCall();
+
+
+  // ---------------------------------------------------------------------------
+  // JavaScript invokes
+
+  // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(Register code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag);
+
+  void InvokeCode(Handle<Code> code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  RelocInfo::Mode rmode,
+                  InvokeFlag flag);
+
+  // Invoke the JavaScript function in the given register. Changes the
+  // current context to the context in the function before invoking.
+  void InvokeFunction(Register function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
   // Debugger Support
@@ -227,8 +279,7 @@
   // Exception handling
 
   // Push a new try handler and link into try handler chain.
-  // The return address must be passed in register lr.
-  // On exit, r0 contains TOS (code slot).
+  // The return address must be passed in register ra.
   void PushTryHandler(CodeLocation try_location, HandlerType type);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
@@ -239,6 +290,10 @@
   // ---------------------------------------------------------------------------
   // Support functions.
 
+  void GetObjectType(Register function,
+                     Register map,
+                     Register type_reg);
+
   inline void BranchOnSmi(Register value, Label* smi_label,
                           Register scratch = at) {
     ASSERT_EQ(0, kSmiTag);
@@ -254,6 +309,15 @@
     Branch(ne, not_smi_label, scratch, Operand(zero_reg));
   }
 
+  void CallBuiltin(ExternalReference builtin_entry);
+  void CallBuiltin(Register target);
+  void JumpToBuiltin(ExternalReference builtin_entry);
+  void JumpToBuiltin(Register target);
+
+  // Generates code for reporting that an illegal operation has
+  // occurred.
+  void IllegalOperation(int num_arguments);
+
 
   // ---------------------------------------------------------------------------
   // Runtime calls
@@ -342,20 +406,33 @@
   bool allow_stub_calls() { return allow_stub_calls_; }
 
  private:
-  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
-            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
-
-  // Get the code for the given builtin. Returns if able to resolve
-  // the function in the 'resolved' flag.
-  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
   List<Unresolved> unresolved_;
   bool generating_stub_;
   bool allow_stub_calls_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
+
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
+            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      Handle<Code> code_constant,
+                      Register code_reg,
+                      Label* done,
+                      InvokeFlag flag);
+
+  // Get the code for the given builtin. Returns if able to resolve
+  // the function in the 'resolved' flag.
+  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+  // Activation support.
+  // EnterFrame clobbers t0 and t1.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
 };
 
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 669fdaa..0b2d2c3 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -160,8 +160,31 @@
 
 
 Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
-  UNIMPLEMENTED_MIPS();
-  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+  // Registers:
+  // a1: function
+  // ra: return address
+
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+  // Preserve the function.
+  __ Push(a1);
+  // Setup aligned call.
+  __ SetupAlignedCall(t0, 1);
+  // Push the function on the stack as the argument to the runtime function.
+  __ Push(a1);
+  // Call the runtime function
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  __ ReturnFromAlignedCall();
+  // Calculate the entry point.
+  __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+  // Restore saved function.
+  __ Pop(a1);
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+  // Do a tail-call of the compiled function.
+  __ Jump(t9);
+
+  return GetCodeWithFlags(flags, "LazyCompileStub");
 }
 
 
@@ -174,6 +197,26 @@
 }
 
 
+Object* CallStubCompiler::CompileArrayPushCall(Object* object,
+                                               JSObject* holder,
+                                               JSFunction* function,
+                                               String* name,
+                                               CheckType check) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
+Object* CallStubCompiler::CompileArrayPopCall(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              CheckType check) {
+  UNIMPLEMENTED_MIPS();
+  return reinterpret_cast<Object*>(NULL);   // UNIMPLEMENTED RETURN
+}
+
+
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
index e89882f..c2116de 100644
--- a/src/mips/virtual-frame-mips.cc
+++ b/src/mips/virtual-frame-mips.cc
@@ -53,7 +53,12 @@
 
 
 void VirtualFrame::SyncRange(int begin, int end) {
-  UNIMPLEMENTED_MIPS();
+  // All elements are in memory on MIPS (ie, synced).
+#ifdef DEBUG
+  for (int i = begin; i <= end; i++) {
+    ASSERT(elements_[i].is_synced());
+  }
+#endif
 }
 
 
@@ -63,7 +68,13 @@
 
 
 void VirtualFrame::Enter() {
-  UNIMPLEMENTED_MIPS();
+  // TODO(MIPS): Implement DEBUG
+
+  // We are about to push four values to the frame.
+  Adjust(4);
+  __ MultiPush(ra.bit() | fp.bit() | cp.bit() | a1.bit());
+  // Adjust FP to point to saved FP.
+  __ addiu(fp, sp, 2 * kPointerSize);
 }
 
 
@@ -73,7 +84,17 @@
 
 
 void VirtualFrame::AllocateStackSlots() {
-  UNIMPLEMENTED_MIPS();
+  int count = local_count();
+  if (count > 0) {
+    Comment cmnt(masm(), "[ Allocate space for locals");
+    Adjust(count);
+      // Initialize stack slots with 'undefined' value.
+    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+    __ addiu(sp, sp, -count * kPointerSize);
+    for (int i = 0; i < count; i++) {
+      __ sw(t0, MemOperand(sp, (count-i-1)*kPointerSize));
+    }
+  }
 }
 
 
@@ -128,12 +149,16 @@
 
 
 void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
-  UNIMPLEMENTED_MIPS();
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(f, arg_count);
 }
 
 
 void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  UNIMPLEMENTED_MIPS();
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ CallRuntime(id, arg_count);
 }
 
 
@@ -155,16 +180,37 @@
 }
 
 
-void VirtualFrame::RawCallCodeObject(Handle<Code> code,
-                                       RelocInfo::Mode rmode) {
-  UNIMPLEMENTED_MIPS();
-}
-
-
 void VirtualFrame::CallCodeObject(Handle<Code> code,
                                   RelocInfo::Mode rmode,
                                   int dropped_args) {
-  UNIMPLEMENTED_MIPS();
+  switch (code->kind()) {
+    case Code::CALL_IC:
+      break;
+    case Code::FUNCTION:
+      UNIMPLEMENTED_MIPS();
+      break;
+    case Code::KEYED_LOAD_IC:
+      UNIMPLEMENTED_MIPS();
+      break;
+    case Code::LOAD_IC:
+      UNIMPLEMENTED_MIPS();
+      break;
+    case Code::KEYED_STORE_IC:
+      UNIMPLEMENTED_MIPS();
+      break;
+    case Code::STORE_IC:
+      UNIMPLEMENTED_MIPS();
+      break;
+    case Code::BUILTIN:
+      UNIMPLEMENTED_MIPS();
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  Forget(dropped_args);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ Call(code, rmode);
 }
 
 
@@ -187,7 +233,24 @@
 
 
 void VirtualFrame::Drop(int count) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(count >= 0);
+  ASSERT(height() >= count);
+  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
+
+  // Emit code to lower the stack pointer if necessary.
+  if (num_virtual_elements < count) {
+    int num_dropped = count - num_virtual_elements;
+    stack_pointer_ -= num_dropped;
+    __ addiu(sp, sp, num_dropped * kPointerSize);
+  }
+
+  // Discard elements from the virtual frame and free any registers.
+  for (int i = 0; i < count; i++) {
+    FrameElement dropped = elements_.RemoveLast();
+    if (dropped.is_register()) {
+      Unuse(dropped.reg());
+    }
+  }
 }
 
 
@@ -199,27 +262,50 @@
 Result VirtualFrame::Pop() {
   UNIMPLEMENTED_MIPS();
   Result res = Result();
-  return res;    // UNIMPLEMENTED RETUR
+  return res;    // UNIMPLEMENTED RETURN
 }
 
 
 void VirtualFrame::EmitPop(Register reg) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ Pop(reg);
 }
 
+
 void VirtualFrame::EmitMultiPop(RegList regs) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(stack_pointer_ == element_count() - 1);
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      stack_pointer_--;
+      elements_.RemoveLast();
+    }
+  }
+  __ MultiPop(regs);
 }
 
 
 void VirtualFrame::EmitPush(Register reg) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
+  stack_pointer_++;
+  __ Push(reg);
 }
 
+
 void VirtualFrame::EmitMultiPush(RegList regs) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(stack_pointer_ == element_count() - 1);
+  for (int16_t i = kNumRegisters; i > 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
+      stack_pointer_++;
+    }
+  }
+  __ MultiPush(regs);
 }
 
+
 void VirtualFrame::EmitArgumentSlots(RegList reglist) {
   UNIMPLEMENTED_MIPS();
 }
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
index 77c795c..b32e2ae 100644
--- a/src/mips/virtual-frame-mips.h
+++ b/src/mips/virtual-frame-mips.h
@@ -39,18 +39,18 @@
 // -------------------------------------------------------------------------
 // Virtual frames
 //
-// The virtual frame is an abstraction of the physical stack frame.  It
+// The virtual frame is an abstraction of the physical stack frame. It
 // encapsulates the parameters, frame-allocated locals, and the expression
-// stack.  It supports push/pop operations on the expression stack, as well
+// stack. It supports push/pop operations on the expression stack, as well
 // as random access to the expression stack elements, locals, and
 // parameters.
 
 class VirtualFrame : public ZoneObject {
  public:
   // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled.  The constructor spills the code
+  // expected to remain spilled. The constructor spills the code
   // generator's current frame, but no attempt is made to require it
-  // to stay spilled.  It is intended as documentation while the code
+  // to stay spilled. It is intended as documentation while the code
   // generator is being transformed.
   class SpilledScope BASE_EMBEDDED {
    public:
@@ -105,12 +105,12 @@
   }
 
   // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (eg, the frame after an exception handler is pushed).  No code is
+  // frame (eg, the frame after an exception handler is pushed). No code is
   // emitted.
   void Adjust(int count);
 
   // Forget elements from the top of the frame to match an actual frame (eg,
-  // the frame after a runtime call).  No code is emitted.
+  // the frame after a runtime call). No code is emitted.
   void Forget(int count) {
     ASSERT(count >= 0);
     ASSERT(stack_pointer_ == element_count() - 1);
@@ -121,7 +121,7 @@
   }
 
   // Forget count elements from the top of the frame and adjust the stack
-  // pointer downward.  This is used, for example, before merging frames at
+  // pointer downward. This is used, for example, before merging frames at
   // break, continue, and return targets.
   void ForgetElements(int count);
 
@@ -133,24 +133,24 @@
     if (is_used(reg)) SpillElementAt(register_location(reg));
   }
 
-  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // Spill all occurrences of an arbitrary register if possible. Return the
   // register spilled or no_reg if it was not possible to free any register
   // (ie, they all have frame-external references).
   Register SpillAnyRegister();
 
   // Prepare this virtual frame for merging to an expected frame by
   // performing some state changes that do not require generating
-  // code.  It is guaranteed that no code will be generated.
+  // code. It is guaranteed that no code will be generated.
   void PrepareMergeTo(VirtualFrame* expected);
 
   // Make this virtual frame have a state identical to an expected virtual
-  // frame.  As a side effect, code may be emitted to make this frame match
+  // frame. As a side effect, code may be emitted to make this frame match
   // the expected one.
   void MergeTo(VirtualFrame* expected);
 
-  // Detach a frame from its code generator, perhaps temporarily.  This
+  // Detach a frame from its code generator, perhaps temporarily. This
   // tells the register allocator that it is free to use frame-internal
-  // registers.  Used when the code generator's frame is switched from this
+  // registers. Used when the code generator's frame is switched from this
   // one to NULL by an unconditional jump.
   void DetachFromCodeGenerator() {
     RegisterAllocator* cgen_allocator = cgen()->allocator();
@@ -159,7 +159,7 @@
     }
   }
 
-  // (Re)attach a frame to its code generator.  This informs the register
+  // (Re)attach a frame to its code generator. This informs the register
   // allocator that the frame-internal register references are active again.
   // Used when a code generator's frame is switched from NULL to this one by
   // binding a label.
@@ -170,17 +170,17 @@
     }
   }
 
-  // Emit code for the physical JS entry and exit frame sequences.  After
+  // Emit code for the physical JS entry and exit frame sequences. After
   // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used.  Note that Enter does not allocate space in
+  // Exit it should not be used. Note that Enter does not allocate space in
   // the physical frame for storing frame-allocated locals.
   void Enter();
   void Exit();
 
   // Prepare for returning from the frame by spilling locals and
-  // dropping all non-locals elements in the virtual frame.  This
+  // dropping all non-locals elements in the virtual frame. This
   // avoids generating unnecessary merge code when jumping to the
-  // shared return site.  Emits code for spills.
+  // shared return site. Emits code for spills.
   void PrepareForReturn();
 
   // Allocate and initialize the frame-allocated locals.
@@ -194,11 +194,11 @@
     return MemOperand(sp, index * kPointerSize);
   }
 
-  // Random-access store to a frame-top relative frame element.  The result
+  // Random-access store to a frame-top relative frame element. The result
   // becomes owned by the frame and is invalidated.
   void SetElementAt(int index, Result* value);
 
-  // Set a frame element to a constant.  The index is frame-top relative.
+  // Set a frame element to a constant. The index is frame-top relative.
   void SetElementAt(int index, Handle<Object> value) {
     Result temp(value);
     SetElementAt(index, &temp);
@@ -221,13 +221,13 @@
   }
 
   // Push the value of a local frame slot on top of the frame and invalidate
-  // the local slot.  The slot should be written to before trying to read
+  // the local slot. The slot should be written to before trying to read
   // from it again.
   void TakeLocalAt(int index) {
     TakeFrameSlotAt(local0_index() + index);
   }
 
-  // Store the top value on the virtual frame into a local frame slot.  The
+  // Store the top value on the virtual frame into a local frame slot. The
   // value is left in place on top of the frame.
   void StoreToLocalAt(int index) {
     StoreToFrameSlotAt(local0_index() + index);
@@ -267,7 +267,7 @@
   }
 
   // Push the value of a paramter frame slot on top of the frame and
-  // invalidate the parameter slot.  The slot should be written to before
+  // invalidate the parameter slot. The slot should be written to before
   // trying to read from it again.
   void TakeParameterAt(int index) {
     TakeFrameSlotAt(param0_index() + index);
@@ -292,12 +292,8 @@
     RawCallStub(stub);
   }
 
-  // Call stub that expects its argument in r0.  The argument is given
-  // as a result which must be the register r0.
   void CallStub(CodeStub* stub, Result* arg);
 
-  // Call stub that expects its arguments in r1 and r0.  The arguments
-  // are given as results which must be the appropriate registers.
   void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
 
   // Call runtime given the number of arguments expected on (and
@@ -317,7 +313,7 @@
                      int arg_count);
 
   // Call into an IC stub given the number of arguments it removes
-  // from the stack.  Register arguments are passed as results and
+  // from the stack. Register arguments are passed as results and
   // consumed by the call.
   void CallCodeObject(Handle<Code> ic,
                       RelocInfo::Mode rmode,
@@ -333,8 +329,8 @@
                       int dropped_args,
                       bool set_auto_args_slots = false);
 
-  // Drop a number of elements from the top of the expression stack.  May
-  // emit code to affect the physical frame.  Does not clobber any registers
+  // Drop a number of elements from the top of the expression stack. May
+  // emit code to affect the physical frame. Does not clobber any registers
   // excepting possibly the stack pointer.
   void Drop(int count);
   // Similar to VirtualFrame::Drop but we don't modify the actual stack.
@@ -348,7 +344,7 @@
   // Duplicate the top element of the frame.
   void Dup() { PushFrameSlotAt(element_count() - 1); }
 
-  // Pop an element from the top of the expression stack.  Returns a
+  // Pop an element from the top of the expression stack. Returns a
   // Result, which may be a constant or a register.
   Result Pop();
 
@@ -356,15 +352,15 @@
   // emit a corresponding pop instruction.
   void EmitPop(Register reg);
   // Same but for multiple registers
-  void EmitMultiPop(RegList regs);  // higher indexed registers popped first
-  void EmitMultiPopReversed(RegList regs);  // lower first
+  void EmitMultiPop(RegList regs);
+  void EmitMultiPopReversed(RegList regs);
 
   // Push an element on top of the expression stack and emit a
   // corresponding push instruction.
   void EmitPush(Register reg);
   // Same but for multiple registers.
-  void EmitMultiPush(RegList regs);  // lower indexed registers are pushed first
-  void EmitMultiPushReversed(RegList regs);  // higher first
+  void EmitMultiPush(RegList regs);
+  void EmitMultiPushReversed(RegList regs);
 
   // Push an element on the virtual frame.
   inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
@@ -384,7 +380,7 @@
 
   // Nip removes zero or more elements from immediately below the top
   // of the frame, leaving the previous top-of-frame value on top of
-  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
   inline void Nip(int num_dropped);
 
   // This pushes 4 arguments slots on the stack and saves asked 'a' registers
@@ -392,6 +388,7 @@
   void EmitArgumentSlots(RegList reglist);
 
   inline void SetTypeForLocalAt(int index, NumberInfo info);
+  inline void SetTypeForParamAt(int index, NumberInfo info);
 
  private:
   static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
@@ -416,23 +413,23 @@
   int local_count() { return cgen()->scope()->num_stack_slots(); }
 
   // The index of the element that is at the processor's frame pointer
-  // (the fp register).  The parameters, receiver, function, and context
+  // (the fp register). The parameters, receiver, function, and context
   // are below the frame pointer.
   int frame_pointer() { return parameter_count() + 3; }
 
-  // The index of the first parameter.  The receiver lies below the first
+  // The index of the first parameter. The receiver lies below the first
   // parameter.
   int param0_index() { return 1; }
 
-  // The index of the context slot in the frame.  It is immediately
+  // The index of the context slot in the frame. It is immediately
   // below the frame pointer.
   int context_index() { return frame_pointer() - 1; }
 
-  // The index of the function slot in the frame.  It is below the frame
+  // The index of the function slot in the frame. It is below the frame
   // pointer and context slot.
   int function_index() { return frame_pointer() - 2; }
 
-  // The index of the first local.  Between the frame pointer and the
+  // The index of the first local. Between the frame pointer and the
   // locals lies the return address.
   int local0_index() { return frame_pointer() + 2; }
 
@@ -447,7 +444,7 @@
     return (frame_pointer() - index) * kPointerSize;
   }
 
-  // Record an occurrence of a register in the virtual frame.  This has the
+  // Record an occurrence of a register in the virtual frame. This has the
   // effect of incrementing the register's external reference count and
   // of updating the index of the register's location in the frame.
   void Use(Register reg, int index) {
@@ -456,7 +453,7 @@
     cgen()->allocator()->Use(reg);
   }
 
-  // Record that a register reference has been dropped from the frame.  This
+  // Record that a register reference has been dropped from the frame. This
   // decrements the register's external reference count and invalidates the
   // index of the register's location in the frame.
   void Unuse(Register reg) {
@@ -470,7 +467,7 @@
   // constant.
   void SpillElementAt(int index);
 
-  // Sync the element at a particular index.  If it is a register or
+  // Sync the element at a particular index. If it is a register or
   // constant that disagrees with the value on the stack, write it to memory.
   // Keep the element type as register or constant, and clear the dirty bit.
   void SyncElementAt(int index);
@@ -497,7 +494,7 @@
   void StoreToFrameSlotAt(int index);
 
   // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame.  Sync all other frame elements.
+  // on the frame. Sync all other frame elements.
   // Then drop dropped_args elements from the virtual frame, to match
   // the effect of an upcoming call that will drop them from the stack.
   void PrepareForCall(int spilled_args, int dropped_args);
@@ -518,14 +515,14 @@
   // Make the memory-to-register and constant-to-register moves
   // needed to make this frame equal the expected frame.
   // Called after all register-to-memory and register-to-register
-  // moves have been made.  After this function returns, the frames
+  // moves have been made. After this function returns, the frames
   // should be equal.
   void MergeMoveMemoryToRegisters(VirtualFrame* expected);
 
   // Invalidates a frame slot (puts an invalid frame element in it).
   // Copies on the frame are correctly handled, and if this slot was
   // the backing store of copies, the index of the new backing store
-  // is returned.  Otherwise, returns kIllegalIndex.
+  // is returned. Otherwise, returns kIllegalIndex.
   // Register counts are correctly updated.
   int InvalidateFrameSlotAt(int index);
 
diff --git a/src/mirror-delay.js b/src/mirror-debugger.js
similarity index 99%
rename from src/mirror-delay.js
rename to src/mirror-debugger.js
index 7c743ec..dfe297b 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-debugger.js
@@ -25,13 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Touch the RegExp and Date functions to make sure that date-delay.js and
-// regexp-delay.js has been loaded. This is required as the mirrors use
-// functions within these files through the builtins object.
-RegExp;
-Date;
-
-
 // Handle id counters.
 var next_handle_ = 0;
 var next_transient_handle_ = -1;
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 6457ae7..a30b450 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -35,6 +35,7 @@
 #include "natives.h"
 #include "platform.h"
 #include "serialize.h"
+#include "list.h"
 
 // use explicit namespace to avoid clashing with types in namespace v8
 namespace i = v8::internal;
@@ -96,7 +97,9 @@
 
 class CppByteSink : public i::SnapshotByteSink {
  public:
-  explicit CppByteSink(const char* snapshot_file) : bytes_written_(0) {
+  explicit CppByteSink(const char* snapshot_file)
+      : bytes_written_(0),
+        partial_sink_(this) {
     fp_ = i::OS::FOpen(snapshot_file, "wb");
     if (fp_ == NULL) {
       i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
@@ -111,11 +114,53 @@
   }
 
   virtual ~CppByteSink() {
-    if (fp_ != NULL) {
-      fprintf(fp_, "};\n\n");
-      fprintf(fp_, "int Snapshot::size_ = %d;\n\n", bytes_written_);
-      fprintf(fp_, "} }  // namespace v8::internal\n");
-      fclose(fp_);
+    fprintf(fp_, "const int Snapshot::size_ = %d;\n\n", bytes_written_);
+    fprintf(fp_, "} }  // namespace v8::internal\n");
+    fclose(fp_);
+  }
+
+  void WriteSpaceUsed(
+      int new_space_used,
+      int pointer_space_used,
+      int data_space_used,
+      int code_space_used,
+      int map_space_used,
+      int cell_space_used,
+      int large_space_used) {
+    fprintf(fp_, "};\n\n");
+    fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used);
+    fprintf(fp_,
+            "const int Snapshot::pointer_space_used_ = %d;\n",
+            pointer_space_used);
+    fprintf(fp_,
+            "const int Snapshot::data_space_used_ = %d;\n",
+            data_space_used);
+    fprintf(fp_,
+            "const int Snapshot::code_space_used_ = %d;\n",
+            code_space_used);
+    fprintf(fp_, "const int Snapshot::map_space_used_ = %d;\n", map_space_used);
+    fprintf(fp_,
+            "const int Snapshot::cell_space_used_ = %d;\n",
+            cell_space_used);
+    fprintf(fp_,
+            "const int Snapshot::large_space_used_ = %d;\n",
+            large_space_used);
+  }
+
+  void WritePartialSnapshot() {
+    int length = partial_sink_.Position();
+    fprintf(fp_, "};\n\n");
+    fprintf(fp_, "const int Snapshot::context_size_ = %d;\n",  length);
+    fprintf(fp_, "const byte Snapshot::context_data_[] = {\n");
+    for (int j = 0; j < length; j++) {
+      if ((j & 0x1f) == 0x1f) {
+        fprintf(fp_, "\n");
+      }
+      char byte = partial_sink_.at(j);
+      if (j != 0) {
+        fprintf(fp_, ",");
+      }
+      fprintf(fp_, "%d", byte);
     }
   }
 
@@ -125,7 +170,7 @@
     }
     fprintf(fp_, "%d", byte);
     bytes_written_++;
-    if ((bytes_written_ & 0x3f) == 0) {
+    if ((bytes_written_ & 0x1f) == 0) {
       fprintf(fp_, "\n");
     }
   }
@@ -134,9 +179,28 @@
     return bytes_written_;
   }
 
+  i::SnapshotByteSink* partial_sink() { return &partial_sink_; }
+
+  class PartialSnapshotSink : public i::SnapshotByteSink {
+   public:
+    explicit PartialSnapshotSink(CppByteSink* parent)
+        : parent_(parent),
+          data_() { }
+    virtual ~PartialSnapshotSink() { data_.Free(); }
+    virtual void Put(int byte, const char* description) {
+      data_.Add(byte);
+    }
+    virtual int Position() { return data_.length(); }
+    char at(int i) { return data_[i]; }
+   private:
+    CppByteSink* parent_;
+    i::List<char> data_;
+  };
+
  private:
   FILE* fp_;
   int bytes_written_;
+  PartialSnapshotSink partial_sink_;
 };
 
 
@@ -162,12 +226,31 @@
       i::Bootstrapper::NativesSourceLookup(i);
     }
   }
+  // If we don't do this then we end up with a stray root pointing at the
+  // context even after we have disposed of the context.
+  i::Heap::CollectAllGarbage(true);
+  i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
   context.Dispose();
   CppByteSink sink(argv[1]);
   // This results in a somewhat smaller snapshot, probably because it gets rid
   // of some things that are cached between garbage collections.
-  i::Heap::CollectAllGarbage(true);
   i::StartupSerializer ser(&sink);
-  ser.Serialize();
+  ser.SerializeStrongReferences();
+
+  i::PartialSerializer partial_ser(&ser, sink.partial_sink());
+  partial_ser.Serialize(&raw_context);
+
+  ser.SerializeWeakReferences();
+
+  sink.WritePartialSnapshot();
+
+  sink.WriteSpaceUsed(
+      partial_ser.CurrentAllocationAddress(i::NEW_SPACE),
+      partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE),
+      partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE),
+      partial_ser.CurrentAllocationAddress(i::CODE_SPACE),
+      partial_ser.CurrentAllocationAddress(i::MAP_SPACE),
+      partial_ser.CurrentAllocationAddress(i::CELL_SPACE),
+      partial_ser.CurrentAllocationAddress(i::LO_SPACE));
   return 0;
 }
diff --git a/src/natives.h b/src/natives.h
index fdfd213..639a2d3 100644
--- a/src/natives.h
+++ b/src/natives.h
@@ -44,13 +44,13 @@
  public:
   // Number of built-in scripts.
   static int GetBuiltinsCount();
-  // Number of delayed/lazy loading scripts.
-  static int GetDelayCount();
+  // Number of debugger implementation scripts.
+  static int GetDebuggerCount();
 
-  // These are used to access built-in scripts.
-  // The delayed script has an index in the interval [0, GetDelayCount()).
-  // The non-delayed script has an index in the interval
-  // [GetDelayCount(), GetNativesCount()).
+  // These are used to access built-in scripts.  The debugger implementation
+  // scripts have an index in the interval [0, GetDebuggerCount()).  The
+  // non-debugger scripts have an index in the interval [GetDebuggerCount(),
+  // GetNativesCount()).
   static int GetIndex(const char* name);
   static Vector<const char> GetScriptSource(int index);
   static Vector<const char> GetScriptName(int index);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 44a3b1a..a6ad958 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -615,9 +615,6 @@
   if (is_undetectable()) {
     PrintF(" - undetectable\n");
   }
-  if (needs_loading()) {
-    PrintF(" - needs_loading\n");
-  }
   if (has_instance_call_handler()) {
     PrintF(" - instance_call_handler\n");
   }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index e7daa2d..a26da7d 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2368,8 +2368,8 @@
 ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
 #endif
 
-ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
 ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
+ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
 ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -2401,6 +2401,7 @@
               kFormalParameterCountOffset)
 INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
               kExpectedNofPropertiesOffset)
+INT_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
 INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
               kStartPositionAndTypeOffset)
 INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
@@ -2489,11 +2490,6 @@
 }
 
 
-bool JSObject::IsLoaded() {
-  return !map()->needs_loading();
-}
-
-
 Code* JSFunction::code() {
   return shared()->code();
 }
@@ -2573,6 +2569,7 @@
 
 
 int JSFunction::NumberOfLiterals() {
+  ASSERT(!IsBoilerplate());
   return literals()->length();
 }
 
diff --git a/src/objects.cc b/src/objects.cc
index 132aa9e..a1fbc99 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -338,55 +338,6 @@
 }
 
 
-Object* JSObject::GetLazyProperty(Object* receiver,
-                                  LookupResult* result,
-                                  String* name,
-                                  PropertyAttributes* attributes) {
-  HandleScope scope;
-  Handle<Object> this_handle(this);
-  Handle<Object> receiver_handle(receiver);
-  Handle<String> name_handle(name);
-  bool pending_exception;
-  LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
-           &pending_exception);
-  if (pending_exception) return Failure::Exception();
-  return this_handle->GetPropertyWithReceiver(*receiver_handle,
-                                              *name_handle,
-                                              attributes);
-}
-
-
-Object* JSObject::SetLazyProperty(LookupResult* result,
-                                  String* name,
-                                  Object* value,
-                                  PropertyAttributes attributes) {
-  ASSERT(!IsJSGlobalProxy());
-  HandleScope scope;
-  Handle<JSObject> this_handle(this);
-  Handle<String> name_handle(name);
-  Handle<Object> value_handle(value);
-  bool pending_exception;
-  LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
-           &pending_exception);
-  if (pending_exception) return Failure::Exception();
-  return this_handle->SetProperty(*name_handle, *value_handle, attributes);
-}
-
-
-Object* JSObject::DeleteLazyProperty(LookupResult* result,
-                                     String* name,
-                                     DeleteMode mode) {
-  HandleScope scope;
-  Handle<JSObject> this_handle(this);
-  Handle<String> name_handle(name);
-  bool pending_exception;
-  LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())),
-           &pending_exception);
-  if (pending_exception) return Failure::Exception();
-  return this_handle->DeleteProperty(*name_handle, mode);
-}
-
-
 Object* JSObject::GetNormalizedProperty(LookupResult* result) {
   ASSERT(!HasFastProperties());
   Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
@@ -530,12 +481,6 @@
     return Heap::undefined_value();
   }
   *attributes = result->GetAttributes();
-  if (!result->IsLoaded()) {
-    return JSObject::cast(this)->GetLazyProperty(receiver,
-                                                 result,
-                                                 name,
-                                                 attributes);
-  }
   Object* value;
   JSObject* holder = result->holder();
   switch (result->type()) {
@@ -1786,7 +1731,6 @@
           return;
         }
         value = JSGlobalPropertyCell::cast(value)->value();
-        ASSERT(result->IsLoaded());
       }
       // Make sure to disallow caching for uninitialized constants
       // found in the dictionary-mode objects.
@@ -1912,9 +1856,6 @@
     // Neither properties nor transitions found.
     return AddProperty(name, value, attributes);
   }
-  if (!result->IsLoaded()) {
-    return SetLazyProperty(result, name, value, attributes);
-  }
   if (result->IsReadOnly() && result->IsProperty()) return value;
   // This is a real property that is not read-only, or it is a
   // transition or null descriptor and there are no setters in the prototypes.
@@ -1994,9 +1935,7 @@
     // Neither properties nor transitions found.
     return AddProperty(name, value, attributes);
   }
-  if (!result.IsLoaded()) {
-    return SetLazyProperty(&result, name, value, attributes);
-  }
+
   PropertyDetails details = PropertyDetails(attributes, NORMAL);
 
   // Check of IsReadOnly removed from here in clone.
@@ -2514,11 +2453,6 @@
       }
       return DeletePropertyWithInterceptor(name);
     }
-    if (!result.IsLoaded()) {
-      return JSObject::cast(this)->DeleteLazyProperty(&result,
-                                                      name,
-                                                      mode);
-    }
     // Normalize object if needed.
     Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
     if (obj->IsFailure()) return obj;
diff --git a/src/objects.h b/src/objects.h
index d696570..01977f0 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1224,12 +1224,6 @@
   // Deletes the named property in a normalized object.
   Object* DeleteNormalizedProperty(String* name, DeleteMode mode);
 
-  // Sets a property that currently has lazy loading.
-  Object* SetLazyProperty(LookupResult* result,
-                          String* name,
-                          Object* value,
-                          PropertyAttributes attributes);
-
   // Returns the class name ([[Class]] property in the specification).
   String* class_name();
 
@@ -1264,13 +1258,6 @@
   Object* GetLocalPropertyPostInterceptor(JSObject* receiver,
                                           String* name,
                                           PropertyAttributes* attributes);
-  Object* GetLazyProperty(Object* receiver,
-                          LookupResult* result,
-                          String* name,
-                          PropertyAttributes* attributes);
-
-  // Tells whether this object needs to be loaded.
-  inline bool IsLoaded();
 
   // Returns true if this is an instance of an api function and has
   // been modified since it was created.  May give false positives.
@@ -1308,9 +1295,6 @@
 
   Object* DeleteProperty(String* name, DeleteMode mode);
   Object* DeleteElement(uint32_t index, DeleteMode mode);
-  Object* DeleteLazyProperty(LookupResult* result,
-                             String* name,
-                             DeleteMode mode);
 
   // Tests for the fast common case for property enumeration.
   bool IsSimpleEnum();
@@ -2892,20 +2876,6 @@
     return ((1 << kIsUndetectable) & bit_field()) != 0;
   }
 
-  inline void set_needs_loading(bool value) {
-    if (value) {
-      set_bit_field2(bit_field2() | (1 << kNeedsLoading));
-    } else {
-      set_bit_field2(bit_field2() & ~(1 << kNeedsLoading));
-    }
-  }
-
-  // Does this object or function require a lazily loaded script to be
-  // run before being used?
-  inline bool needs_loading() {
-    return ((1 << kNeedsLoading) & bit_field2()) != 0;
-  }
-
   // Tells whether the instance has a call-as-function handler.
   inline void set_has_instance_call_handler() {
     set_bit_field(bit_field() | (1 << kHasInstanceCallHandler));
@@ -3039,8 +3009,7 @@
   static const int kIsAccessCheckNeeded = 7;
 
   // Bit positions for bit field 2
-  static const int kNeedsLoading = 0;
-  static const int kIsExtensible = 1;
+  static const int kIsExtensible = 0;
 
   // Layout of the default cache. It holds alternating name and code objects.
   static const int kCodeCacheEntrySize = 2;
@@ -3204,6 +3173,10 @@
   // [script info]: Script from which the function originates.
   DECL_ACCESSORS(script, Object)
 
+  // [num_literals]: Number of literals used by this function.
+  inline int num_literals();
+  inline void set_num_literals(int value);
+
   // [start_position_and_type]: Field used to store both the source code
   // position, whether or not the function is a function expression,
   // and whether or not the function is a toplevel function. The two
@@ -3321,8 +3294,9 @@
   static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
   static const int kExpectedNofPropertiesOffset =
       kFormalParameterCountOffset + kIntSize;
+  static const int kNumLiteralsOffset = kExpectedNofPropertiesOffset + kIntSize;
   static const int kStartPositionAndTypeOffset =
-      kExpectedNofPropertiesOffset + kIntSize;
+      kNumLiteralsOffset + kIntSize;
   static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
   static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
   static const int kCompilerHintsOffset =
@@ -3331,6 +3305,7 @@
       kCompilerHintsOffset + kIntSize;
   // Total size.
   static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
+  static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
 
  private:
   // Bit positions in start_position_and_type.
diff --git a/src/parser.cc b/src/parser.cc
index cff56a3..b923a7f 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -148,6 +148,7 @@
   ParserLog* log_;
   bool is_pre_parsing_;
   ScriptDataImpl* pre_data_;
+  bool seen_loop_stmt_;  // Used for inner loop detection.
 
   bool inside_with() const  { return with_nesting_level_ > 0; }
   ParserFactory* factory() const  { return factory_; }
@@ -1205,7 +1206,8 @@
       factory_(factory),
       log_(log),
       is_pre_parsing_(is_pre_parsing == PREPARSE),
-      pre_data_(pre_data) {
+      pre_data_(pre_data),
+      seen_loop_stmt_(false) {
 }
 
 
@@ -1962,20 +1964,19 @@
   const int literals = fun->NumberOfLiterals();
   Handle<Code> code = Handle<Code>(fun->shared()->code());
   Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
-  Handle<JSFunction> boilerplate =
-      Factory::NewFunctionBoilerplate(name, literals, code);
-  boilerplate->shared()->set_construct_stub(*construct_stub);
+  Handle<SharedFunctionInfo> shared =
+      Factory::NewSharedFunctionInfo(name, literals, code);
+  shared->set_construct_stub(*construct_stub);
 
   // Copy the function data to the boilerplate.
-  boilerplate->shared()->set_function_data(fun->shared()->function_data());
+  shared->set_function_data(fun->shared()->function_data());
   int parameters = fun->shared()->formal_parameter_count();
-  boilerplate->shared()->set_formal_parameter_count(parameters);
+  shared->set_formal_parameter_count(parameters);
 
   // TODO(1240846): It's weird that native function declarations are
   // introduced dynamically when we meet their declarations, whereas
   // other functions are setup when entering the surrounding scope.
-  FunctionBoilerplateLiteral* lit =
-      NEW(FunctionBoilerplateLiteral(boilerplate));
+  SharedFunctionInfoLiteral* lit = NEW(SharedFunctionInfoLiteral(shared));
   VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
   return NEW(ExpressionStatement(
       new Assignment(Token::INIT_VAR, var, lit, RelocInfo::kNoPosition)));
@@ -2644,6 +2645,7 @@
   }
 
   Expression* cond = ParseExpression(true, CHECK_OK);
+  if (cond != NULL) cond->set_is_loop_condition(true);
   Expect(Token::RPAREN, CHECK_OK);
 
   // Allow do-statements to be terminated with and without
@@ -2653,6 +2655,9 @@
   if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
 
   if (loop != NULL) loop->Initialize(cond, body);
+
+  seen_loop_stmt_ = true;
+
   return loop;
 }
 
@@ -2667,10 +2672,14 @@
   Expect(Token::WHILE, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
   Expression* cond = ParseExpression(true, CHECK_OK);
+  if (cond != NULL) cond->set_is_loop_condition(true);
   Expect(Token::RPAREN, CHECK_OK);
   Statement* body = ParseStatement(NULL, CHECK_OK);
 
   if (loop != NULL) loop->Initialize(cond, body);
+
+  seen_loop_stmt_ = true;
+
   return loop;
 }
 
@@ -2704,6 +2713,9 @@
           Block* result = NEW(Block(NULL, 2, false));
           result->AddStatement(variable_statement);
           result->AddStatement(loop);
+
+          seen_loop_stmt_ = true;
+
           // Parsed for-in loop w/ variable/const declaration.
           return result;
         }
@@ -2733,6 +2745,8 @@
         Statement* body = ParseStatement(NULL, CHECK_OK);
         if (loop) loop->Initialize(expression, enumerable, body);
 
+        seen_loop_stmt_ = true;
+
         // Parsed for-in loop.
         return loop;
 
@@ -2752,9 +2766,7 @@
   Expression* cond = NULL;
   if (peek() != Token::SEMICOLON) {
     cond = ParseExpression(true, CHECK_OK);
-    if (cond && cond->AsCompareOperation()) {
-      cond->AsCompareOperation()->set_is_for_loop_condition();
-    }
+    if (cond != NULL) cond->set_is_loop_condition(true);
   }
   Expect(Token::SEMICOLON, CHECK_OK);
 
@@ -2765,9 +2777,17 @@
   }
   Expect(Token::RPAREN, CHECK_OK);
 
+  seen_loop_stmt_ = false;
+
   Statement* body = ParseStatement(NULL, CHECK_OK);
 
+  // Mark this loop if it is an inner loop.
+  if (loop && !seen_loop_stmt_) loop->set_peel_this_loop(true);
+
   if (loop) loop->Initialize(init, cond, next, body);
+
+  seen_loop_stmt_ = true;
+
   return loop;
 }
 
@@ -3712,6 +3732,9 @@
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
 
+  // Reset flag used for inner loop detection.
+  seen_loop_stmt_ = false;
+
   bool is_named = !var_name.is_null();
 
   // The name associated with this function. If it's a function expression,
@@ -3822,6 +3845,12 @@
     if (!is_pre_parsing_) {
       function_literal->set_function_token_position(function_token_position);
     }
+
+    // Set flag for inner loop detection. We do not treat loops that contain a
+    // function literal as inner loops, because we want to avoid duplicating
+    // function literals when peeling or unrolling such a loop.
+    seen_loop_stmt_ = true;
+
     return function_literal;
   }
 }
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 3617e8a..e5cdd2e 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -192,7 +192,8 @@
 
 
 void OS::DebugBreak() {
-#if defined(__arm__) || defined(__thumb__)
+#if (defined(__arm__) || defined(__thumb__)) && \
+    defined(CAN_USE_ARMV5_INSTRUCTIONS)
   asm("bkpt 0");
 #else
   asm("int $3");
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index f1812ff..cd7bcb1 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -266,7 +266,8 @@
 void OS::DebugBreak() {
 // TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
 //  which is the architecture of generated code).
-#if defined(__arm__) || defined(__thumb__)
+#if (defined(__arm__) || defined(__thumb__)) && \
+    defined(CAN_USE_ARMV5_INSTRUCTIONS)
   asm("bkpt 0");
 #elif defined(__mips__)
   asm("break");
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 62e6004..f96e769 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -190,7 +190,8 @@
 
 
 void OS::DebugBreak() {
-#if defined(__arm__) || defined(__thumb__)
+#if (defined(__arm__) || defined(__thumb__)) && \
+    defined(CAN_USE_ARMV5_INSTRUCTIONS)
   asm("bkpt 0");
 #else
   asm("int $3");
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 04ffea9..48f306d 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -511,7 +511,7 @@
 // takes into account whether daylight saving is in effect at the time.
 // Only times in the 32-bit Unix range may be passed to this function.
 // Also, adding the time-zone offset to the input must not overflow.
-// The function EquivalentTime() in date-delay.js guarantees this.
+// The function EquivalentTime() in date.js guarantees this.
 int64_t Time::LocalOffset() {
   // Initialize timezone information, if needed.
   TzSet();
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 6e2a60e..75f6fc3 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -227,10 +227,10 @@
 }
 
 
-void PrettyPrinter::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void PrettyPrinter::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   Print("(");
-  PrintLiteral(node->boilerplate(), true);
+  PrintLiteral(node->shared_function_info(), true);
   Print(")");
 }
 
@@ -668,7 +668,8 @@
                                               Variable* var,
                                               Handle<Object> value,
                                               StaticType* type,
-                                              int num) {
+                                              int num,
+                                              bool is_primitive) {
   if (var == NULL) {
     PrintLiteralIndented(info, value, true);
   } else {
@@ -682,6 +683,8 @@
     if (num != AstNode::kNoNumber) {
       pos += OS::SNPrintF(buf + pos, ", num = %d", num);
     }
+    pos += OS::SNPrintF(buf + pos,
+                        is_primitive ? ", primitive" : ", non-primitive");
     OS::SNPrintF(buf + pos, ")");
     PrintLiteralIndented(buf.start(), value, true);
   }
@@ -740,7 +743,8 @@
       PrintLiteralWithModeIndented("VAR", scope->parameter(i),
                                    scope->parameter(i)->name(),
                                    scope->parameter(i)->type(),
-                                   AstNode::kNoNumber);
+                                   AstNode::kNoNumber,
+                                   false);
     }
   }
 }
@@ -786,7 +790,8 @@
                                  node->proxy()->AsVariable(),
                                  node->proxy()->name(),
                                  node->proxy()->AsVariable()->type(),
-                                 AstNode::kNoNumber);
+                                 AstNode::kNoNumber,
+                                 node->proxy()->IsPrimitive());
   } else {
     // function declarations
     PrintIndented("FUNCTION ");
@@ -918,10 +923,10 @@
 }
 
 
-void AstPrinter::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void AstPrinter::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   IndentedScope indent("FUNC LITERAL");
-  PrintLiteralIndented("BOILERPLATE", node->boilerplate(), true);
+  PrintLiteralIndented("SHARED INFO", node->shared_function_info(), true);
 }
 
 
@@ -1022,7 +1027,7 @@
 
 void AstPrinter::VisitVariableProxy(VariableProxy* node) {
   PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
-                               node->type(), node->num());
+                               node->type(), node->num(), node->IsPrimitive());
   Variable* var = node->var();
   if (var != NULL && var->rewrite() != NULL) {
     IndentedScope indent;
@@ -1326,9 +1331,9 @@
 }
 
 
-void JsonAstBuilder::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  TagScope tag(this, "FunctionBoilerplateLiteral");
+void JsonAstBuilder::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  TagScope tag(this, "SharedFunctionInfoLiteral");
 }
 
 
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 8e958c7..93ba0d9 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -103,7 +103,8 @@
                                     Variable* var,
                                     Handle<Object> value,
                                     StaticType* type,
-                                    int num);
+                                    int num,
+                                    bool is_primitive);
   void PrintLabelsIndented(const char* info, ZoneStringList* labels);
 
   void inc_indent() { indent_++; }
diff --git a/src/property.h b/src/property.h
index dc51348..15a5652 100644
--- a/src/property.h
+++ b/src/property.h
@@ -239,15 +239,6 @@
   bool IsCacheable() { return cacheable_; }
   void DisallowCaching() { cacheable_ = false; }
 
-  // Tells whether the value needs to be loaded.
-  bool IsLoaded() {
-    if (lookup_type_ == DESCRIPTOR_TYPE || lookup_type_ == DICTIONARY_TYPE) {
-      Object* target = GetLazyValue();
-      return !target->IsJSObject() || JSObject::cast(target)->IsLoaded();
-    }
-    return true;
-  }
-
   Object* GetLazyValue() {
     switch (type()) {
       case FIELD:
diff --git a/src/regexp-delay.js b/src/regexp.js
similarity index 100%
rename from src/regexp-delay.js
rename to src/regexp.js
diff --git a/src/rewriter.cc b/src/rewriter.cc
index e87fcce..c97408e 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -213,8 +213,8 @@
 }
 
 
-void AstOptimizer::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void AstOptimizer::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   USE(node);
 }
 
@@ -804,8 +804,8 @@
 }
 
 
-void Processor::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
+void Processor::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
   USE(node);
   UNREACHABLE();
 }
diff --git a/src/runtime.cc b/src/runtime.cc
index 0fe2457..b349815 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -788,9 +788,10 @@
       }
     } else {
       // Copy the function and update its context. Use it as value.
-      Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(value);
+      Handle<SharedFunctionInfo> shared =
+          Handle<SharedFunctionInfo>::cast(value);
       Handle<JSFunction> function =
-          Factory::NewFunctionFromBoilerplate(boilerplate, context, TENURED);
+          Factory::NewFunctionFromSharedFunctionInfo(shared, context, TENURED);
       value = function;
     }
 
@@ -1239,9 +1240,9 @@
 
 
 static void SetCustomCallGenerator(Handle<JSFunction> function,
-                                   CustomCallGenerator generator) {
+                                   ExternalReference* generator) {
   if (function->shared()->function_data()->IsUndefined()) {
-    function->shared()->set_function_data(*FromCData(generator));
+    function->shared()->set_function_data(*FromCData(generator->address()));
   }
 }
 
@@ -1249,7 +1250,7 @@
 static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder,
                                          const char* name,
                                          Builtins::Name builtin_name,
-                                         CustomCallGenerator generator = NULL) {
+                                         ExternalReference* generator = NULL) {
   Handle<String> key = Factory::LookupAsciiSymbol(name);
   Handle<Code> code(Builtins::builtin(builtin_name));
   Handle<JSFunction> optimized = Factory::NewFunction(key,
@@ -1266,22 +1267,22 @@
 }
 
 
-static Object* CompileArrayPushCall(CallStubCompiler* compiler,
-                                    Object* object,
-                                    JSObject* holder,
-                                    JSFunction* function,
-                                    String* name,
-                                    StubCompiler::CheckType check) {
+Object* CompileArrayPushCall(CallStubCompiler* compiler,
+                             Object* object,
+                             JSObject* holder,
+                             JSFunction* function,
+                             String* name,
+                             StubCompiler::CheckType check) {
   return compiler->CompileArrayPushCall(object, holder, function, name, check);
 }
 
 
-static Object* CompileArrayPopCall(CallStubCompiler* compiler,
-                                   Object* object,
-                                   JSObject* holder,
-                                   JSFunction* function,
-                                   String* name,
-                                   StubCompiler::CheckType check) {
+Object* CompileArrayPopCall(CallStubCompiler* compiler,
+                            Object* object,
+                            JSObject* holder,
+                            JSFunction* function,
+                            String* name,
+                            StubCompiler::CheckType check) {
   return compiler->CompileArrayPopCall(object, holder, function, name, check);
 }
 
@@ -1291,8 +1292,11 @@
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, holder, 0);
 
-  InstallBuiltin(holder, "pop", Builtins::ArrayPop, CompileArrayPopCall);
-  InstallBuiltin(holder, "push", Builtins::ArrayPush, CompileArrayPushCall);
+  ExternalReference pop = ExternalReference::compile_array_pop_call();
+  ExternalReference push = ExternalReference::compile_array_push_call();
+
+  InstallBuiltin(holder, "pop", Builtins::ArrayPop, &pop);
+  InstallBuiltin(holder, "push", Builtins::ArrayPush, &push);
   InstallBuiltin(holder, "shift", Builtins::ArrayShift);
   InstallBuiltin(holder, "unshift", Builtins::ArrayUnshift);
   InstallBuiltin(holder, "slice", Builtins::ArraySlice);
@@ -3099,7 +3103,7 @@
       // Lookup cache miss.  Perform lookup and update the cache if appropriate.
       LookupResult result;
       receiver->LocalLookup(key, &result);
-      if (result.IsProperty() && result.IsLoaded() && result.type() == FIELD) {
+      if (result.IsProperty() && result.type() == FIELD) {
         int offset = result.GetFieldIndex();
         KeyedLookupCache::Update(receiver_map, key, offset);
         return receiver->FastPropertyAt(offset);
@@ -5812,13 +5816,13 @@
   HandleScope scope;
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(Context, context, 0);
-  CONVERT_ARG_CHECKED(JSFunction, boilerplate, 1);
+  CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1);
 
   PretenureFlag pretenure = (context->global_context() == *context)
       ? TENURED       // Allocate global closures in old space.
       : NOT_TENURED;  // Allocate local closures in new space.
   Handle<JSFunction> result =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context, pretenure);
+      Factory::NewFunctionFromSharedFunctionInfo(shared, context, pretenure);
   return *result;
 }
 
@@ -6503,13 +6507,13 @@
   Handle<Context> context(Top::context()->global_context());
   Compiler::ValidationState validate = (is_json->IsTrue())
     ? Compiler::VALIDATE_JSON : Compiler::DONT_VALIDATE_JSON;
-  Handle<JSFunction> boilerplate = Compiler::CompileEval(source,
-                                                         context,
-                                                         true,
-                                                         validate);
-  if (boilerplate.is_null()) return Failure::Exception();
+  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
+                                                            context,
+                                                            true,
+                                                            validate);
+  if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
+      Factory::NewFunctionFromSharedFunctionInfo(shared, context, NOT_TENURED);
   return *fun;
 }
 
@@ -6582,14 +6586,14 @@
   // Deal with a normal eval call with a string argument. Compile it
   // and return the compiled function bound in the local context.
   Handle<String> source = args.at<String>(1);
-  Handle<JSFunction> boilerplate = Compiler::CompileEval(
+  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
       source,
       Handle<Context>(Top::context()),
       Top::context()->IsGlobalContext(),
       Compiler::DONT_VALIDATE_JSON);
-  if (boilerplate.is_null()) return MakePair(Failure::Exception(), NULL);
-  callee = Factory::NewFunctionFromBoilerplate(
-      boilerplate,
+  if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
+  callee = Factory::NewFunctionFromSharedFunctionInfo(
+      shared,
       Handle<Context>(Top::context()),
       NOT_TENURED);
   return MakePair(*callee, args[2]);
@@ -8571,14 +8575,14 @@
   Handle<String> function_source =
       Factory::NewStringFromAscii(Vector<const char>(source_str,
                                                      source_str_length));
-  Handle<JSFunction> boilerplate =
+  Handle<SharedFunctionInfo> shared =
       Compiler::CompileEval(function_source,
                             context,
                             context->IsGlobalContext(),
                             Compiler::DONT_VALIDATE_JSON);
-  if (boilerplate.is_null()) return Failure::Exception();
+  if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromSharedFunctionInfo(shared, context);
 
   // Invoke the result of the compilation to get the evaluation function.
   bool has_pending_exception;
@@ -8639,15 +8643,15 @@
   Handle<Context> context = Top::global_context();
 
   // Compile the source to be evaluated.
-  Handle<JSFunction> boilerplate =
-      Handle<JSFunction>(Compiler::CompileEval(source,
-                                               context,
-                                               true,
-                                               Compiler::DONT_VALIDATE_JSON));
-  if (boilerplate.is_null()) return Failure::Exception();
+  Handle<SharedFunctionInfo> shared =
+      Compiler::CompileEval(source,
+                            context,
+                            true,
+                            Compiler::DONT_VALIDATE_JSON);
+  if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
-      Handle<JSFunction>(Factory::NewFunctionFromBoilerplate(boilerplate,
-                                                             context));
+      Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,
+                                                                    context));
 
   // Invoke the result of the compilation to get the evaluation function.
   bool has_pending_exception;
diff --git a/src/serialize.cc b/src/serialize.cc
index 0e38151..980a1bc 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -409,36 +409,44 @@
       UNCLASSIFIED,
       19,
       "compare_doubles");
+  Add(ExternalReference::compile_array_pop_call().address(),
+      UNCLASSIFIED,
+      20,
+      "compile_array_pop");
+  Add(ExternalReference::compile_array_push_call().address(),
+      UNCLASSIFIED,
+      21,
+      "compile_array_push");
 #ifdef V8_NATIVE_REGEXP
   Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
       UNCLASSIFIED,
-      20,
+      22,
       "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
   Add(ExternalReference::re_check_stack_guard_state().address(),
       UNCLASSIFIED,
-      21,
+      23,
       "RegExpMacroAssembler*::CheckStackGuardState()");
   Add(ExternalReference::re_grow_stack().address(),
       UNCLASSIFIED,
-      22,
+      24,
       "NativeRegExpMacroAssembler::GrowStack()");
   Add(ExternalReference::re_word_character_map().address(),
       UNCLASSIFIED,
-      23,
+      25,
       "NativeRegExpMacroAssembler::word_character_map");
 #endif
   // Keyed lookup cache.
   Add(ExternalReference::keyed_lookup_cache_keys().address(),
       UNCLASSIFIED,
-      24,
+      26,
       "KeyedLookupCache::keys()");
   Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
       UNCLASSIFIED,
-      25,
+      27,
       "KeyedLookupCache::field_offsets()");
   Add(ExternalReference::transcendental_cache_array_address().address(),
       UNCLASSIFIED,
-      26,
+      28,
       "TranscendentalCache::caches()");
 }
 
@@ -547,7 +555,7 @@
     HeapObject* new_object = HeapObject::cast(new_allocation);
     // Record all large objects in the same space.
     address = new_object->address();
-    high_water_[LO_SPACE] = address + size;
+    pages_[LO_SPACE].Add(address);
   }
   last_object_address_ = address;
   return address;
@@ -900,7 +908,7 @@
 Serializer::Serializer(SnapshotByteSink* sink)
     : sink_(sink),
       current_root_index_(0),
-      external_reference_encoder_(NULL),
+      external_reference_encoder_(new ExternalReferenceEncoder),
       large_object_total_(0) {
   for (int i = 0; i <= LAST_SPACE; i++) {
     fullness_[i] = 0;
@@ -908,28 +916,28 @@
 }
 
 
+Serializer::~Serializer() {
+  delete external_reference_encoder_;
+}
+
+
 void StartupSerializer::SerializeStrongReferences() {
   // No active threads.
   CHECK_EQ(NULL, ThreadState::FirstInUse());
   // No active or weak handles.
   CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
   CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
-  CHECK_EQ(NULL, external_reference_encoder_);
   // We don't support serializing installed extensions.
   for (RegisteredExtension* ext = RegisteredExtension::first_extension();
        ext != NULL;
        ext = ext->next()) {
     CHECK_NE(v8::INSTALLED, ext->state());
   }
-  external_reference_encoder_ = new ExternalReferenceEncoder();
   Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
-  delete external_reference_encoder_;
-  external_reference_encoder_ = NULL;
 }
 
 
 void PartialSerializer::Serialize(Object** object) {
-  external_reference_encoder_ = new ExternalReferenceEncoder();
   this->VisitPointer(object);
 
   // After we have done the partial serialization the partial snapshot cache
@@ -943,9 +951,6 @@
     startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
   }
   partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
-
-  delete external_reference_encoder_;
-  external_reference_encoder_ = NULL;
 }
 
 
@@ -997,6 +1002,7 @@
     Object* entry = partial_snapshot_cache_[i];
     if (entry == heap_object) return i;
   }
+
   // We didn't find the object in the cache.  So we add it to the cache and
   // then visit the pointer so that it becomes part of the startup snapshot
   // and we can refer to it from the partial snapshot.
diff --git a/src/serialize.h b/src/serialize.h
index ce3b006..ab2ae9f 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -120,28 +120,9 @@
     return data_[position_++];
   }
 
-  void CopyRaw(byte* to, int number_of_bytes) {
-    memcpy(to, data_ + position_, number_of_bytes);
-    position_ += number_of_bytes;
-  }
+  inline void CopyRaw(byte* to, int number_of_bytes);
 
-  int GetInt() {
-    // A little unwind to catch the really small ints.
-    int snapshot_byte = Get();
-    if ((snapshot_byte & 0x80) == 0) {
-      return snapshot_byte;
-    }
-    int accumulator = (snapshot_byte & 0x7f) << 7;
-    while (true) {
-      snapshot_byte = Get();
-      if ((snapshot_byte & 0x80) == 0) {
-        return accumulator | snapshot_byte;
-      }
-      accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
-    }
-    UNREACHABLE();
-    return accumulator;
-  }
+  inline int GetInt();
 
   bool AtEOF() {
     return position_ == length_;
@@ -235,11 +216,35 @@
   }
 
   static int partial_snapshot_cache_length_;
-  static const int kPartialSnapshotCacheCapacity = 1024;
+  static const int kPartialSnapshotCacheCapacity = 1300;
   static Object* partial_snapshot_cache_[];
 };
 
 
+int SnapshotByteSource::GetInt() {
+  // A little unwind to catch the really small ints.
+  int snapshot_byte = Get();
+  if ((snapshot_byte & 0x80) == 0) {
+    return snapshot_byte;
+  }
+  int accumulator = (snapshot_byte & 0x7f) << 7;
+  while (true) {
+    snapshot_byte = Get();
+    if ((snapshot_byte & 0x80) == 0) {
+      return accumulator | snapshot_byte;
+    }
+    accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
+  }
+  UNREACHABLE();
+  return accumulator;
+}
+
+
+void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
+  memcpy(to, data_ + position_, number_of_bytes);
+  position_ += number_of_bytes;
+}
+
 
 // A Deserializer reads a snapshot and reconstructs the Object graph it defines.
 class Deserializer: public SerializerDeserializer {
@@ -364,6 +369,7 @@
 class Serializer : public SerializerDeserializer {
  public:
   explicit Serializer(SnapshotByteSink* sink);
+  ~Serializer();
   void VisitPointers(Object** start, Object** end);
   // You can call this after serialization to find out how much space was used
   // in each space.
@@ -492,7 +498,12 @@
   virtual int RootIndex(HeapObject* o);
   virtual int PartialSnapshotCacheIndex(HeapObject* o);
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
-    return o->IsString() || o->IsSharedFunctionInfo();
+    // Scripts should be referred only through shared function infos.  We can't
+    // allow them to be part of the partial snapshot because they contain a
+    // unique ID, and deserializing several partial snapshots containing script
+    // would cause dupes.
+    ASSERT(!o->IsScript());
+    return o->IsString() || o->IsSharedFunctionInfo() || o->IsHeapNumber();
   }
 
  private:
@@ -530,6 +541,7 @@
   }
 };
 
+
 } }  // namespace v8::internal
 
 #endif  // V8_SERIALIZE_H_
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 1e81b8e..f1106e1 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -59,4 +59,24 @@
   return false;
 }
 
+
+Handle<Context> Snapshot::NewContextFromSnapshot() {
+  if (context_size_ == 0) {
+    return Handle<Context>();
+  }
+  Heap::ReserveSpace(new_space_used_,
+                     pointer_space_used_,
+                     data_space_used_,
+                     code_space_used_,
+                     map_space_used_,
+                     cell_space_used_,
+                     large_space_used_);
+  SnapshotByteSource source(context_data_, context_size_);
+  Deserializer deserializer(&source);
+  Object* root;
+  deserializer.DeserializePartial(&root);
+  CHECK(root->IsContext());
+  return Handle<Context>(Context::cast(root));
+}
+
 } }  // namespace v8::internal
diff --git a/src/snapshot-empty.cc b/src/snapshot-empty.cc
index 60ab1e5..cb26eb8 100644
--- a/src/snapshot-empty.cc
+++ b/src/snapshot-empty.cc
@@ -35,6 +35,16 @@
 namespace internal {
 
 const byte Snapshot::data_[] = { 0 };
-int Snapshot::size_ = 0;
+const int Snapshot::size_ = 0;
+const byte Snapshot::context_data_[] = { 0 };
+const int Snapshot::context_size_ = 0;
+
+const int Snapshot::new_space_used_ = 0;
+const int Snapshot::pointer_space_used_ = 0;
+const int Snapshot::data_space_used_ = 0;
+const int Snapshot::code_space_used_ = 0;
+const int Snapshot::map_space_used_ = 0;
+const int Snapshot::cell_space_used_ = 0;
+const int Snapshot::large_space_used_ = 0;
 
 } }  // namespace v8::internal
diff --git a/src/snapshot.h b/src/snapshot.h
index 88ba8db..9f77c20 100644
--- a/src/snapshot.h
+++ b/src/snapshot.h
@@ -38,6 +38,9 @@
   // could be found.
   static bool Initialize(const char* snapshot_file = NULL);
 
+  // Create a new context using the internal partial snapshot.
+  static Handle<Context> NewContextFromSnapshot();
+
   // Returns whether or not the snapshot is enabled.
   static bool IsEnabled() { return size_ != 0; }
 
@@ -47,7 +50,16 @@
 
  private:
   static const byte data_[];
-  static int size_;
+  static const byte context_data_[];
+  static const int new_space_used_;
+  static const int pointer_space_used_;
+  static const int data_space_used_;
+  static const int code_space_used_;
+  static const int map_space_used_;
+  static const int cell_space_used_;
+  static const int large_space_used_;
+  static const int size_;
+  static const int context_size_;
 
   static bool Deserialize(const byte* content, int len);
 
diff --git a/src/string.js b/src/string.js
index 6bb19e9..e663ec3 100644
--- a/src/string.js
+++ b/src/string.js
@@ -164,7 +164,7 @@
 
 // ECMA-262 section 15.5.4.10
 function StringMatch(regexp) {
-  if (!IS_REGEXP(regexp)) regexp = new ORIGINAL_REGEXP(regexp);
+  if (!IS_REGEXP(regexp)) regexp = new $RegExp(regexp);
   var subject = TO_STRING_INLINE(this);
 
   if (!regexp.global) return regexp.exec(subject);
@@ -183,7 +183,7 @@
   }
 
   %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
-  // lastMatchInfo is defined in regexp-delay.js.
+  // lastMatchInfo is defined in regexp.js.
   var result = %StringMatch(subject, regexp, lastMatchInfo);
   cache.type = 'match';
   cache.regExp = regexp;
@@ -523,7 +523,7 @@
 
 // ECMA-262 section 15.5.4.12
 function StringSearch(re) {
-  var regexp = new ORIGINAL_REGEXP(re);
+  var regexp = new $RegExp(re);
   var s = TO_STRING_INLINE(this);
   var last_idx = regexp.lastIndex; // keep old lastIndex
   regexp.lastIndex = 0;            // ignore re.global property
@@ -896,6 +896,7 @@
 
 // ReplaceResultBuilder support.
 function ReplaceResultBuilder(str) {
+  this.__proto__ = void 0;
   this.elements = new $Array();
   this.special_string = str;
 }
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 0e986dd..0ca37e7 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -610,6 +610,22 @@
                                        StubCompiler::CheckType check);
 
 
+Object* CompileArrayPushCall(CallStubCompiler* compiler,
+                             Object* object,
+                             JSObject* holder,
+                             JSFunction* function,
+                             String* name,
+                             StubCompiler::CheckType check);
+
+
+Object* CompileArrayPopCall(CallStubCompiler* compiler,
+                            Object* object,
+                            JSObject* holder,
+                            JSFunction* function,
+                            String* name,
+                            StubCompiler::CheckType check);
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_STUB_CACHE_H_
diff --git a/src/top.cc b/src/top.cc
index 0fcf458..2f75c8f 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -679,7 +679,7 @@
 
 
 void Top::ComputeLocation(MessageLocation* target) {
-  *target = MessageLocation(empty_script(), -1, -1);
+  *target = MessageLocation(Handle<Script>(Heap::empty_script()), -1, -1);
   StackTraceFrameIterator it;
   if (!it.done()) {
     JavaScriptFrame* frame = it.frame();
diff --git a/src/v8-counters.h b/src/v8-counters.h
index b595cd4..a5f3594 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -97,7 +97,11 @@
   /* Amount of source code compiled with the old codegen. */          \
   SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize)     \
   /* Amount of source code compiled with the full codegen. */         \
-  SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)
+  SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)   \
+  /* Number of contexts created from scratch. */                      \
+  SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch)    \
+  /* Number of contexts created by partial snapshot. */               \
+  SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot)
 
 
 #define STATS_COUNTER_LIST_2(SC)                                      \
@@ -187,6 +191,7 @@
   SC(transcendental_cache_hit, V8.TranscendentalCacheHit)             \
   SC(transcendental_cache_miss, V8.TranscendentalCacheMiss)
 
+
 // This file contains all the v8 counters that are in use.
 class Counters : AllStatic {
  public:
diff --git a/src/variables.h b/src/variables.h
index a68aa33..618f6ac 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -70,8 +70,6 @@
 
  private:
   Kind kind_;
-
-  DISALLOW_COPY_AND_ASSIGN(StaticType);
 };
 
 
diff --git a/src/version.cc b/src/version.cc
index 74bef65..e0615cd 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     1
-#define BUILD_NUMBER      7
+#define BUILD_NUMBER      8
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index a6b0ffc..2f873c5 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -2229,9 +2229,8 @@
 }
 
 
-void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
-  ASSERT(boilerplate->IsBoilerplate());
-
+void CodeGenerator::InstantiateFunction(
+    Handle<SharedFunctionInfo> function_info) {
   // The inevitable call will sync frame elements to memory anyway, so
   // we do it eagerly to allow us to push the arguments directly into
   // place.
@@ -2239,16 +2238,16 @@
 
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
-  if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+  if (scope()->is_function_scope() && function_info->num_literals() == 0) {
     FastNewClosureStub stub;
-    frame_->Push(boilerplate);
+    frame_->Push(function_info);
     Result answer = frame_->CallStub(&stub, 1);
     frame_->Push(&answer);
   } else {
     // Call the runtime to instantiate the function boilerplate
     // object.
     frame_->EmitPush(rsi);
-    frame_->EmitPush(boilerplate);
+    frame_->EmitPush(function_info);
     Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
     frame_->Push(&result);
   }
@@ -2258,19 +2257,19 @@
 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
-  // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(node, script(), this);
+  // Build the function info and instantiate it.
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) return;
-  InstantiateBoilerplate(boilerplate);
+  InstantiateFunction(function_info);
 }
 
 
-void CodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* node) {
-  Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
-  InstantiateBoilerplate(node->boilerplate());
+void CodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* node) {
+  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
+  InstantiateFunction(node->shared_function_info());
 }
 
 
@@ -5068,9 +5067,9 @@
       Condition left_is_smi = masm_->CheckSmi(left_side.reg());
       is_smi.Branch(left_is_smi);
 
-      bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
-          && node->AsCompareOperation()->is_for_loop_condition();
-      if (!is_for_loop_compare && right_val->IsSmi()) {
+      bool is_loop_condition = (node->AsExpression() != NULL) &&
+          node->AsExpression()->is_loop_condition();
+      if (!is_loop_condition && right_val->IsSmi()) {
         // Right side is a constant smi and left side has been checked
         // not to be a smi.
         JumpTarget not_number;
@@ -6361,12 +6360,12 @@
 
 
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
-  // Clone the boilerplate in new space. Set the context to the
-  // current context in rsi.
+  // Create a new closure from the given function info in new
+  // space. Set the context to the current context in rsi.
   Label gc;
   __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
 
-  // Get the boilerplate function from the stack.
+  // Get the function info from the stack.
   __ movq(rdx, Operand(rsp, 1 * kPointerSize));
 
   // Compute the function map in the current global context and set that
@@ -6376,18 +6375,16 @@
   __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
   __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
 
-  // Clone the rest of the boilerplate fields. We don't have to update
-  // the write barrier because the allocated object is in new space.
-  for (int offset = kPointerSize;
-       offset < JSFunction::kSize;
-       offset += kPointerSize) {
-    if (offset == JSFunction::kContextOffset) {
-      __ movq(FieldOperand(rax, offset), rsi);
-    } else {
-      __ movq(rbx, FieldOperand(rdx, offset));
-      __ movq(FieldOperand(rax, offset), rbx);
-    }
-  }
+  // Initialize the rest of the function. We don't have to update the
+  // write barrier because the allocated object is in new space.
+  __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
+  __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
+  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
+  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
+  __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
+  __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
+  __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
 
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
@@ -9105,52 +9102,58 @@
 
 
 int CompareStub::MinorKey() {
-  // Encode the three parameters in a unique 16 bit value.
-  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
-  int nnn_value = (never_nan_nan_ ? 2 : 0);
-  if (cc_ != equal) nnn_value = 0;  // Avoid duplicate stubs.
-  return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
+  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+  // stubs the never NaN NaN condition is only taken into account if the
+  // condition is equals.
+  ASSERT(static_cast<unsigned>(cc_) < (1 << 13));
+  return ConditionField::encode(static_cast<unsigned>(cc_))
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+         | IncludeNumberCompareField::encode(include_number_compare_);
 }
 
 
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
 const char* CompareStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
   switch (cc_) {
-    case less: return "CompareStub_LT";
-    case greater: return "CompareStub_GT";
-    case less_equal: return "CompareStub_LE";
-    case greater_equal: return "CompareStub_GE";
-    case not_equal: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_NE_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_NO_NAN";
-        } else {
-          return "CompareStub_NE";
-        }
-      }
-    }
-    case equal: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_EQ_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_NO_NAN";
-        } else {
-          return "CompareStub_EQ";
-        }
-      }
-    }
-    default: return "CompareStub";
+    case less: cc_name = "LT"; break;
+    case greater: cc_name = "GT"; break;
+    case less_equal: cc_name = "LE"; break;
+    case greater_equal: cc_name = "GE"; break;
+    case equal: cc_name = "EQ"; break;
+    case not_equal: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
   }
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == equal || cc_ == not_equal)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s",
+               cc_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name);
+  return name_;
 }
 
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 42b4993..a43856f 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -527,8 +527,8 @@
   // name/value pairs.
   void DeclareGlobals(Handle<FixedArray> pairs);
 
-  // Instantiate the function boilerplate.
-  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+  // Instantiate the function based on the shared function info.
+  void InstantiateFunction(Handle<SharedFunctionInfo> function_info);
 
   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 65f99a3..ab73706 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -781,15 +781,13 @@
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script(), this);
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(expr, script(), this);
   if (HasStackOverflow()) return;
 
-  ASSERT(boilerplate->IsBoilerplate());
-
   // Create a new closure.
   __ push(rsi);
-  __ Push(boilerplate);
+  __ Push(function_info);
   __ CallRuntime(Runtime::kNewClosure, 2);
   Apply(context_, rax);
 }
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 1d28a1f..77043ce 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -253,22 +253,6 @@
 }
 
 
-// Helper function used to check that a value is either not an object
-// or is loaded if it is an object.
-static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
-                                           Register value) {
-  Label done;
-  // Check if the value is a Smi.
-  __ JumpIfSmi(value, &done);
-  // Check if the object has been loaded.
-  __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
-  __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
-           Immediate(1 << Map::kNeedsLoading));
-  __ j(not_zero, miss);
-  __ bind(&done);
-}
-
-
 // One byte opcode for test eax,0xXXXXXXXX.
 static const byte kTestEaxByte = 0xA9;
 
@@ -522,7 +506,6 @@
                          rdx,
                          rax,
                          DICTIONARY_CHECK_DONE);
-  GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
   __ movq(rax, rcx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
@@ -1231,10 +1214,6 @@
   // Check that the value is a JavaScript function.
   __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
   __ j(not_equal, miss);
-  // Check that the function has been loaded.
-  __ testb(FieldOperand(rdx, Map::kBitField2Offset),
-           Immediate(1 << Map::kNeedsLoading));
-  __ j(not_zero, miss);
 
   // Patch the receiver with the global proxy if necessary.
   if (is_global_object) {
@@ -1431,7 +1410,6 @@
   // Search the dictionary placing the result in rax.
   __ bind(&probe);
   GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY);
-  GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
   __ ret(0);
 
   // Global object access: Check access rights.
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index 0d59b21..9c19c2b 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -83,7 +83,7 @@
   'arch:x64': ['test-assembler-x64.cc',
                'test-macro-assembler-x64.cc',
                'test-log-stack-tracer.cc'],
-  'arch:mips': ['test-assembler-mips.cc'],
+  'arch:mips': ['test-assembler-mips.cc', 'test-mips.cc'],
   'os:linux':  ['test-platform-linux.cc'],
   'os:macos':  ['test-platform-macos.cc'],
   'os:nullos': ['test-platform-nullos.cc'],
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index eefe71c..7689371 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -40,7 +40,6 @@
 test-serialize/TestThatAlwaysFails: FAIL
 test-serialize/DependentTestThatAlwaysFails: FAIL
 
-
 [ $arch == arm ]
 
 # BUG(240): Test seems flaky on ARM.
@@ -60,6 +59,7 @@
 test-alloc: SKIP
 test-api: SKIP
 test-compiler: SKIP
+test-cpu-profiler: SKIP
 test-debug: SKIP
 test-decls: SKIP
 test-func-name-inference: SKIP
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index e996a07..2eacf5a 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -38,7 +38,7 @@
 #include "utils.h"
 #include "cctest.h"
 
-static const bool kLogThreading = false;
+static const bool kLogThreading = true;
 
 static bool IsNaN(double x) {
 #ifdef WIN32
@@ -4605,6 +4605,7 @@
   value = v8_compile("other.accessible_prop = 3")->Run();
   CHECK(value->IsNumber());
   CHECK_EQ(3, value->Int32Value());
+  CHECK_EQ(3, g_echo_value);
 
   value = v8_compile("other.accessible_prop")->Run();
   CHECK(value->IsNumber());
@@ -9955,3 +9956,57 @@
     CHECK_EQ(23, c1->Get(v8_str("y"))->Int32Value());
   }
 }
+
+int prologue_call_count = 0;
+int epilogue_call_count = 0;
+int prologue_call_count_second = 0;
+int epilogue_call_count_second = 0;
+
+void PrologueCallback(v8::GCType, v8::GCCallbackFlags) {
+  ++prologue_call_count;
+}
+
+void EpilogueCallback(v8::GCType, v8::GCCallbackFlags) {
+  ++epilogue_call_count;
+}
+
+void PrologueCallbackSecond(v8::GCType, v8::GCCallbackFlags) {
+  ++prologue_call_count_second;
+}
+
+void EpilogueCallbackSecond(v8::GCType, v8::GCCallbackFlags) {
+  ++epilogue_call_count_second;
+}
+
+TEST(GCCallbacks) {
+  LocalContext context;
+
+  v8::V8::AddGCPrologueCallback(PrologueCallback);
+  v8::V8::AddGCEpilogueCallback(EpilogueCallback);
+  CHECK_EQ(0, prologue_call_count);
+  CHECK_EQ(0, epilogue_call_count);
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(1, prologue_call_count);
+  CHECK_EQ(1, epilogue_call_count);
+  v8::V8::AddGCPrologueCallback(PrologueCallbackSecond);
+  v8::V8::AddGCEpilogueCallback(EpilogueCallbackSecond);
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(2, prologue_call_count);
+  CHECK_EQ(2, epilogue_call_count);
+  CHECK_EQ(1, prologue_call_count_second);
+  CHECK_EQ(1, epilogue_call_count_second);
+  v8::V8::RemoveGCPrologueCallback(PrologueCallback);
+  v8::V8::RemoveGCEpilogueCallback(EpilogueCallback);
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(2, prologue_call_count);
+  CHECK_EQ(2, epilogue_call_count);
+  CHECK_EQ(2, prologue_call_count_second);
+  CHECK_EQ(2, epilogue_call_count_second);
+  v8::V8::RemoveGCPrologueCallback(PrologueCallbackSecond);
+  v8::V8::RemoveGCEpilogueCallback(EpilogueCallbackSecond);
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(2, prologue_call_count);
+  CHECK_EQ(2, epilogue_call_count);
+  CHECK_EQ(2, prologue_call_count_second);
+  CHECK_EQ(2, epilogue_call_count_second);
+}
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 459b862..7f3404c 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -47,9 +47,6 @@
 
 // The test framework does not accept flags on the command line, so we set them
 static void InitializeVM() {
-  // disable compilation of natives by specifying an empty natives file
-  FLAG_natives_file = "";
-
   // enable generation of comments
   FLAG_debug_code = true;
 
diff --git a/test/cctest/test-assembler-mips.cc b/test/cctest/test-assembler-mips.cc
index ab011a7..0a2310e 100644
--- a/test/cctest/test-assembler-mips.cc
+++ b/test/cctest/test-assembler-mips.cc
@@ -49,8 +49,8 @@
 
 // The test framework does not accept flags on the command line, so we set them.
 static void InitializeVM() {
-  // Disable compilation of natives by specifying an empty natives file.
-  FLAG_natives_file = "";
+  // Disable compilation of natives.
+  FLAG_disable_native_files = true;
 
   // Enable generation of comments.
   FLAG_debug_code = true;
diff --git a/test/cctest/test-circular-queue.cc b/test/cctest/test-circular-queue.cc
index bb69c1b..3fa49bf 100644
--- a/test/cctest/test-circular-queue.cc
+++ b/test/cctest/test-circular-queue.cc
@@ -61,8 +61,6 @@
   SamplingCircularQueue scq(sizeof(Record),
                             kRecordsPerChunk * sizeof(Record),
                             3);
-  scq.SetUpProducer();
-  scq.SetUpConsumer();
 
   // Check that we are using non-reserved values.
   CHECK_NE(SamplingCircularQueue::kClear, 1);
@@ -121,7 +119,103 @@
   // Consumption must still be possible as the first cell of the
   // last chunk is not clean.
   CHECK_NE(NULL, scq.StartDequeue());
+}
 
-  scq.TearDownConsumer();
-  scq.TearDownProducer();
+
+namespace {
+
+class ProducerThread: public i::Thread {
+ public:
+  typedef SamplingCircularQueue::Cell Record;
+
+  ProducerThread(SamplingCircularQueue* scq,
+                 int records_per_chunk,
+                 Record value,
+                 i::Semaphore* finished)
+      : scq_(scq),
+        records_per_chunk_(records_per_chunk),
+        value_(value),
+        finished_(finished) { }
+
+  virtual void Run() {
+    for (Record i = value_; i < value_ + records_per_chunk_; ++i) {
+      Record* rec = reinterpret_cast<Record*>(scq_->Enqueue());
+      CHECK_NE(NULL, rec);
+      *rec = i;
+    }
+
+    finished_->Signal();
+  }
+
+ private:
+  SamplingCircularQueue* scq_;
+  const int records_per_chunk_;
+  Record value_;
+  i::Semaphore* finished_;
+};
+
+}  // namespace
+
+TEST(SamplingCircularQueueMultithreading) {
+  // Emulate multiple VM threads working 'one thread at a time.'
+  // This test enqueues data from different threads. This corresponds
+  // to the case of profiling under Linux, where signal handler that
+  // does sampling is called in the context of different VM threads.
+
+  typedef ProducerThread::Record Record;
+  const int kRecordsPerChunk = 4;
+  SamplingCircularQueue scq(sizeof(Record),
+                            kRecordsPerChunk * sizeof(Record),
+                            3);
+  i::Semaphore* semaphore = i::OS::CreateSemaphore(0);
+  // Don't poll ahead, making possible to check data in the buffer
+  // immediately after enqueuing.
+  scq.FlushResidualRecords();
+
+  // Check that we are using non-reserved values.
+  CHECK_NE(SamplingCircularQueue::kClear, 1);
+  CHECK_NE(SamplingCircularQueue::kEnd, 1);
+  ProducerThread producer1(&scq, kRecordsPerChunk, 1, semaphore);
+  ProducerThread producer2(&scq, kRecordsPerChunk, 10, semaphore);
+  ProducerThread producer3(&scq, kRecordsPerChunk, 20, semaphore);
+
+  CHECK_EQ(NULL, scq.StartDequeue());
+  producer1.Start();
+  semaphore->Wait();
+  for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
+    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
+    CHECK_NE(NULL, rec);
+    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
+    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+    scq.FinishDequeue();
+    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+  }
+
+  CHECK_EQ(NULL, scq.StartDequeue());
+  producer2.Start();
+  semaphore->Wait();
+  for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
+    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
+    CHECK_NE(NULL, rec);
+    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
+    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+    scq.FinishDequeue();
+    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+  }
+
+  CHECK_EQ(NULL, scq.StartDequeue());
+  producer3.Start();
+  semaphore->Wait();
+  for (Record i = 20; i < 20 + kRecordsPerChunk; ++i) {
+    Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
+    CHECK_NE(NULL, rec);
+    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
+    CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+    scq.FinishDequeue();
+    CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
+  }
+
+  CHECK_EQ(NULL, scq.StartDequeue());
+
+  delete semaphore;
 }
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index d61a2a1..96549a3 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -114,7 +114,7 @@
 
 static Handle<JSFunction> Compile(const char* source) {
   Handle<String> source_code(Factory::NewStringFromUtf8(CStrVector(source)));
-  Handle<JSFunction> boilerplate =
+  Handle<SharedFunctionInfo> shared_function =
       Compiler::Compile(source_code,
                         Handle<String>(),
                         0,
@@ -123,8 +123,8 @@
                         NULL,
                         Handle<String>::null(),
                         NOT_NATIVES_CODE);
-  return Factory::NewFunctionFromBoilerplate(boilerplate,
-                                             Top::global_context());
+  return Factory::NewFunctionFromSharedFunctionInfo(shared_function,
+                                                    Top::global_context());
 }
 
 
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index bd966fa..2fff4fa 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -64,7 +64,6 @@
   ProfileGenerator generator(&profiles);
   ProfilerEventsProcessor processor(&generator);
   processor.Start();
-  processor.SetUpSamplesProducer();
   while (!processor.running()) {
     i::Thread::YieldCPU();
   }
@@ -117,8 +116,6 @@
   CodeEntry* entry5 = generator.code_map()->FindEntry(ToAddress(0x1700));
   CHECK_NE(NULL, entry5);
   CHECK_EQ(aaa_str, entry5->name());
-
-  processor.TearDownSamplesProducer();
 }
 
 
@@ -133,7 +130,6 @@
   ProfileGenerator generator(&profiles);
   ProfilerEventsProcessor processor(&generator);
   processor.Start();
-  processor.SetUpSamplesProducer();
   while (!processor.running()) {
     i::Thread::YieldCPU();
   }
@@ -197,6 +193,4 @@
   bottom_up_ddd_children.last()->GetChildren(&bottom_up_ddd_stub_children);
   CHECK_EQ(1, bottom_up_ddd_stub_children.length());
   CHECK_EQ("bbb", bottom_up_ddd_stub_children.last()->entry()->name());
-
-  processor.TearDownSamplesProducer();
 }
diff --git a/test/cctest/test-func-name-inference.cc b/test/cctest/test-func-name-inference.cc
index 28e8649..67791fb 100644
--- a/test/cctest/test-func-name-inference.cc
+++ b/test/cctest/test-func-name-inference.cc
@@ -62,8 +62,16 @@
                               const char* func_pos_src,
                               const char* ref_inferred_name) {
   // Get script source.
-  Handle<JSFunction> fun = v8::Utils::OpenHandle(*script);
-  Handle<Script> i_script(Script::cast(fun->shared()->script()));
+  Handle<Object> obj = v8::Utils::OpenHandle(*script);
+  Handle<SharedFunctionInfo> shared_function;
+  if (obj->IsSharedFunctionInfo()) {
+    shared_function =
+        Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(*obj));
+  } else {
+    shared_function =
+        Handle<SharedFunctionInfo>(JSFunction::cast(*obj)->shared());
+  }
+  Handle<Script> i_script(Script::cast(shared_function->script()));
   CHECK(i_script->source()->IsString());
   Handle<String> script_src(String::cast(i_script->source()));
 
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index aa912a3..d96e5e7 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -191,7 +191,9 @@
 
 
 static Handle<JSFunction> CompileFunction(const char* source) {
-  return v8::Utils::OpenHandle(*Script::Compile(String::New(source)));
+  Handle<JSFunction> result(JSFunction::cast(
+      *v8::Utils::OpenHandle(*Script::Compile(String::New(source)))));
+  return result;
 }
 
 
@@ -201,16 +203,16 @@
 
 
 static Handle<JSFunction> GetGlobalJSFunction(const char* name) {
-  Handle<JSFunction> js_func(JSFunction::cast(
-                                 *(v8::Utils::OpenHandle(
-                                       *GetGlobalProperty(name)))));
-  return js_func;
+  Handle<JSFunction> result(JSFunction::cast(
+      *v8::Utils::OpenHandle(*GetGlobalProperty(name))));
+  return result;
 }
 
 
 static void CheckRetAddrIsInJSFunction(const char* func_name,
-                                       Address ret_addr) {
-  CheckRetAddrIsInJSFunction(func_name, ret_addr,
+                                               Address ret_addr) {
+  CheckRetAddrIsInJSFunction(func_name,
+                             ret_addr,
                              GetGlobalJSFunction(func_name));
 }
 
@@ -278,6 +280,7 @@
 #endif
 
   SetGlobalProperty(func_name, v8::ToApi<Value>(func));
+  CHECK_EQ(*func, *GetGlobalJSFunction(func_name));
 }
 
 
@@ -288,11 +291,13 @@
   InitializeVM();
   v8::HandleScope scope;
   CreateTraceCallerFunction("JSFuncDoTrace", "trace");
-  CompileRun(
+  Local<Value> result = CompileRun(
       "function JSTrace() {"
       "         JSFuncDoTrace();"
       "};\n"
-      "JSTrace();");
+      "JSTrace();\n"
+      "true;");
+  CHECK(!result.IsEmpty());
   CHECK_GT(sample.frames_count, 1);
   // Stack sampling will start from the first JS function, i.e. "JSFuncDoTrace"
   CheckRetAddrIsInJSFunction("JSFuncDoTrace",
@@ -309,14 +314,16 @@
   InitializeVM();
   v8::HandleScope scope;
   CreateTraceCallerFunction("JSFuncDoTrace", "js_trace");
-  CompileRun(
+  Local<Value> result = CompileRun(
       "function JSTrace() {"
       "         JSFuncDoTrace();"
       "};\n"
       "function OuterJSTrace() {"
       "         JSTrace();"
       "};\n"
-      "OuterJSTrace();");
+      "OuterJSTrace();\n"
+      "true;");
+  CHECK(!result.IsEmpty());
   // The last JS function called.
   CHECK_EQ(GetGlobalJSFunction("JSFuncDoTrace")->address(),
            sample.function);
diff --git a/test/cctest/test-mips.cc b/test/cctest/test-mips.cc
new file mode 100644
index 0000000..efd4cc9
--- /dev/null
+++ b/test/cctest/test-mips.cc
@@ -0,0 +1,52 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "v8.h"
+#include "execution.h"
+
+#include "cctest.h"
+
+using ::v8::Local;
+using ::v8::String;
+using ::v8::Script;
+
+namespace i = ::v8::internal;
+
+TEST(MIPSFunctionCalls) {
+  // Disable compilation of natives.
+  i::FLAG_disable_native_files = true;
+  i::FLAG_full_compiler = false;
+
+  v8::HandleScope scope;
+  LocalContext env;  // from cctest.h
+
+  const char* c_source = "function foo() { return 0x1234; }; foo();";
+  Local<String> source = ::v8::String::New(c_source);
+  Local<Script> script = ::v8::Script::Compile(source);
+  CHECK_EQ(0x1234,  script->Run()->Int32Value());
+}
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 4308ff5..54e69a1 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -289,65 +289,68 @@
 
 
 DEPENDENT_TEST(Deserialize, Serialize) {
-  v8::HandleScope scope;
+  // The serialize-deserialize tests only work if the VM is built without
+  // serialization.  That doesn't matter.  We don't need to be able to
+  // serialize a snapshot in a VM that is booted from a snapshot.
+  if (!Snapshot::IsEnabled()) {
+    v8::HandleScope scope;
 
-  Deserialize();
+    Deserialize();
 
-  v8::Persistent<v8::Context> env = v8::Context::New();
-  env->Enter();
+    v8::Persistent<v8::Context> env = v8::Context::New();
+    env->Enter();
 
-  SanityCheck();
+    SanityCheck();
+  }
 }
 
 
 DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
-  // BUG(632): Disable this test until the partial_snapshots branch is
-  // merged back.
-  return;
+  if (!Snapshot::IsEnabled()) {
+    v8::HandleScope scope;
 
-  v8::HandleScope scope;
+    Deserialize();
 
-  Deserialize();
+    v8::Persistent<v8::Context> env = v8::Context::New();
+    env->Enter();
 
-  v8::Persistent<v8::Context> env = v8::Context::New();
-  env->Enter();
-
-  SanityCheck();
+    SanityCheck();
+  }
 }
 
 
 DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
-  v8::HandleScope scope;
+  if (!Snapshot::IsEnabled()) {
+    v8::HandleScope scope;
 
-  Deserialize();
+    Deserialize();
 
-  v8::Persistent<v8::Context> env = v8::Context::New();
-  env->Enter();
+    v8::Persistent<v8::Context> env = v8::Context::New();
+    env->Enter();
 
-  const char* c_source = "\"1234\".length";
-  v8::Local<v8::String> source = v8::String::New(c_source);
-  v8::Local<v8::Script> script = v8::Script::Compile(source);
-  CHECK_EQ(4, script->Run()->Int32Value());
+    const char* c_source = "\"1234\".length";
+    v8::Local<v8::String> source = v8::String::New(c_source);
+    v8::Local<v8::Script> script = v8::Script::Compile(source);
+    CHECK_EQ(4, script->Run()->Int32Value());
+  }
 }
 
 
 DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
                SerializeTwice) {
-  // BUG(632): Disable this test until the partial_snapshots branch is
-  // merged back.
-  return;
+  if (!Snapshot::IsEnabled()) {
+    v8::HandleScope scope;
 
-  v8::HandleScope scope;
+    Deserialize();
 
-  Deserialize();
+    v8::Persistent<v8::Context> env = v8::Context::New();
+    env->Enter();
 
-  v8::Persistent<v8::Context> env = v8::Context::New();
-  env->Enter();
-
-  const char* c_source = "\"1234\".length";
-  v8::Local<v8::String> source = v8::String::New(c_source);
-  v8::Local<v8::Script> script = v8::Script::Compile(source);
-  CHECK_EQ(4, script->Run()->Int32Value());
+    const char* c_source = "\"1234\".length";
+    v8::Local<v8::String> source = v8::String::New(c_source);
+    v8::Local<v8::Script> script = v8::Script::Compile(source);
+    CHECK_EQ(4, script->Run()->Int32Value());
+  }
 }
 
 
@@ -400,14 +403,8 @@
 }
 
 
-DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
-  int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
-  Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
-  OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
-
-  CHECK(Snapshot::Initialize(startup_name.start()));
-
-  const char* file_name = FLAG_testing_serialization_file;
+static void ReserveSpaceForPartialSnapshot(const char* file_name) {
+  int file_name_length = StrLength(file_name) + 10;
   Vector<char> name = Vector<char>::New(file_name_length + 1);
   OS::SNPrintF(name, "%s.size", file_name);
   FILE* fp = OS::FOpen(name.start(), "r");
@@ -436,26 +433,122 @@
                      map_size,
                      cell_size,
                      large_size);
-  int snapshot_size = 0;
-  byte* snapshot = ReadBytes(file_name, &snapshot_size);
+}
 
-  Object* root;
-  {
-    SnapshotByteSource source(snapshot, snapshot_size);
-    Deserializer deserializer(&source);
-    deserializer.DeserializePartial(&root);
-    CHECK(root->IsString());
+
+DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
+  if (!Snapshot::IsEnabled()) {
+    int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+    Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+    OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+    CHECK(Snapshot::Initialize(startup_name.start()));
+
+    const char* file_name = FLAG_testing_serialization_file;
+    ReserveSpaceForPartialSnapshot(file_name);
+
+    int snapshot_size = 0;
+    byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+    Object* root;
+    {
+      SnapshotByteSource source(snapshot, snapshot_size);
+      Deserializer deserializer(&source);
+      deserializer.DeserializePartial(&root);
+      CHECK(root->IsString());
+    }
+    v8::HandleScope handle_scope;
+    Handle<Object>root_handle(root);
+
+    Object* root2;
+    {
+      SnapshotByteSource source(snapshot, snapshot_size);
+      Deserializer deserializer(&source);
+      deserializer.DeserializePartial(&root2);
+      CHECK(root2->IsString());
+      CHECK(*root_handle == root2);
+    }
   }
-  v8::HandleScope handle_scope;
-  Handle<Object>root_handle(root);
+}
 
-  Object* root2;
-  {
-    SnapshotByteSource source(snapshot, snapshot_size);
-    Deserializer deserializer(&source);
-    deserializer.DeserializePartial(&root2);
-    CHECK(root2->IsString());
-    CHECK(*root_handle == root2);
+
+TEST(ContextSerialization) {
+  Serializer::Enable();
+  v8::V8::Initialize();
+
+  v8::Persistent<v8::Context> env = v8::Context::New();
+  ASSERT(!env.IsEmpty());
+  env->Enter();
+  // Make sure all builtin scripts are cached.
+  { HandleScope scope;
+    for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+      Bootstrapper::NativesSourceLookup(i);
+    }
+  }
+  // If we don't do this then we end up with a stray root pointing at the
+  // context even after we have disposed of env.
+  Heap::CollectAllGarbage(true);
+
+  int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+  Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+  OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+  env->Exit();
+
+  Object* raw_context = *(v8::Utils::OpenHandle(*env));
+
+  env.Dispose();
+
+  FileByteSink startup_sink(startup_name.start());
+  StartupSerializer startup_serializer(&startup_sink);
+  startup_serializer.SerializeStrongReferences();
+
+  FileByteSink partial_sink(FLAG_testing_serialization_file);
+  PartialSerializer p_ser(&startup_serializer, &partial_sink);
+  p_ser.Serialize(&raw_context);
+  startup_serializer.SerializeWeakReferences();
+  partial_sink.WriteSpaceUsed(p_ser.CurrentAllocationAddress(NEW_SPACE),
+                              p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+                              p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+                              p_ser.CurrentAllocationAddress(CODE_SPACE),
+                              p_ser.CurrentAllocationAddress(MAP_SPACE),
+                              p_ser.CurrentAllocationAddress(CELL_SPACE),
+                              p_ser.CurrentAllocationAddress(LO_SPACE));
+}
+
+
+DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
+  if (!Snapshot::IsEnabled()) {
+    int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+    Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+    OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+    CHECK(Snapshot::Initialize(startup_name.start()));
+
+    const char* file_name = FLAG_testing_serialization_file;
+    ReserveSpaceForPartialSnapshot(file_name);
+
+    int snapshot_size = 0;
+    byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+    Object* root;
+    {
+      SnapshotByteSource source(snapshot, snapshot_size);
+      Deserializer deserializer(&source);
+      deserializer.DeserializePartial(&root);
+      CHECK(root->IsContext());
+    }
+    v8::HandleScope handle_scope;
+    Handle<Object>root_handle(root);
+
+    Object* root2;
+    {
+      SnapshotByteSource source(snapshot, snapshot_size);
+      Deserializer deserializer(&source);
+      deserializer.DeserializePartial(&root2);
+      CHECK(root2->IsContext());
+      CHECK(*root_handle != root2);
+    }
   }
 }
 
@@ -463,6 +556,7 @@
 TEST(LinearAllocation) {
   v8::V8::Initialize();
   int new_space_max = 512 * KB;
+
   for (int size = 1000; size < 5 * MB; size += size >> 1) {
     int new_space_size = (size < new_space_max) ? size : new_space_max;
     Heap::ReserveSpace(
diff --git a/test/mjsunit/date.js b/test/mjsunit/date.js
index a592e4c..b264a19 100644
--- a/test/mjsunit/date.js
+++ b/test/mjsunit/date.js
@@ -46,12 +46,18 @@
 
 var dMax = new Date(8.64e15);
 assertEquals(8.64e15, dMax.getTime());
+assertEquals(275760, dMax.getFullYear());
+assertEquals(8, dMax.getMonth());
+assertEquals(13, dMax.getUTCDate());
 
 var dOverflow = new Date(8.64e15+1);
 assertTrue(isNaN(dOverflow.getTime()));
 
 var dMin = new Date(-8.64e15);
 assertEquals(-8.64e15, dMin.getTime());
+assertEquals(-271821, dMin.getFullYear());
+assertEquals(3, dMin.getMonth());
+assertEquals(20, dMin.getUTCDate());
 
 var dUnderflow = new Date(-8.64e15-1);
 assertTrue(isNaN(dUnderflow.getTime()));
diff --git a/test/mjsunit/debug-script.js b/test/mjsunit/debug-script.js
index 402f90c..643dd8c 100644
--- a/test/mjsunit/debug-script.js
+++ b/test/mjsunit/debug-script.js
@@ -52,7 +52,7 @@
 }
 
 // This has to be updated if the number of native scripts change.
-assertEquals(13, named_native_count);
+assertEquals(14, named_native_count);
 // If no snapshot is used, only the 'gc' extension is loaded.
 // If snapshot is used, all extensions are cached in the snapshot.
 assertTrue(extension_count == 1 || extension_count == 5);
diff --git a/test/mjsunit/div-mod.js b/test/mjsunit/div-mod.js
index 1d352b5..3e343de 100644
--- a/test/mjsunit/div-mod.js
+++ b/test/mjsunit/div-mod.js
@@ -169,3 +169,24 @@
   assertEquals(somenum, somenum % -0x40000000, "%minsmi-32");
   assertEquals(somenum, somenum % -0x80000000, "%minsmi-64");
 })();
+
+
+// Side-effect-free expressions containing bit operations use
+// an optimized compiler with int32 values.   Ensure that modulus
+// produces negative zeros correctly.
+function negative_zero_modulus_test() {
+  var x = 4;
+  var y = -4;
+  x = x + x - x;
+  y = y + y - y;
+  var z = (y | y | y | y) % x;
+  assertEquals(-1 / 0, 1 / z);
+  z = (x | x | x | x) % x;
+  assertEquals(1 / 0, 1 / z);
+  z = (y | y | y | y) % y;
+  assertEquals(-1 / 0, 1 / z);
+  z = (x | x | x | x) % y;
+  assertEquals(1 / 0, 1 / z);
+}
+
+negative_zero_modulus_test();
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 958a592..d531437 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -573,12 +573,12 @@
           '../../src/math.js',
           '../../src/messages.js',
           '../../src/apinatives.js',
-          '../../src/debug-delay.js',
-          '../../src/liveedit-delay.js',
-          '../../src/mirror-delay.js',
-          '../../src/date-delay.js',
-          '../../src/json-delay.js',
-          '../../src/regexp-delay.js',
+          '../../src/debug-debugger.js',
+          '../../src/mirror-debugger.js',
+          '../../src/liveedit-debugger.js',
+          '../../src/date.js',
+          '../../src/json.js',
+          '../../src/regexp.js',
           '../../src/macros.py',
         ],
       },
diff --git a/tools/js2c.py b/tools/js2c.py
index b889530..64de7d3 100755
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -220,8 +220,8 @@
   }
 
   template <>
-  int NativesCollection<%(type)s>::GetDelayCount() {
-    return %(delay_count)i;
+  int NativesCollection<%(type)s>::GetDebuggerCount() {
+    return %(debugger_count)i;
   }
 
   template <>
@@ -252,23 +252,23 @@
 """
 
 
-GET_DELAY_INDEX_CASE = """\
+GET_DEBUGGER_INDEX_CASE = """\
     if (strcmp(name, "%(id)s") == 0) return %(i)i;
 """
 
 
-GET_DELAY_SCRIPT_SOURCE_CASE = """\
+GET_DEBUGGER_SCRIPT_SOURCE_CASE = """\
     if (index == %(i)i) return Vector<const char>(%(id)s, %(length)i);
 """
 
 
-GET_DELAY_SCRIPT_NAME_CASE = """\
+GET_DEBUGGER_SCRIPT_NAME_CASE = """\
     if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
 """
 
 def JS2C(source, target, env):
   ids = []
-  delay_ids = []
+  debugger_ids = []
   modules = []
   # Locate the macros file name.
   consts = {}
@@ -287,7 +287,7 @@
   source_lines_empty = []
   for module in modules:
     filename = str(module)
-    delay = filename.endswith('-delay.js')
+    debugger = filename.endswith('-debugger.js')
     lines = ReadFile(filename)
     lines = ExpandConstants(lines, consts)
     lines = ExpandMacros(lines, macros)
@@ -295,29 +295,29 @@
     lines = minifier.JSMinify(lines)
     data = ToCArray(lines)
     id = (os.path.split(filename)[1])[:-3]
-    if delay: id = id[:-6]
-    if delay:
-      delay_ids.append((id, len(lines)))
+    if debugger: id = id[:-9]
+    if debugger:
+      debugger_ids.append((id, len(lines)))
     else:
       ids.append((id, len(lines)))
     source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
     source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
 
-  # Build delay support functions
+  # Build debugger support functions
   get_index_cases = [ ]
   get_script_source_cases = [ ]
   get_script_name_cases = [ ]
 
   i = 0
-  for (id, length) in delay_ids:
+  for (id, length) in debugger_ids:
     native_name = "native %s.js" % id
-    get_index_cases.append(GET_DELAY_INDEX_CASE % { 'id': id, 'i': i })
-    get_script_source_cases.append(GET_DELAY_SCRIPT_SOURCE_CASE % {
+    get_index_cases.append(GET_DEBUGGER_INDEX_CASE % { 'id': id, 'i': i })
+    get_script_source_cases.append(GET_DEBUGGER_SCRIPT_SOURCE_CASE % {
       'id': id,
       'length': length,
       'i': i
     })
-    get_script_name_cases.append(GET_DELAY_SCRIPT_NAME_CASE % {
+    get_script_name_cases.append(GET_DEBUGGER_SCRIPT_NAME_CASE % {
       'name': native_name,
       'length': len(native_name),
       'i': i
@@ -326,13 +326,13 @@
 
   for (id, length) in ids:
     native_name = "native %s.js" % id
-    get_index_cases.append(GET_DELAY_INDEX_CASE % { 'id': id, 'i': i })
-    get_script_source_cases.append(GET_DELAY_SCRIPT_SOURCE_CASE % {
+    get_index_cases.append(GET_DEBUGGER_INDEX_CASE % { 'id': id, 'i': i })
+    get_script_source_cases.append(GET_DEBUGGER_SCRIPT_SOURCE_CASE % {
       'id': id,
       'length': length,
       'i': i
     })
-    get_script_name_cases.append(GET_DELAY_SCRIPT_NAME_CASE % {
+    get_script_name_cases.append(GET_DEBUGGER_SCRIPT_NAME_CASE % {
       'name': native_name,
       'length': len(native_name),
       'i': i
@@ -342,8 +342,8 @@
   # Emit result
   output = open(str(target[0]), "w")
   output.write(HEADER_TEMPLATE % {
-    'builtin_count': len(ids) + len(delay_ids),
-    'delay_count': len(delay_ids),
+    'builtin_count': len(ids) + len(debugger_ids),
+    'debugger_count': len(debugger_ids),
     'source_lines': "\n".join(source_lines),
     'get_index_cases': "".join(get_index_cases),
     'get_script_source_cases': "".join(get_script_source_cases),
@@ -355,8 +355,8 @@
   if len(target) > 1:
     output = open(str(target[1]), "w")
     output.write(HEADER_TEMPLATE % {
-      'builtin_count': len(ids) + len(delay_ids),
-      'delay_count': len(delay_ids),
+      'builtin_count': len(ids) + len(debugger_ids),
+      'debugger_count': len(debugger_ids),
       'source_lines': "\n".join(source_lines_empty),
       'get_index_cases': "".join(get_index_cases),
       'get_script_source_cases': "".join(get_script_source_cases),
diff --git a/tools/visual_studio/js2c.cmd b/tools/visual_studio/js2c.cmd
index 54b1bfb..82722ff 100644
--- a/tools/visual_studio/js2c.cmd
+++ b/tools/visual_studio/js2c.cmd
@@ -3,4 +3,4 @@
 set TARGET_DIR=%2
 set PYTHON="..\..\..\third_party\python_24\python.exe"
 if not exist %PYTHON% set PYTHON=python.exe
-%PYTHON% ..\js2c.py %TARGET_DIR%\natives.cc %TARGET_DIR%\natives-empty.cc CORE %SOURCE_DIR%\macros.py %SOURCE_DIR%\runtime.js %SOURCE_DIR%\v8natives.js %SOURCE_DIR%\array.js %SOURCE_DIR%\string.js %SOURCE_DIR%\uri.js %SOURCE_DIR%\math.js %SOURCE_DIR%\messages.js %SOURCE_DIR%\apinatives.js %SOURCE_DIR%\debug-delay.js %SOURCE_DIR%\liveedit-delay.js %SOURCE_DIR%\mirror-delay.js %SOURCE_DIR%\date-delay.js %SOURCE_DIR%\regexp-delay.js %SOURCE_DIR%\json-delay.js
+%PYTHON% ..\js2c.py %TARGET_DIR%\natives.cc %TARGET_DIR%\natives-empty.cc CORE %SOURCE_DIR%\macros.py %SOURCE_DIR%\runtime.js %SOURCE_DIR%\v8natives.js %SOURCE_DIR%\array.js %SOURCE_DIR%\string.js %SOURCE_DIR%\uri.js %SOURCE_DIR%\math.js %SOURCE_DIR%\messages.js %SOURCE_DIR%\apinatives.js %SOURCE_DIR%\debug-debugger.js %SOURCE_DIR%\liveedit-debugger.js %SOURCE_DIR%\mirror-debugger.js %SOURCE_DIR%\date.js %SOURCE_DIR%\regexp.js %SOURCE_DIR%\json.js
diff --git a/tools/visual_studio/v8.vcproj b/tools/visual_studio/v8.vcproj
index 3122c6d..30b488f 100644
--- a/tools/visual_studio/v8.vcproj
+++ b/tools/visual_studio/v8.vcproj
@@ -135,15 +135,15 @@
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\date-delay.js"
+				RelativePath="..\..\src\date.js"
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\debug-delay.js"
+				RelativePath="..\..\src\debug-debugger.js"
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\liveedit-delay.js"
+				RelativePath="..\..\src\liveedit-debugger.js"
 				>
 			</File>
 			<File
@@ -159,15 +159,15 @@
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\mirror-delay.js"
+				RelativePath="..\..\src\mirror-debugger.js"
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\regexp-delay.js"
+				RelativePath="..\..\src\regexp.js"
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\json-delay.js"
+				RelativePath="..\..\src\json.js"
 				>
 			</File>
 			<File
@@ -192,7 +192,7 @@
 						Name="VCCustomBuildTool"
 						Description="Processing js files..."
 						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"
+						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
 						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
 					/>
 				</FileConfiguration>
@@ -203,7 +203,7 @@
 						Name="VCCustomBuildTool"
 						Description="Processing js files..."
 						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"
+						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
 						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
 					/>
 				</FileConfiguration>
diff --git a/tools/visual_studio/v8_arm.vcproj b/tools/visual_studio/v8_arm.vcproj
index cb7519b..cdba58e 100644
--- a/tools/visual_studio/v8_arm.vcproj
+++ b/tools/visual_studio/v8_arm.vcproj
@@ -135,15 +135,15 @@
 				>

 			</File>

 			<File

-				RelativePath="..\..\src\date-delay.js"

+				RelativePath="..\..\src\date.js"

 				>

 			</File>

 			<File

-				RelativePath="..\..\src\debug-delay.js"

+				RelativePath="..\..\src\debug-debugger.js"

 				>

 			</File>

 			<File

-				RelativePath="..\..\src\liveedit-delay.js"

+				RelativePath="..\..\src\liveedit-debugger.js"

 				>

 			</File>

 			<File

@@ -159,15 +159,15 @@
 				>

 			</File>

 			<File

-				RelativePath="..\..\src\mirror-delay.js"

+				RelativePath="..\..\src\mirror-debugger.js"

 				>

 			</File>

 			<File

-				RelativePath="..\..\src\regexp-delay.js"

+				RelativePath="..\..\src\regexp.js"

 				>

 			</File>

 			<File

-				RelativePath="..\..\src\json-delay.js"

+				RelativePath="..\..\src\json.js"

 				>

 			</File>

 			<File

@@ -192,7 +192,7 @@
 						Name="VCCustomBuildTool"

 						Description="Processing js files..."

 						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"

-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"

+						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"

 						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"

 					/>

 				</FileConfiguration>

@@ -203,7 +203,7 @@
 						Name="VCCustomBuildTool"

 						Description="Processing js files..."

 						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"

-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"

+						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"

 						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"

 					/>

 				</FileConfiguration>

diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index 1d6d605..193e2a0 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -237,6 +237,10 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\cached-powers.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\char-predicates-inl.h"
 				>
 			</File>
@@ -405,6 +409,18 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\diy-fp.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\diy-fp.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\double.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\execution.cc"
 				>
 			</File>
@@ -433,6 +449,14 @@
                                 >
                         </File>
 			<File
+				RelativePath="..\..\src\fast-dtoa.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-dtoa.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\flags.cc"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_x64.vcproj b/tools/visual_studio/v8_x64.vcproj
index a476d7d..5ffd291 100644
--- a/tools/visual_studio/v8_x64.vcproj
+++ b/tools/visual_studio/v8_x64.vcproj
@@ -135,15 +135,15 @@
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\date-delay.js"
+				RelativePath="..\..\src\date.js"
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\debug-delay.js"
+				RelativePath="..\..\src\debug-debugger.js"
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\liveedit-delay.js"
+				RelativePath="..\..\src\liveedit-debugger.js"
 				>
 			</File>
 			<File
@@ -159,15 +159,15 @@
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\mirror-delay.js"
+				RelativePath="..\..\src\mirror-debugger.js"
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\regexp-delay.js"
+				RelativePath="..\..\src\regexp.js"
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\json-delay.js"
+				RelativePath="..\..\src\json.js"
 				>
 			</File>
 			<File
@@ -192,7 +192,7 @@
 						Name="VCCustomBuildTool"
 						Description="Processing js files..."
 						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"
+						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
 						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
 					/>
 				</FileConfiguration>
@@ -203,7 +203,7 @@
 						Name="VCCustomBuildTool"
 						Description="Processing js files..."
 						CommandLine=".\js2c.cmd ..\..\src &quot;$(IntDir)\DerivedSources&quot;"
-						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"
+						AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
 						Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
 					/>
 				</FileConfiguration>